// SPDX-FileCopyrightText: 2020 Roberto Posenato
//
// SPDX-License-Identifier: LGPL-3.0-or-later

package it.univr.di.cstnu.util;

import it.unimi.dsi.fastutil.objects.Object2IntMap;
import it.univr.di.cstnu.algorithms.CSTNU;
import it.univr.di.cstnu.algorithms.WellDefinitionException;
import it.univr.di.cstnu.graph.*;
import it.univr.di.labeledvalue.ALabel;
import it.univr.di.labeledvalue.Label;
import net.openhft.affinity.AffinityStrategies;
import net.openhft.affinity.AffinityThreadFactory;
import org.kohsuke.args4j.Argument;
import org.kohsuke.args4j.CmdLineException;
import org.kohsuke.args4j.CmdLineParser;
import org.kohsuke.args4j.Option;
import org.kohsuke.args4j.spi.StringArrayOptionHandler;
import org.xml.sax.SAXException;

import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.xml.parsers.ParserConfigurationException;
import java.io.File;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Reads CSTNU instances and converts them into CSTNPSU (==FTNU) instances, transforming each contingent link into a guarded one.
 * <p>
 * The transformation of a contingent link consists of adding two ordinary constraints between the two nodes of the contingent link
 * with bounds derived from the contingent range.
 * <p>
 * If the contingent link is represented by the lower and upper values plus the lower and upper ordinary constraints, this class modifies
 * the ordinary constraints, reducing/increasing them to make a guarded range with the required reduction/increment.
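 * <p>
 * Worked example (illustrative only, assuming the default options {@code -d 20 -i 20}, i.e., a 20% reduction of the lower guard and a 20%
 * increment of the upper guard): a contingent link with range {@code [10, 20]} becomes a guarded link that keeps {@code [10, 20]} as its
 * guards and gains the outer ordinary bounds {@code [8, 24]} (approximately, depending on the rounding applied when the factors are derived).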
 *
 * @author posenato
 * @version $Rev: 733 $
 */
@edu.umd.cs.findbugs.annotations.SuppressFBWarnings(value = "STCAL", justification = "It is not relevant here!")
public final class CSTNU2CSTNPSU {
    /**
     * Version of the class
     */
    static public final String VERSIONandDATE = "Version 1.0 - May, 27 2023";
    /**
     * logger
     */
    static final Logger LOG = Logger.getLogger(CSTNU2CSTNPSU.class.getName());
    /**
     * Date formatter
     */
    private final static SimpleDateFormat dateFormatter = new SimpleDateFormat("yyyy.MM.dd HH:mm:ss");

    /**
     * Allows the modification of a set of input instances.
     *
     * @param args an array of {@link String} objects.
     */
    @SuppressWarnings("null")
    public static void main(final String[] args) {
        LOG.finest("CSTNU2CSTNPSU " + VERSIONandDATE + "\nStart...");
        System.out.println("CSTNU2CSTNPSU " + VERSIONandDATE + "\n"
                           + "\nSPDX-License-Identifier: LGPL-3.0-or-later, Roberto Posenato.\n"
                           + getNow() + ": Start of execution.");
        final CSTNU2CSTNPSU converter = new CSTNU2CSTNPSU();
        if (!converter.manageParameters(args)) {
            return;
        }
        LOG.finest("Parameters ok!");
        if (converter.versionReq) {
            return;
        }
        // All parameters are set

        /*
         * AffinityLock allows locking a CPU to a thread.
         * It seems to give better performance when a CPU-bound task has to be executed!
         * To work, some CPUs need to be reserved.
         * On our server, I modified /boot/grub/grub.cfg, adding "isolcpus=4,5,6,7,8,9,10,11" to the line that boots the kernel to reserve 8 CPUs.
         * Such CPUs have (socketId-coreId): 4(0-4), 5(0-5), 6(1-0), 7(1-1), 8(1-2), 9(1-3), 10(1-4), 11(1-5).
         * Then I rebooted the server.
         * This class has to be started normally (without using taskset!).
         * I don't modify /etc/default/grub and then run update-grub because it changes a lot of things.
         * **NOTE**
         * After some simulations on AMD Opteron™ 4334, I discovered that:
         * 0) The best performance is obtained by checking one file at a time!
         * 1) It isn't worth running more than two processors in parallel because this kind of app does not allow for scaling.
         *    For each added process, the performance lowers by about 10%.
         * 2) Running two processes in two different sockets lowers the performance by about 20%! It is better to run the two processes on the same socket.
         * 3) Therefore, I modified /boot/grub/grub.cfg setting "isolcpus=8,9,10,11".
         */
        final int nCPUs = converter.nCPUs;// Runtime.getRuntime().availableProcessors();

        // Logging stuff for learning Affinity behaviour.
        // System.out.println("Base CPU affinity mask: " + AffinityLock.BASE_AFFINITY);
        // System.out.println("Reserved CPU affinity mask: " + AffinityLock.RESERVED_AFFINITY);
        // System.out.println("Current CPU affinity: " + Affinity.getCpu());
        // CpuLayout cpuLayout = AffinityLock.cpuLayout();
        // System.out.println("CPU Layout: " + cpuLayout.toString());
        // for (int k = 11; k > 3; k--) {
        //     System.out.println("Cpu " + k + "\nSocket: " + cpuLayout.socketId(k) + ". Core:" + cpuLayout.coreId(k));
        // }

        /*
         * Check all files in parallel.
         */
        /*
         * 1st method using streams (parallelStream).
         * Very nice, but it suffers from a known problem with streams:
         * the use of the default ForkJoinPool in the implementation of parallel() makes it possible that a heavy task can block the following tasks.
         */
        // tester.inputCSTNFile.parallelStream().forEach(file -> cstnWorker(tester, file, executor, edgeFactory));
        /*
         * 2nd method using Callable.
         * A new FixedThreadPool executor creates nProcessor threads and pipes all processes associated with files to such a pool.
         * There is no problem if one thread requires a lot of time.
         * Final synchronization is obtained by requesting .get() from each Callable.
         * AffinityThreadFactory allows locking a thread on one core for the whole time (less overhead).
         */
        final ExecutorService CSTNU2FTNUExecutor = (nCPUs > 0)
            ? Executors.newFixedThreadPool(nCPUs, new AffinityThreadFactory("cstnWorker", AffinityStrategies.DIFFERENT_CORE))
            : null;

        System.out.println(getNow() + ": #Processors for computation: " + nCPUs);
        System.out.println(getNow() + ": Instances to check are CSTNU instances.");
        final RunMeter runMeter = new RunMeter(System.currentTimeMillis(), converter.instances.size(), 0);
        runMeter.printProgress(0);

        final List<Future<Boolean>> future = new ArrayList<>(10);
        int nTaskSuccessfullyFinished = 0;
        for (final File file : converter.instances) {
            if (nCPUs > 0) {
                future.add(CSTNU2FTNUExecutor.submit(() -> Boolean.valueOf(converter.worker(file, runMeter))));
            } else {
                if (converter.worker(file, runMeter)) {
                    nTaskSuccessfullyFinished++;
                }
            }
        }
        if (nCPUs > 0) {
            // System.out.println(getNow() + ": #Tasks queued: " + future.size());
            // Wait until all tasks are finished and count the successful ones!
            for (final Future<Boolean> f : future) {
                try {
                    if (f.get()) {
                        nTaskSuccessfullyFinished++;
                    }
                } catch (final Exception ex) {
                    System.out.println("\nA problem occurred during a check: " + ex.getMessage() + ". File ignored.");
                } finally {
                    if (!f.isDone()) {
                        LOG.warning("It is necessary to cancel the task before continuing.");
                        f.cancel(true);
                    }
                }
            }
        }
        final String msg = "Number of instances processed successfully over total: " + nTaskSuccessfullyFinished + "/" + converter.instances.size() + ".";
        LOG.info(msg);
        System.out.println("\n" + getNow() + ": " + msg);

        if (nCPUs > 0) {
            // executor shutdown!
            try {
                System.out.println(getNow() + ": Shutdown executors.");
                CSTNU2FTNUExecutor.shutdown();
                if (!CSTNU2FTNUExecutor.awaitTermination(2, TimeUnit.SECONDS)) {
                    System.err.println(getNow() + ": Task is taking too long to shut down...");
                }
            } catch (final InterruptedException e) {
                System.err.println(getNow() + ": Task interrupted.");
            } finally {
                if (!CSTNU2FTNUExecutor.isTerminated()) {
                    System.err.println(getNow() + ": Cancel non-finished tasks.");
                }
                CSTNU2FTNUExecutor.shutdownNow();
                System.out.println(getNow() + ": Shutdown finished.\nExecution finished.");
            }
        }
    }
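
    /*
     * Illustrative invocation (a sketch, not taken from the original documentation): the jar/classpath name "CstnuTool.jar" and the
     * input file names are assumptions, while the options -d, -i, --suffix and --nCPUs are the ones declared below.
     *
     *   java -cp CstnuTool.jar it.univr.di.cstnu.util.CSTNU2CSTNPSU -d 20 -i 20 --suffix cstnpsu instance1.cstnu instance2.cstnu
     */
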
    /**
     * @return current time in {@link #dateFormatter} format
     */
    private static String getNow() {
        return dateFormatter.format(new Date());
    }

    /**
     * Class for representing an edge.
     */
    Class<? extends Edge> currentEdgeImplClass;

    /**
     * Lower decrement
     */
    @Option(depends = "--decrement", name = "-d", usage = "The percentage [0, 100) of the lower guard to remove for determining the lower bound. Lower bound will always be greater than zero and lower guard - 1 (if positive).")
    private int lowerDecrement = 20;// 20% less

    /**
     * Percentage of the lower guard (derived from lowerDecrement in {@code manageParameters})
     */
    private double decrementFactor;

    /**
     * Upper increment
     */
    @Option(depends = "--increment", name = "-i", usage = "The percentage [0, 100) of the upper guard to add for determining the upper bound. Upper bound will always be greater than upper guard by at least one unit.")
    private int upperIncrement = 20;// 20% more

    /**
     * Percentage of the upper guard (derived from upperIncrement in {@code manageParameters})
     */
    private double incrementFactor;

    /**
     * The input file names. Each file has to contain a CSTNU graph in GraphML format.
     */
    @Argument(required = true, usage = "Input files. Each input file has to be a DC CSTNU graph in GraphML format. The DC property is fundamental!", metaVar = "CSTNU_file_names", handler = StringArrayOptionHandler.class)
    private String[] inputFiles;

    /**
     * The input instances as {@link File} objects.
     */
    private List<File> instances;

    /**
     * Roberto: I verified that, with this kind of computation, using more than one thread to check more files in parallel reduces the
     * performance of each single check!!!
     */
    @Option(name = "--nCPUs", usage = "Number of virtual CPUs that are reserved for this execution. The default is 0=no CPU reserved, there is only one thread for all the DC checking executions: such a thread can be allocated to a core, then deallocated and reallocated to another core. With nCPUs=1, there is only a thread, but such a thread is allocated to a core till its end. With more threads, the global performance increases, but each file can require more time because there is competition among threads for access to the memory.")
    private int nCPUs;

    /**
     * To allow the use of different suffixes.
     */
    @Option(name = "--suffix", usage = "The suffix to set for the converted file.")
    private String suffix = "cstnpsu";

    /**
     * Software Version.
     */
    @Option(name = "-v", aliases = "--version", usage = "Version")
    private boolean versionReq;

    /**
     * It cannot be used outside.
     */
    private CSTNU2CSTNPSU() {
    }

    /**
     * Print a version of this class in System.out.
     */
    public void printVersion() {
        // I use a non-static method to have a general method that prints the right name for each derived class.
        System.out.println(getClass().getName() + " " + CSTNU2CSTNPSU.VERSIONandDATE
                           + ".\nAcademic and non-commercial use only.\n" + "Copyright © 2020, Roberto Posenato");
    }

    /**
     * Given an instance, for each contingent link, it determines lower and upper bound values and adds them as ordinary constraints between
     * the two nodes.
     *
     * @param instance input instance to modify
     *
     * @return the instance with each contingent link converted into a guarded one.
     */
    @Nullable
    private TNGraph<CSTNUEdge> contingent2guarded(TNGraph<CSTNUEdge> instance) {
        if (instance == null) {
            return null;
        }
        final int nCtg = instance.getContingentNodeCount();
        if (LOG.isLoggable(Level.FINER)) {
            LOG.finer("Converting " + nCtg + " contingent links to guarded ones");
        }
        final CSTNU cstnu = new CSTNU(instance);
        cstnu.setContingentAlsoAsOrdinary(false);
        try {
            cstnu.initAndCheck();
        } catch (WellDefinitionException e) {
            throw new RuntimeException("Error found while building the CSTNU: " + e.getMessage());
        }
        final TNGraph<CSTNUEdge> newInstance = new TNGraph<>(cstnu.getG(), instance.getEdgeImplClass());

        int lowerBound, upperBound;
        boolean added;
        final Set<CSTNUEdge> alreadyChecked = new HashSet<>(100);
        for (final CSTNUEdge e : newInstance.getEdges()) {
            final Edge.ConstraintType edgeType = e.getConstraintType();
            if (edgeType == Edge.ConstraintType.internal || edgeType == Edge.ConstraintType.derived) {
                // This edge is not necessary
                newInstance.removeEdge(e.getName());
                continue;
            }
            if (!e.isContingentEdge() || alreadyChecked.contains(e)) {
                continue;
            }
            final LabeledNode s = newInstance.getSource(e);
            final LabeledNode d = newInstance.getDest(e);
            assert s != null;
            assert d != null;
            final Label label = s.getLabel().conjunction(d.getLabel());
            final CSTNUEdge invertedE = newInstance.findEdge(d, s);
            alreadyChecked.add(invertedE);
            if (d.isContingent()) {
                assert invertedE != null;
                assert d.getALabel() != null;
                upperBound = makeUpperBound(invertedE, d.getALabel());
                lowerBound = makeLowerBound(e);
                added = e.mergeLabeledValue(label, upperBound);
                if (!added) {
                    if (LOG.isLoggable(Level.FINER)) {
                        LOG.finer("Upper bound " + upperBound + " not added to " + e);
                    }
                }
                added = invertedE.mergeLabeledValue(label, lowerBound);
                if (!added) {
                    if (LOG.isLoggable(Level.FINER)) {
                        LOG.finer("Lower bound " + lowerBound + " not added to " + invertedE);
                    }
                }
            } else {
                if (!s.isContingent()) {
                    throw new IllegalStateException("For contingent link " + e + " neither of its endpoints is contingent.");
                }
                assert s.getALabel() != null;
                upperBound = makeUpperBound(e, s.getALabel());
                assert invertedE != null;
                lowerBound = makeLowerBound(invertedE);
                added = e.mergeLabeledValue(label, lowerBound);
                if (!added) {
                    if (LOG.isLoggable(Level.FINER)) {
                        LOG.finer("Lower bound " + lowerBound + " not added to " + e);
                    }
                }
                added = invertedE.mergeLabeledValue(label, upperBound);
                if (!added) {
                    if (LOG.isLoggable(Level.FINER)) {
                        LOG.finer("Upper bound " + upperBound + " not added to " + invertedE);
                    }
                }
            }
            if (LOG.isLoggable(Level.FINER)) {
                LOG.finer("New contingent link pair:\n\t" + e + " and\n\t" + invertedE);
            }
        }
        return newInstance;
    }
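
    /*
     * Worked example (illustrative only; the derivation of the two factors happens in manageParameters, which is assumed here to set
     * decrementFactor = (100 - lowerDecrement) / 100.0 and incrementFactor = (100 + upperIncrement) / 100.0):
     * with the default options -d 20 and -i 20, a contingent link A ==[10, 20]==> C keeps [10, 20] as guards and gets the ordinary bounds
     * 24 = 20 * 1.2 on the A->C edge and -8 = -(10 * 0.8) on the C->A edge, i.e., the guarded (outer) range [8, 24].
     */
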
    /**
     * @param fileName the input file name.
     *
     * @return new file name with the right suffix.
     */
    private String getNewFileName(String fileName) {
        if (fileName == null || !fileName.contains(".cstnu")) {
            throw new IllegalArgumentException("File name %s is not a CSTNU file name.".formatted(fileName));
        }
        return fileName.replace(".cstnu", suffix);
    }

    /**
     * @param e an edge containing a lower-case value
     *
     * @return the lower bound (already a negative value) determined using {@link #decrementFactor} * lower-case value of e
     */
    private int makeLowerBound(CSTNUEdge e) {
        // a contingent link has a single lower-case value; e.getLowerCaseValue() returns it directly
        return (int) (-e.getLowerCaseValue().getValue() * decrementFactor);
    }

    /**
     * @param e      an edge containing an upper-case value
     * @param aLabel a-label of the contingent node
     *
     * @return the upper bound determined using {@link #incrementFactor} * upper-case value of e
     */
    private int makeUpperBound(@Nonnull CSTNUEdge e, @Nonnull ALabel aLabel) {
        // a contingent link has a single upper-case value, e.getMinUpperCaseValue() returns it without need to know
        final Object2IntMap.Entry