package net.bmahe.genetics4j.gpu;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;

import org.apache.commons.collections4.ListUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.Validate;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jocl.CL;
import org.jocl.cl_command_queue;
import org.jocl.cl_context;
import org.jocl.cl_context_properties;
import org.jocl.cl_device_id;
import org.jocl.cl_kernel;
import org.jocl.cl_platform_id;
import org.jocl.cl_program;
import org.jocl.cl_queue_properties;

import net.bmahe.genetics4j.core.Genotype;
import net.bmahe.genetics4j.core.evaluation.FitnessEvaluator;
import net.bmahe.genetics4j.gpu.opencl.DeviceReader;
import net.bmahe.genetics4j.gpu.opencl.DeviceUtils;
import net.bmahe.genetics4j.gpu.opencl.KernelInfoReader;
import net.bmahe.genetics4j.gpu.opencl.OpenCLExecutionContext;
import net.bmahe.genetics4j.gpu.opencl.PlatformReader;
import net.bmahe.genetics4j.gpu.opencl.PlatformUtils;
import net.bmahe.genetics4j.gpu.opencl.model.Device;
import net.bmahe.genetics4j.gpu.opencl.model.KernelInfo;
import net.bmahe.genetics4j.gpu.opencl.model.Platform;
import net.bmahe.genetics4j.gpu.spec.GPUEAConfiguration;
import net.bmahe.genetics4j.gpu.spec.GPUEAExecutionContext;
import net.bmahe.genetics4j.gpu.spec.Program;

/**
 * GPU-accelerated fitness evaluator that leverages OpenCL for high-performance evolutionary algorithm execution.
 * 
 * <p>GPUFitnessEvaluator implements the core {@link FitnessEvaluator} interface to provide GPU acceleration for fitness
 * computation in evolutionary algorithms. This evaluator manages the complete OpenCL lifecycle, from device discovery
 * and kernel compilation to memory management and resource cleanup.
 * 
 * <p>Key responsibilities include:
 * <ul>
 * <li><strong>OpenCL initialization</strong>: Platform and device discovery, context creation, and kernel
 * compilation</li>
 * <li><strong>Resource management</strong>: Managing OpenCL contexts, command queues, programs, and kernels</li>
 * <li><strong>Population partitioning</strong>: Distributing work across multiple OpenCL devices</li>
 * <li><strong>Asynchronous execution</strong>: Coordinating concurrent GPU operations with CPU-side logic</li>
 * <li><strong>Memory lifecycle</strong>: Ensuring proper cleanup of GPU resources</li>
 * </ul>
 * 
 * <p>Architecture overview:
 * <ol>
 * <li><strong>Initialization ({@link #preEvaluation})</strong>: Discover platforms/devices, compile kernels, create
 * contexts</li>
 * <li><strong>Evaluation ({@link #evaluate})</strong>: Partition population, execute fitness computation on GPU</li>
 * <li><strong>Cleanup ({@link #postEvaluation})</strong>: Release all OpenCL resources and contexts</li>
 * </ol>
 * 
 * <p>Multi-device support:
 * <ul>
 * <li><strong>Device filtering</strong>: Selects devices based on user-defined criteria (type, capabilities)</li>
 * <li><strong>Load balancing</strong>: Automatically distributes population across available devices</li>
 * <li><strong>Parallel execution</strong>: Concurrent fitness evaluation on multiple GPUs or devices</li>
 * <li><strong>Asynchronous coordination</strong>: Non-blocking execution with CompletableFuture-based results</li>
 * </ul>
 * 
 * <p>Resource management patterns:
 * <ul>
 * <li><strong>Lazy initialization</strong>: OpenCL resources created only when needed</li>
 * <li><strong>Automatic cleanup</strong>: Guaranteed resource release through lifecycle methods</li>
 * <li><strong>Error recovery</strong>: Robust handling of OpenCL errors and device failures</li>
 * <li><strong>Memory optimization</strong>: Efficient GPU memory usage and transfer patterns</li>
 * </ul>
 * 
 * <p>Example usage in GPU EA system:
 * 
 * <pre>{@code
 * // GPU configuration with OpenCL kernel
 * Program fitnessProgram = Program.ofResource("/kernels/optimization.cl");
 * GPUEAConfiguration<Double> config = GPUEAConfigurationBuilder.<Double>builder()
 * 		.program(fitnessProgram)
 * 		.fitness(new MyGPUFitness())
 * 		// ... other EA configuration
 * 		.build();
 * 
 * // Execution context with device preferences
 * GPUEAExecutionContext<Double> context = GPUEAExecutionContextBuilder.<Double>builder()
 * 		.populationSize(2000)
 * 		.deviceFilter(device -> device.type() == DeviceType.GPU)
 * 		.platformFilter(platform -> platform.profile() == PlatformProfile.FULL_PROFILE)
 * 		.build();
 * 
 * // Evaluator handles all OpenCL lifecycle automatically
 * GPUFitnessEvaluator<Double> evaluator = new GPUFitnessEvaluator<>(context, config, executorService);
 * 
 * // Used by EA system - lifecycle managed automatically
 * EASystem<Double> system = EASystemFactory.from(config, context, executorService, evaluator);
 * }</pre>
 * 
 * <p>Performance characteristics:
 * <ul>
 * <li><strong>Initialization overhead</strong>: One-time setup cost for OpenCL compilation and context creation</li>
 * <li><strong>Scalability</strong>: Performance scales with population size and problem complexity</li>
 * <li><strong>Memory bandwidth</strong>: Host-device transfers can dominate; the evaluator is most effective when
 * per-genotype computation is large relative to the data transferred</li>
 * <li><strong>Concurrency</strong>: Supports concurrent evaluation across multiple devices</li>
 * </ul>
 * 
 * <p>Error handling:
 * <ul>
 * <li><strong>Device failures</strong>: Graceful degradation when devices become unavailable</li>
 * <li><strong>Memory errors</strong>: Proper cleanup and error reporting for GPU memory issues</li>
 * <li><strong>Compilation errors</strong>: Clear error messages for kernel compilation failures</li>
 * <li><strong>Resource leaks</strong>: Guaranteed cleanup even in exceptional circumstances</li>
 * </ul>
 * 
 * @param <T> the type of fitness values produced, must be comparable for selection operations
 * @see FitnessEvaluator
 * @see GPUEAConfiguration
 * @see GPUEAExecutionContext
 * @see OpenCLExecutionContext
 * @see net.bmahe.genetics4j.gpu.fitness.OpenCLFitness
 */
public class GPUFitnessEvaluator<T extends Comparable<T>> implements FitnessEvaluator<T> {
	public static final Logger logger = LogManager.getLogger(GPUFitnessEvaluator.class);

	private final GPUEAExecutionContext<T> gpuEAExecutionContext;
	private final GPUEAConfiguration<T> gpuEAConfiguration;
	private final ExecutorService executorService;

	private List<Pair<Platform, Device>> selectedPlatformToDevice;

	final List<cl_context> clContexts = new ArrayList<>();
	final List<cl_command_queue> clCommandQueues = new ArrayList<>();
	final List<cl_program> clPrograms = new ArrayList<>();
	final List<Map<String, cl_kernel>> clKernels = new ArrayList<>();
	final List<OpenCLExecutionContext> clExecutionContexts = new ArrayList<>();

	/**
	 * Constructs a GPU fitness evaluator with the specified configuration and execution context.
	 * 
	 * <p>Initializes the evaluator with GPU-specific configuration and execution parameters. The evaluator will use the
	 * provided executor service for coordinating asynchronous operations between CPU and GPU components.
	 * 
	 * <p>The constructor performs minimal initialization - the actual OpenCL setup occurs during
	 * {@link #preEvaluation()} to follow the fitness evaluator lifecycle pattern.
	 * 
	 * @param _gpuEAExecutionContext the GPU execution context with device filters and population settings
	 * @param _gpuEAConfiguration    the GPU EA configuration with OpenCL program and fitness function
	 * @param _executorService       the executor service for managing asynchronous operations
	 * @throws NullPointerException if any argument is {@code null}
	 */
	public GPUFitnessEvaluator(final GPUEAExecutionContext<T> _gpuEAExecutionContext,
			final GPUEAConfiguration<T> _gpuEAConfiguration,
			final ExecutorService _executorService) {
		Validate.notNull(_gpuEAExecutionContext);
		Validate.notNull(_gpuEAConfiguration);
		Validate.notNull(_executorService);

		this.gpuEAExecutionContext = _gpuEAExecutionContext;
		this.gpuEAConfiguration = _gpuEAConfiguration;
		this.executorService = _executorService;

		CL.setExceptionsEnabled(true);
	}

	private String loadResource(final String filename) {
		Validate.notBlank(filename);

		try {
			return IOUtils.resourceToString(filename, StandardCharsets.UTF_8);
		} catch (IOException e) {
			throw new IllegalStateException("Unable to load resource " + filename, e);
		}
	}

	private List<String> grabProgramSources() {
		final Program programSpec = gpuEAConfiguration.program();

		logger.info("Load program source: {}", programSpec);

		final List<String> sources = new ArrayList<>();

		sources.addAll(programSpec.content());

		programSpec.resources().stream().map(this::loadResource).forEach(sources::add);

		return sources;
	}

	/**
	 * Initializes OpenCL resources and prepares GPU devices for fitness evaluation.
	 * 
	 * <p>This method performs the complete OpenCL initialization sequence:
	 * <ol>
	 * <li><strong>Platform discovery</strong>: Enumerates available OpenCL platforms</li>
	 * <li><strong>Device filtering</strong>: Selects devices based on configured filters</li>
	 * <li><strong>Context creation</strong>: Creates OpenCL contexts for selected devices</li>
	 * <li><strong>Queue setup</strong>: Creates command queues with profiling and out-of-order execution</li>
	 * <li><strong>Program compilation</strong>: Compiles OpenCL kernels from source code</li>
	 * <li><strong>Kernel preparation</strong>: Creates kernel objects and queries execution info</li>
	 * <li><strong>Fitness initialization</strong>: Calls lifecycle hooks on the fitness function</li>
	 * </ol>
	 * 
	 * <p>Device selection process:
	 * <ul>
	 * <li>Applies platform filters to discovered OpenCL platforms</li>
	 * <li>Enumerates devices for each qualifying platform</li>
	 * <li>Applies device filters to select appropriate devices</li>
	 * <li>Validates that at least one device is available</li>
	 * </ul>
	 * 
	 * <p>The method creates separate OpenCL contexts for each selected device to enable concurrent execution and optimal
	 * resource utilization. Each context includes compiled programs and kernel objects ready for fitness evaluation.
	 * 
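	 * <p>Device and platform selection is driven entirely by the filters configured on the execution context; this
	 * method only applies them. As a brief sketch (builder and accessor names follow the class-level example above and
	 * are illustrative):
	 * 
	 * <pre>{@code
	 * GPUEAExecutionContext<Double> context = GPUEAExecutionContextBuilder.<Double>builder()
	 * 		.platformFilter(platform -> platform.profile() == PlatformProfile.FULL_PROFILE)
	 * 		.deviceFilter(device -> device.type() == DeviceType.GPU)
	 * 		.populationSize(2000)
	 * 		.build();
	 * }</pre>
	 * 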
	 * @throws IllegalArgumentException if no compatible OpenCL device is found
	 * @throws RuntimeException         if OpenCL initialization, program compilation, or kernel creation fails
	 */
	@Override
	public void preEvaluation() {
		logger.trace("Init...");
		FitnessEvaluator.super.preEvaluation();

		final var platformReader = new PlatformReader();
		final var deviceReader = new DeviceReader();
		final var kernelInfoReader = new KernelInfoReader();

		final int numPlatforms = PlatformUtils.numPlatforms();
		logger.info("Found {} platforms", numPlatforms);

		final List<cl_platform_id> platformIds = PlatformUtils.platformIds(numPlatforms);

		logger.info("Selecting platform and devices");
		final var platformFilters = gpuEAExecutionContext.platformFilters();
		final var deviceFilters = gpuEAExecutionContext.deviceFilters();

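		// Discover every platform, apply the platform filters, enumerate each remaining platform's devices and keep
		// only the (platform, device) pairs whose device passes the device filters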
		selectedPlatformToDevice = platformIds.stream()
				.map(platformReader::read)
				.filter(platformFilters)
				.flatMap(platform -> {
					final var platformId = platform.platformId();
					final int numDevices = DeviceUtils.numDevices(platformId);
					logger.trace("\tPlatform {}: {} devices", platform.name(), numDevices);

					final var deviceIds = DeviceUtils.getDeviceIds(platformId, numDevices);
					return deviceIds.stream().map(deviceId -> Pair.of(platform, deviceId));
				})
				.map(platformToDeviceId -> {
					final var platform = platformToDeviceId.getLeft();
					final var platformId = platform.platformId();
					final var deviceID = platformToDeviceId.getRight();

					return Pair.of(platform, deviceReader.read(platformId, deviceID));
				})
				.filter(platformToDevice -> deviceFilters.test(platformToDevice.getRight()))
				.toList();

		if (logger.isTraceEnabled()) {
			logger.trace("============================");
			logger.trace("Selected devices:");
			selectedPlatformToDevice.forEach(pd -> {
				logger.trace("{}", pd.getLeft());
				logger.trace("\t{}", pd.getRight());
			});
			logger.trace("============================");
		}

		Validate.isTrue(selectedPlatformToDevice.size() > 0,
				"No OpenCL device matched the configured platform and device filters");

		final List<String> programs = grabProgramSources();
		final String[] programsArr = programs.toArray(new String[programs.size()]);

		for (final var platformAndDevice : selectedPlatformToDevice) {
			final var platform = platformAndDevice.getLeft();
			final var device = platformAndDevice.getRight();

			logger.info("Processing platform [{}] / device [{}]", platform.name(), device.name());

			logger.info("\tCreating context");
			cl_context_properties contextProperties = new cl_context_properties();
			contextProperties.addProperty(CL.CL_CONTEXT_PLATFORM, platform.platformId());

			final cl_context context = CL
					.clCreateContext(contextProperties, 1, new cl_device_id[] { device.deviceId() }, null, null, null);

			logger.info("\tCreating command queue");
			final cl_queue_properties queueProperties = new cl_queue_properties();
			queueProperties.addProperty(CL.CL_QUEUE_PROPERTIES,
					CL.CL_QUEUE_PROFILING_ENABLE | CL.CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE);
			final cl_command_queue commandQueue = CL
					.clCreateCommandQueueWithProperties(context, device.deviceId(), queueProperties, null);

			logger.info("\tCreate program");
			final cl_program program = CL.clCreateProgramWithSource(context, programsArr.length, programsArr, null, null);

			final var programSpec = gpuEAConfiguration.program();
			final var buildOptions = programSpec.buildOptions().orElse(null);
			logger.info("\tBuilding program with options: {}", buildOptions);
			CL.clBuildProgram(program, 0, null, buildOptions, null, null);

			final Set<String> kernelNames = gpuEAConfiguration.program().kernelNames();

			final Map<String, cl_kernel> kernels = new HashMap<>();
			final Map<String, KernelInfo> kernelInfos = new HashMap<>();
			for (final String kernelName : kernelNames) {

				logger.info("\tCreate kernel {}", kernelName);
				final cl_kernel kernel = CL.clCreateKernel(program, kernelName, null);
				Validate.notNull(kernel);

				kernels.put(kernelName, kernel);

				final var kernelInfo = kernelInfoReader.read(device.deviceId(), kernel, kernelName);
				logger.trace("\t{}", kernelInfo);
				kernelInfos.put(kernelName, kernelInfo);
			}

			clContexts.add(context);
			clCommandQueues.add(commandQueue);
			clKernels.add(kernels);
			clPrograms.add(program);

			final var openCLExecutionContext = OpenCLExecutionContext.builder()
					.platform(platform)
					.device(device)
					.clContext(context)
					.clCommandQueue(commandQueue)
					.kernels(kernels)
					.kernelInfos(kernelInfos)
					.clProgram(program)
					.build();

			clExecutionContexts.add(openCLExecutionContext);
		}

		final var fitness = gpuEAConfiguration.fitness();
		fitness.beforeAllEvaluations();
		for (final OpenCLExecutionContext clExecutionContext : clExecutionContexts) {
			fitness.beforeAllEvaluations(clExecutionContext, executorService);
		}
	}

	/**
	 * Evaluates fitness for a population of genotypes using GPU acceleration.
	 * 
	 * <p>This method implements the core fitness evaluation logic by distributing the population across available OpenCL
	 * devices and executing fitness computation concurrently. The evaluation process follows these steps:
	 * 
	 * <ol>
	 * <li><strong>Population partitioning</strong>: Divides genotypes across available devices</li>
	 * <li><strong>Parallel dispatch</strong>: Submits evaluation tasks to each device asynchronously</li>
	 * <li><strong>GPU execution</strong>: Executes OpenCL kernels for fitness computation</li>
	 * <li><strong>Result collection</strong>: Gathers fitness values from all devices</li>
	 * <li><strong>Result aggregation</strong>: Combines results preserving original order</li>
	 * </ol>
	 * 
	 * <p>Load balancing strategy:
	 * <ul>
	 * <li>Automatically calculates partition size based on population and device count (see the worked example below)</li>
	 * <li>Round-robin assignment of partitions to devices for balanced workload</li>
	 * <li>Asynchronous execution allows devices to work at their optimal pace</li>
	 * </ul>
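	 * 
	 * <p>As a worked illustration (the numbers are hypothetical): with 2,000 genotypes and 2 selected devices the
	 * partition size is ceil(2000 / 2) = 1,000, so each device receives one partition of 1,000 genotypes; with 3
	 * devices the partition size becomes ceil(2000 / 3) = 667, yielding partitions of 667, 667 and 666 genotypes
	 * assigned round-robin to devices 0, 1 and 2.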
	 * 
	 * <p>The method coordinates with the configured fitness function through lifecycle hooks:
	 * <ul>
	 * <li>{@code beforeEvaluation()}: Called before each device partition evaluation</li>
	 * <li>{@code compute()}: Executes the actual GPU fitness computation</li>
	 * <li>{@code afterEvaluation()}: Called after each device partition completes</li>
	 * </ul>
	 * 
	 * <p>Concurrency and performance:
	 * <ul>
	 * <li>Multiple devices execute evaluation partitions concurrently</li>
	 * <li>CompletableFuture-based coordination for non-blocking execution</li>
	 * <li>Automatic workload distribution across available GPU resources</li>
	 * </ul>
	 * 
	 * @param generation the current generation number for context and logging
	 * @param genotypes  the population of genotypes to evaluate
	 * @return fitness values corresponding to each genotype in the same order
	 * @throws IllegalArgumentException if genotypes is null or empty
	 * @throws RuntimeException         if GPU evaluation fails or OpenCL errors occur
	 */
	@Override
	public List<T> evaluate(final long generation, final List<Genotype> genotypes) {

		final var fitness = gpuEAConfiguration.fitness();

		// TODO make it configurable from execution context
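		// Spread the genotypes as evenly as possible across the selected OpenCL contexts; the last partition may be
		// smaller than the others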
		final int partitionSize = (int) (Math.ceil((double) genotypes.size() / clExecutionContexts.size()));
		final var subGenotypes = ListUtils.partition(genotypes, partitionSize);
		logger.debug("Population decomposed into {} partition(s)", subGenotypes.size());
		if (logger.isTraceEnabled()) {
			for (int i = 0; i < subGenotypes.size(); i++) {
				final List<Genotype> subGenotype = subGenotypes.get(i);
				logger.trace("\tPartition {} with {} elements", i, subGenotype.size());
			}
		}

		final List<CompletableFuture<List<T>>> subResultsCF = new ArrayList<>();
		for (int i = 0; i < subGenotypes.size(); i++) {
			final var openCLExecutionContext = clExecutionContexts.get(i % clExecutionContexts.size());
			final var subGenotype = subGenotypes.get(i);

			fitness.beforeEvaluation(generation, subGenotype);
			fitness.beforeEvaluation(openCLExecutionContext, executorService, generation, subGenotype);

			final var resultsCF = fitness.compute(openCLExecutionContext, executorService, generation, subGenotype)
					.thenApply(results -> {

						fitness.afterEvaluation(openCLExecutionContext, executorService, generation, subGenotype);
						fitness.afterEvaluation(generation, subGenotype);

						return results;
					});

			subResultsCF.add(resultsCF);
		}

		final List<T> resultsEvaluation = new ArrayList<>(genotypes.size());
		for (final CompletableFuture<List<T>> subResultCF : subResultsCF) {
			final var fitnessResults = subResultCF.join();
			resultsEvaluation.addAll(fitnessResults);
		}
		return resultsEvaluation;
	}

	/**
	 * Cleans up OpenCL resources and releases GPU memory after evaluation completion.
	 * 
	 * <p>This method performs comprehensive cleanup of all OpenCL resources in the proper order to prevent memory leaks
	 * and ensure clean shutdown. The cleanup sequence follows OpenCL best practices for resource deallocation:
	 * 
	 * <ol>
	 * <li><strong>Fitness cleanup</strong>: Calls lifecycle hooks on the fitness function</li>
	 * <li><strong>Kernel release</strong>: Releases all compiled kernel objects</li>
	 * <li><strong>Program release</strong>: Releases compiled OpenCL programs</li>
	 * <li><strong>Queue release</strong>: Releases command queues and pending operations</li>
	 * <li><strong>Context release</strong>: Releases OpenCL contexts and associated memory</li>
	 * <li><strong>Reference cleanup</strong>: Clears internal data structures and references</li>
	 * </ol>
	 * 
	 * <p>Resource management guarantees:
	 * <ul>
	 * <li>All GPU memory allocations are properly released</li>
	 * <li>OpenCL objects are released in dependency order to avoid errors</li>
	 * <li>No resource leaks occur even if individual cleanup operations fail</li>
	 * <li>Evaluator returns to a clean state ready for potential reinitialization</li>
	 * </ul>
	 * 
	 * <p>The method coordinates with the configured fitness function to ensure any fitness-specific resources (buffers,
	 * textures, etc.) are also properly cleaned up through the {@code afterAllEvaluations()} lifecycle hooks.
	 * 
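	 * <p>A minimal manual lifecycle sketch (illustrative only; when driven through an {@code EASystem} these calls are
	 * made automatically, and the variable names below are hypothetical):
	 * 
	 * <pre>{@code
	 * evaluator.preEvaluation();
	 * try {
	 * 	for (long generation = 0; generation < maxGenerations; generation++) {
	 * 		final List<Double> fitnessValues = evaluator.evaluate(generation, population);
	 * 		// ... selection and reproduction based on fitnessValues ...
	 * 	}
	 * } finally {
	 * 	evaluator.postEvaluation();
	 * }
	 * }</pre>
	 * 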
	 * @throws RuntimeException if releasing OpenCL resources fails
	 */
	@Override
	public void postEvaluation() {

		final var fitness = gpuEAConfiguration.fitness();

		for (final OpenCLExecutionContext clExecutionContext : clExecutionContexts) {
			fitness.afterAllEvaluations(clExecutionContext, executorService);
		}
		fitness.afterAllEvaluations();

		logger.debug("Releasing kernels");

		for (final Map<String, cl_kernel> kernels : clKernels) {
			for (final cl_kernel clKernel : kernels.values()) {
				CL.clReleaseKernel(clKernel);
			}
		}
		clKernels.clear();

		logger.debug("Releasing programs");
		for (final cl_program clProgram : clPrograms) {
			CL.clReleaseProgram(clProgram);
		}
		clPrograms.clear();

		logger.debug("Releasing command queues");
		for (final cl_command_queue clCommandQueue : clCommandQueues) {
			CL.clReleaseCommandQueue(clCommandQueue);
		}
		clCommandQueues.clear();

		logger.debug("Releasing contexts");
		for (final cl_context clContext : clContexts) {
			CL.clReleaseContext(clContext);
		}
		clContexts.clear();

		clExecutionContexts.clear();
		selectedPlatformToDevice = null;

		FitnessEvaluator.super.postEvaluation();
	}
}