1   package net.bmahe.genetics4j.gpu;
2   
3   import java.io.IOException;
4   import java.nio.charset.StandardCharsets;
5   import java.util.ArrayList;
6   import java.util.HashMap;
7   import java.util.List;
8   import java.util.Map;
9   import java.util.Set;
10  import java.util.concurrent.CompletableFuture;
11  import java.util.concurrent.ExecutorService;
12  
13  import org.apache.commons.collections4.ListUtils;
14  import org.apache.commons.io.IOUtils;
15  import org.apache.commons.lang3.Validate;
16  import org.apache.commons.lang3.tuple.Pair;
17  import org.apache.logging.log4j.LogManager;
18  import org.apache.logging.log4j.Logger;
19  import org.jocl.CL;
20  import org.jocl.cl_command_queue;
21  import org.jocl.cl_context;
22  import org.jocl.cl_context_properties;
23  import org.jocl.cl_device_id;
24  import org.jocl.cl_kernel;
25  import org.jocl.cl_platform_id;
26  import org.jocl.cl_program;
27  import org.jocl.cl_queue_properties;
28  
29  import net.bmahe.genetics4j.core.Genotype;
30  import net.bmahe.genetics4j.core.evaluation.FitnessEvaluator;
31  import net.bmahe.genetics4j.gpu.opencl.DeviceReader;
32  import net.bmahe.genetics4j.gpu.opencl.DeviceUtils;
33  import net.bmahe.genetics4j.gpu.opencl.KernelInfoReader;
34  import net.bmahe.genetics4j.gpu.opencl.OpenCLExecutionContext;
35  import net.bmahe.genetics4j.gpu.opencl.PlatformReader;
36  import net.bmahe.genetics4j.gpu.opencl.PlatformUtils;
37  import net.bmahe.genetics4j.gpu.opencl.model.Device;
38  import net.bmahe.genetics4j.gpu.opencl.model.KernelInfo;
39  import net.bmahe.genetics4j.gpu.opencl.model.Platform;
40  import net.bmahe.genetics4j.gpu.spec.GPUEAConfiguration;
41  import net.bmahe.genetics4j.gpu.spec.GPUEAExecutionContext;
42  import net.bmahe.genetics4j.gpu.spec.Program;
43  
44  /**
45   * GPU-accelerated fitness evaluator that leverages OpenCL for high-performance evolutionary algorithm execution.
46   * 
47   * <p>GPUFitnessEvaluator implements the core {@link FitnessEvaluator} interface to provide GPU acceleration for fitness
48   * computation in evolutionary algorithms. This evaluator manages the complete OpenCL lifecycle, from device discovery
49   * and kernel compilation to memory management and resource cleanup.
50   * 
51   * <p>Key responsibilities include:
52   * <ul>
53   * <li><strong>OpenCL initialization</strong>: Platform and device discovery, context creation, and kernel
54   * compilation</li>
55   * <li><strong>Resource management</strong>: Managing OpenCL contexts, command queues, programs, and kernels</li>
56   * <li><strong>Population partitioning</strong>: Distributing work across multiple OpenCL devices</li>
57   * <li><strong>Asynchronous execution</strong>: Coordinating concurrent GPU operations with CPU-side logic</li>
58   * <li><strong>Memory lifecycle</strong>: Ensuring proper cleanup of GPU resources</li>
59   * </ul>
60   * 
61   * <p>Architecture overview:
62   * <ol>
63   * <li><strong>Initialization ({@link #preEvaluation})</strong>: Discover platforms/devices, compile kernels, create
64   * contexts</li>
65   * <li><strong>Evaluation ({@link #evaluate})</strong>: Partition population, execute fitness computation on GPU</li>
66   * <li><strong>Cleanup ({@link #postEvaluation})</strong>: Release all OpenCL resources and contexts</li>
67   * </ol>
68   * 
69   * <p>Multi-device support:
70   * <ul>
71   * <li><strong>Device filtering</strong>: Selects devices based on user-defined criteria (type, capabilities)</li>
72   * <li><strong>Load balancing</strong>: Automatically distributes population across available devices</li>
73   * <li><strong>Parallel execution</strong>: Concurrent fitness evaluation on multiple GPUs or devices</li>
74   * <li><strong>Asynchronous coordination</strong>: Non-blocking execution with CompletableFuture-based results</li>
75   * </ul>
76   * 
77   * <p>Resource management patterns:
78   * <ul>
79   * <li><strong>Lazy initialization</strong>: OpenCL resources created only when needed</li>
80   * <li><strong>Automatic cleanup</strong>: Guaranteed resource release through lifecycle methods</li>
81   * <li><strong>Error recovery</strong>: Robust handling of OpenCL errors and device failures</li>
82   * <li><strong>Memory optimization</strong>: Efficient GPU memory usage and transfer patterns</li>
83   * </ul>
84   * 
85   * <p>Example usage in GPU EA system:
86   * 
87   * <pre>{@code
88   * // GPU configuration with OpenCL kernel
89   * Program fitnessProgram = Program.ofResource("/kernels/optimization.cl");
90   * GPUEAConfiguration<Double> config = GPUEAConfigurationBuilder.<Double>builder()
91   * 		.program(fitnessProgram)
92   * 		.fitness(new MyGPUFitness())
93   * 		// ... other EA configuration
94   * 		.build();
95   * 
96   * // Execution context with device preferences
97   * GPUEAExecutionContext<Double> context = GPUEAExecutionContextBuilder.<Double>builder()
98   * 		.populationSize(2000)
99   * 		.deviceFilter(device -> device.type() == DeviceType.GPU)
100  * 		.platformFilter(platform -> platform.profile() == PlatformProfile.FULL_PROFILE)
101  * 		.build();
102  * 
103  * // Evaluator handles all OpenCL lifecycle automatically
104  * GPUFitnessEvaluator<Double> evaluator = new GPUFitnessEvaluator<>(context, config, executorService);
105  * 
106  * // Used by EA system - lifecycle managed automatically
107  * EASystem<Double> system = EASystemFactory.from(config, context, executorService, evaluator);
108  * }</pre>
109  * 
110  * <p>Performance characteristics:
111  * <ul>
112  * <li><strong>Initialization overhead</strong>: One-time setup cost for OpenCL compilation and context creation</li>
113  * <li><strong>Scalability</strong>: Performance scales with population size and problem complexity</li>
114  * <li><strong>Memory bandwidth</strong>: Best suited to problems where computation dominates host-device data transfer</li>
115  * <li><strong>Concurrency</strong>: Supports concurrent evaluation across multiple devices</li>
116  * </ul>
117  * 
118  * <p>Error handling:
119  * <ul>
120  * <li><strong>Device failures</strong>: Graceful degradation when devices become unavailable</li>
121  * <li><strong>Memory errors</strong>: Proper cleanup and error reporting for GPU memory issues</li>
122  * <li><strong>Compilation errors</strong>: Clear error messages for kernel compilation failures</li>
123  * <li><strong>Resource leaks</strong>: Guaranteed cleanup even in exceptional circumstances</li>
124  * </ul>
125  * 
126  * @param <T> the type of fitness values produced, must be comparable for selection operations
127  * @see FitnessEvaluator
128  * @see GPUEAConfiguration
129  * @see GPUEAExecutionContext
130  * @see OpenCLExecutionContext
131  * @see net.bmahe.genetics4j.gpu.fitness.OpenCLFitness
132  */
133 public class GPUFitnessEvaluator<T extends Comparable<T>> implements FitnessEvaluator<T> {
134 	public static final Logger logger = LogManager.getLogger(GPUFitnessEvaluator.class);
135 
136 	private final GPUEAExecutionContext<T> gpuEAExecutionContext;
137 	private final GPUEAConfiguration<T> gpuEAConfiguration;
138 	private final ExecutorService executorService;
139 
140 	private List<Pair<Platform, Device>> selectedPlatformToDevice;
141 
142 	final List<cl_context> clContexts = new ArrayList<>();
143 	final List<cl_command_queue> clCommandQueues = new ArrayList<>();
144 	final List<cl_program> clPrograms = new ArrayList<>();
145 	final List<Map<String, cl_kernel>> clKernels = new ArrayList<>();
146 	final List<OpenCLExecutionContext> clExecutionContexts = new ArrayList<>();
147 
148 	/**
149 	 * Constructs a GPU fitness evaluator with the specified configuration and execution context.
150 	 * 
151 	 * <p>Initializes the evaluator with GPU-specific configuration and execution parameters. The evaluator will use the
152 	 * provided executor service for coordinating asynchronous operations between CPU and GPU components.
153 	 * 
154 	 * <p>The constructor performs minimal initialization - the actual OpenCL setup occurs during
155 	 * {@link #preEvaluation()} to follow the fitness evaluator lifecycle pattern.
156 	 * 
157 	 * @param _gpuEAExecutionContext the GPU execution context with device filters and population settings
158 	 * @param _gpuEAConfiguration    the GPU EA configuration with OpenCL program and fitness function
159 	 * @param _executorService       the executor service for managing asynchronous operations
160 	 * @throws NullPointerException if any parameter is null
161 	 */
162 	public GPUFitnessEvaluator(final GPUEAExecutionContext<T> _gpuEAExecutionContext,
163 			final GPUEAConfiguration<T> _gpuEAConfiguration, final ExecutorService _executorService) {
164 		Validate.notNull(_gpuEAExecutionContext);
165 		Validate.notNull(_gpuEAConfiguration);
166 		Validate.notNull(_executorService);
167 
168 		this.gpuEAExecutionContext = _gpuEAExecutionContext;
169 		this.gpuEAConfiguration = _gpuEAConfiguration;
170 		this.executorService = _executorService;
171 
172 		CL.setExceptionsEnabled(true);
173 	}
174 
175 	private String loadResource(final String filename) {
176 		Validate.notBlank(filename);
177 
178 		try {
179 			return IOUtils.resourceToString(filename, StandardCharsets.UTF_8);
180 		} catch (IOException e) {
181 			throw new IllegalStateException("Unable to load resource " + filename, e);
182 		}
183 	}
184 
185 	private List<String> grabProgramSources() {
186 		final Program programSpec = gpuEAConfiguration.program();
187 
188 		logger.info("Load program source: {}", programSpec);
189 
190 		final List<String> sources = new ArrayList<>();
191 
192 		sources.addAll(programSpec.content());
193 
194 		programSpec.resources()
195 				.stream()
196 				.map(resource -> loadResource(resource))
197 				.forEach(program -> {
198 					sources.add(program);
199 				});
200 
201 		return sources;
202 	}
203 
204 	/**
205 	 * Initializes OpenCL resources and prepares GPU devices for fitness evaluation.
206 	 * 
207 	 * <p>This method performs the complete OpenCL initialization sequence:
208 	 * <ol>
209 	 * <li><strong>Platform discovery</strong>: Enumerates available OpenCL platforms</li>
210 	 * <li><strong>Device filtering</strong>: Selects devices based on configured filters</li>
211 	 * <li><strong>Context creation</strong>: Creates OpenCL contexts for selected devices</li>
212 	 * <li><strong>Queue setup</strong>: Creates command queues with profiling and out-of-order execution</li>
213 	 * <li><strong>Program compilation</strong>: Compiles OpenCL kernels from source code</li>
214 	 * <li><strong>Kernel preparation</strong>: Creates kernel objects and queries execution info</li>
215 	 * <li><strong>Fitness initialization</strong>: Calls lifecycle hooks on the fitness function</li>
216 	 * </ol>
217 	 * 
218 	 * <p>Device selection process:
219 	 * <ul>
220 	 * <li>Applies platform filters to discovered OpenCL platforms</li>
221 	 * <li>Enumerates devices for each qualifying platform</li>
222 	 * <li>Applies device filters to select appropriate devices</li>
223 	 * <li>Validates that at least one device is available</li>
224 	 * </ul>
225 	 * 
226 	 * <p>The method creates separate OpenCL contexts for each selected device to enable concurrent execution and optimal
227 	 * resource utilization. Each context includes compiled programs and kernel objects ready for fitness evaluation.
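	 * 
	 * <p>Per selected device, the setup boils down to the following JOCL calls (a simplified sketch of the
	 * implementation below; error handling and bookkeeping omitted):
	 * 
	 * <pre>{@code
	 * cl_context context = CL.clCreateContext(contextProperties, 1, new cl_device_id[] { deviceId }, null, null, null);
	 * cl_command_queue queue = CL.clCreateCommandQueueWithProperties(context, deviceId, queueProperties, null);
	 * cl_program program = CL.clCreateProgramWithSource(context, sources.length, sources, null, null);
	 * CL.clBuildProgram(program, 0, null, buildOptions, null, null);
	 * cl_kernel kernel = CL.clCreateKernel(program, kernelName, null);
	 * }</pre>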
228 	 * 
229 	 * @throws IllegalArgumentException if no compatible devices are found
230 	 * @throws RuntimeException      if OpenCL initialization, program compilation, or kernel creation fails
231 	 */
232 	@Override
233 	public void preEvaluation() {
234 		logger.trace("Init...");
235 		FitnessEvaluator.super.preEvaluation();
236 
237 		final var platformReader = new PlatformReader();
238 		final var deviceReader = new DeviceReader();
239 		final var kernelInfoReader = new KernelInfoReader();
240 
241 		final int numPlatforms = PlatformUtils.numPlatforms();
242 		logger.info("Found {} platforms", numPlatforms);
243 
244 		final List<cl_platform_id> platformIds = PlatformUtils.platformIds(numPlatforms);
245 
246 		logger.info("Selecting platform and devices");
247 		final var platformFilters = gpuEAExecutionContext.platformFilters();
248 		final var deviceFilters = gpuEAExecutionContext.deviceFilters();
249 
250 		selectedPlatformToDevice = platformIds.stream()
251 				.map(platformReader::read)
252 				.filter(platformFilters)
253 				.flatMap(platform -> {
254 					final var platformId = platform.platformId();
255 					final int numDevices = DeviceUtils.numDevices(platformId);
256 					logger.trace("\tPlatform {}: {} devices", platform.name(), numDevices);
257 
258 					final var deviceIds = DeviceUtils.getDeviceIds(platformId, numDevices);
259 					return deviceIds.stream()
260 							.map(deviceId -> Pair.of(platform, deviceId));
261 				})
262 				.map(platformToDeviceId -> {
263 					final var platform = platformToDeviceId.getLeft();
264 					final var platformId = platform.platformId();
265 					final var deviceID = platformToDeviceId.getRight();
266 
267 					return Pair.of(platform, deviceReader.read(platformId, deviceID));
268 				})
269 				.filter(platformToDevice -> deviceFilters.test(platformToDevice.getRight()))
270 				.toList();
271 
272 		if (logger.isTraceEnabled()) {
273 			logger.trace("============================");
274 			logger.trace("Selected devices:");
275 			selectedPlatformToDevice.forEach(pd -> {
276 				logger.trace("{}", pd.getLeft());
277 				logger.trace("\t{}", pd.getRight());
278 			});
279 			logger.trace("============================");
280 		}
281 
282 		Validate.isTrue(selectedPlatformToDevice.size() > 0);
283 
284 		final List<String> programs = grabProgramSources();
285 		final String[] programsArr = programs.toArray(new String[programs.size()]);
286 
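		// Create a dedicated context, command queue, compiled program and kernel set for each selected device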
287 		for (final var platformAndDevice : selectedPlatformToDevice) {
288 			final var platform = platformAndDevice.getLeft();
289 			final var device = platformAndDevice.getRight();
290 
291 			logger.info("Processing platform [{}] / device [{}]", platform.name(), device.name());
292 
293 			logger.info("\tCreating context");
294 			cl_context_properties contextProperties = new cl_context_properties();
295 			contextProperties.addProperty(CL.CL_CONTEXT_PLATFORM, platform.platformId());
296 
297 			final cl_context context = CL
298 					.clCreateContext(contextProperties, 1, new cl_device_id[] { device.deviceId() }, null, null, null);
299 
300 			logger.info("\tCreating command queue");
301 			final cl_queue_properties queueProperties = new cl_queue_properties();
302 			queueProperties.addProperty(CL.CL_QUEUE_PROPERTIES,
303 					CL.CL_QUEUE_PROFILING_ENABLE | CL.CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE);
304 			final cl_command_queue commandQueue = CL
305 					.clCreateCommandQueueWithProperties(context, device.deviceId(), queueProperties, null);
306 
307 			logger.info("\tCreate program");
308 			final cl_program program = CL.clCreateProgramWithSource(context, programsArr.length, programsArr, null, null);
309 
310 			final var programSpec = gpuEAConfiguration.program();
311 			final var buildOptions = programSpec.buildOptions()
312 					.orElse(null);
313 			logger.info("\tBuilding program with options: {}", buildOptions);
314 			CL.clBuildProgram(program, 0, null, buildOptions, null, null);
315 
316 			final Set<String> kernelNames = gpuEAConfiguration.program()
317 					.kernelNames();
318 
319 			final Map<String, cl_kernel> kernels = new HashMap<>();
320 			final Map<String, KernelInfo> kernelInfos = new HashMap<>();
321 			for (final String kernelName : kernelNames) {
322 
323 				logger.info("\tCreate kernel {}", kernelName);
324 				final cl_kernel kernel = CL.clCreateKernel(program, kernelName, null);
325 				Validate.notNull(kernel);
326 
327 				kernels.put(kernelName, kernel);
328 
329 				final var kernelInfo = kernelInfoReader.read(device.deviceId(), kernel, kernelName);
330 				logger.trace("\t{}", kernelInfo);
331 				kernelInfos.put(kernelName, kernelInfo);
332 			}
333 
334 			clContexts.add(context);
335 			clCommandQueues.add(commandQueue);
336 			clKernels.add(kernels);
337 			clPrograms.add(program);
338 
339 			final var openCLExecutionContext = OpenCLExecutionContext.builder()
340 					.platform(platform)
341 					.device(device)
342 					.clContext(context)
343 					.clCommandQueue(commandQueue)
344 					.kernels(kernels)
345 					.kernelInfos(kernelInfos)
346 					.clProgram(program)
347 					.build();
348 
349 			clExecutionContexts.add(openCLExecutionContext);
350 		}
351 
352 		final var fitness = gpuEAConfiguration.fitness();
353 		fitness.beforeAllEvaluations();
354 		for (final OpenCLExecutionContext clExecutionContext : clExecutionContexts) {
355 			fitness.beforeAllEvaluations(clExecutionContext, executorService);
356 		}
357 	}
358 
359 	/**
360 	 * Evaluates fitness for a population of genotypes using GPU acceleration.
361 	 * 
362 	 * <p>This method implements the core fitness evaluation logic by distributing the population across available OpenCL
363 	 * devices and executing fitness computation concurrently. The evaluation process follows these steps:
364 	 * 
365 	 * <ol>
366 	 * <li><strong>Population partitioning</strong>: Divides genotypes across available devices</li>
367 	 * <li><strong>Parallel dispatch</strong>: Submits evaluation tasks to each device asynchronously</li>
368 	 * <li><strong>GPU execution</strong>: Executes OpenCL kernels for fitness computation</li>
369 	 * <li><strong>Result collection</strong>: Gathers fitness values from all devices</li>
370 	 * <li><strong>Result aggregation</strong>: Combines results preserving original order</li>
371 	 * </ol>
372 	 * 
373 	 * <p>Load balancing strategy:
374 	 * <ul>
375 	 * <li>Automatically calculates partition size based on population and device count</li>
376 	 * <li>Round-robin assignment of partitions to devices for balanced workload</li>
377 	 * <li>Asynchronous execution allows devices to work at their optimal pace</li>
378 	 * </ul>
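	 * 
	 * <p>Worked example (illustrative numbers): with 2000 genotypes and 3 selected devices, the partition size is
	 * {@code (int) Math.ceil(2000.0 / 3) == 667}, yielding partitions of 667, 667 and 666 genotypes that are
	 * assigned round-robin to devices 0, 1 and 2.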
379 	 * 
380 	 * <p>The method coordinates with the configured fitness function through lifecycle hooks:
381 	 * <ul>
382 	 * <li>{@code beforeEvaluation()}: Called before each device partition evaluation</li>
383 	 * <li>{@code compute()}: Executes the actual GPU fitness computation</li>
384 	 * <li>{@code afterEvaluation()}: Called after each device partition completes</li>
385 	 * </ul>
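	 * 
	 * <p>For each partition, the hooks are invoked in the following order (a simplified sketch of the implementation
	 * below):
	 * 
	 * <pre>{@code
	 * fitness.beforeEvaluation(generation, partition);
	 * fitness.beforeEvaluation(clExecutionContext, executorService, generation, partition);
	 * CompletableFuture<List<T>> partitionResults = fitness
	 * 		.compute(clExecutionContext, executorService, generation, partition)
	 * 		.thenApply(results -> {
	 * 			fitness.afterEvaluation(clExecutionContext, executorService, generation, partition);
	 * 			fitness.afterEvaluation(generation, partition);
	 * 			return results;
	 * 		});
	 * }</pre>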
386 	 * 
387 	 * <p>Concurrency and performance:
388 	 * <ul>
389 	 * <li>Multiple devices execute evaluation partitions concurrently</li>
390 	 * <li>CompletableFuture-based coordination for non-blocking execution</li>
391 	 * <li>Automatic workload distribution across available GPU resources</li>
392 	 * </ul>
393 	 * 
394 	 * @param generation the current generation number for context and logging
395 	 * @param genotypes  the population of genotypes to evaluate
396 	 * @return fitness values corresponding to each genotype in the same order
397 	 * @throws IllegalArgumentException if genotypes is null or empty
398 	 * @throws RuntimeException         if GPU evaluation fails or OpenCL errors occur
399 	 */
400 	@Override
401 	public List<T> evaluate(final long generation, final List<Genotype> genotypes) {
402 
403 		final var fitness = gpuEAConfiguration.fitness();
404 
405 		/*
406 		 * TODO make it configurable from execution context
407 		 */
408 		final int partitionSize = (int) (Math.ceil((double) genotypes.size() / clExecutionContexts.size()));
409 		final var subGenotypes = ListUtils.partition(genotypes, partitionSize);
410 		logger.debug("Genotype decomposed in {} partition(s)", subGenotypes.size());
411 		if (logger.isTraceEnabled()) {
412 			for (int i = 0; i < subGenotypes.size(); i++) {
413 				final List<Genotype> subGenotype = subGenotypes.get(i);
414 				logger.trace("\tPartition {} with {} elements", i, subGenotype.size());
415 			}
416 		}
417 
418 		final List<CompletableFuture<List<T>>> subResultsCF = new ArrayList<>();
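		// Dispatch partitions round-robin: partition i is evaluated on execution context (i % clExecutionContexts.size())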
419 		for (int i = 0; i < subGenotypes.size(); i++) {
420 			final var openCLExecutionContext = clExecutionContexts.get(i % clExecutionContexts.size());
421 			final var subGenotype = subGenotypes.get(i);
422 
423 			fitness.beforeEvaluation(generation, subGenotype);
424 			fitness.beforeEvaluation(openCLExecutionContext, executorService, generation, subGenotype);
425 
426 			final var resultsCF = fitness.compute(openCLExecutionContext, executorService, generation, subGenotype)
427 					.thenApply((results) -> {
428 
429 						fitness.afterEvaluation(openCLExecutionContext, executorService, generation, subGenotype);
430 						fitness.afterEvaluation(generation, subGenotype);
431 
432 						return results;
433 					});
434 
435 			subResultsCF.add(resultsCF);
436 		}
437 
438 		final List<T> resultsEvaluation = new ArrayList<>(genotypes.size());
439 		for (final CompletableFuture<List<T>> subResultCF : subResultsCF) {
440 			final var fitnessResults = subResultCF.join();
441 			resultsEvaluation.addAll(fitnessResults);
442 		}
443 		return resultsEvaluation;
444 	}
445 
446 	/**
447 	 * Cleans up OpenCL resources and releases GPU memory after evaluation completion.
448 	 * 
449 	 * <p>This method performs comprehensive cleanup of all OpenCL resources in the proper order to prevent memory leaks
450 	 * and ensure clean shutdown. The cleanup sequence follows OpenCL best practices for resource deallocation:
451 	 * 
452 	 * <ol>
453 	 * <li><strong>Fitness cleanup</strong>: Calls lifecycle hooks on the fitness function</li>
454 	 * <li><strong>Kernel release</strong>: Releases all compiled kernel objects</li>
455 	 * <li><strong>Program release</strong>: Releases compiled OpenCL programs</li>
456 	 * <li><strong>Queue release</strong>: Releases command queues and pending operations</li>
457 	 * <li><strong>Context release</strong>: Releases OpenCL contexts and associated memory</li>
458 	 * <li><strong>Reference cleanup</strong>: Clears internal data structures and references</li>
459 	 * </ol>
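	 * 
	 * <p>In OpenCL API terms, the cleanup below amounts to the following calls per selected device (simplified from
	 * the method body):
	 * 
	 * <pre>{@code
	 * CL.clReleaseKernel(kernel);        // for every kernel created from the device's program
	 * CL.clReleaseProgram(program);
	 * CL.clReleaseCommandQueue(commandQueue);
	 * CL.clReleaseContext(context);
	 * }</pre>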
460 	 * 
461 	 * <p>Resource management guarantees:
462 	 * <ul>
463 	 * <li>All GPU memory allocations are properly released</li>
464 	 * <li>OpenCL objects are released in dependency order to avoid errors</li>
465 	 * <li>No resource leaks occur even if individual cleanup operations fail</li>
466 	 * <li>Evaluator returns to a clean state ready for potential reinitialization</li>
467 	 * </ul>
468 	 * 
469 	 * <p>The method coordinates with the configured fitness function to ensure any fitness-specific resources (buffers,
470 	 * textures, etc.) are also properly cleaned up through the {@code afterAllEvaluations()} lifecycle hooks.
471 	 * 
472 	 * @throws RuntimeException if releasing OpenCL resources fails; since OpenCL exceptions are enabled, such
473 	 *                          failures propagate to the caller
474 	 */
475 	@Override
476 	public void postEvaluation() {
477 
478 		final var fitness = gpuEAConfiguration.fitness();
479 
480 		for (final OpenCLExecutionContext clExecutionContext : clExecutionContexts) {
481 			fitness.afterAllEvaluations(clExecutionContext, executorService);
482 		}
483 		fitness.afterAllEvaluations();
484 
485 		logger.debug("Releasing kernels");
486 
487 		for (final Map<String, cl_kernel> kernels : clKernels) {
488 			for (final cl_kernel clKernel : kernels.values()) {
489 				CL.clReleaseKernel(clKernel);
490 			}
491 		}
492 		clKernels.clear();
493 
494 		logger.debug("Releasing programs");
495 		for (final cl_program clProgram : clPrograms) {
496 			CL.clReleaseProgram(clProgram);
497 		}
498 		clPrograms.clear();
499 
500 		logger.debug("Releasing command queues");
501 		for (final cl_command_queue clCommandQueue : clCommandQueues) {
502 			CL.clReleaseCommandQueue(clCommandQueue);
503 		}
504 		clCommandQueues.clear();
505 
506 		logger.debug("Releasing contexts");
507 		for (final cl_context clContext : clContexts) {
508 			CL.clReleaseContext(clContext);
509 		}
510 		clContexts.clear();
511 
512 		clExecutionContexts.clear();
513 		selectedPlatformToDevice = null;
514 
515 		FitnessEvaluator.super.postEvaluation();
516 	}
517 }