package net.bmahe.genetics4j.neat;

import java.util.function.Function;

/**
 * Utility class providing common activation functions for NEAT (NeuroEvolution of Augmenting Topologies) neural
 * networks.
 *
 * <p>Activations contains a collection of mathematical functions commonly used as activation functions in neural
 * networks. These functions transform the weighted sum of inputs at each node into the node's output value, introducing
 * non-linearity that enables neural networks to learn complex patterns.
 *
 * <p>Available activation functions:
 * <ul>
 * <li><strong>Linear</strong>: Simple linear transformation with slope and bias parameters</li>
 * <li><strong>Sigmoid</strong>: Logistic function providing smooth transition between 0 and 1</li>
 * <li><strong>Hyperbolic tangent</strong>: Smooth function providing output between -1 and 1</li>
 * <li><strong>Identity</strong>: Pass-through function for linear networks</li>
 * <li><strong>NEAT paper</strong>: Sigmoid variant with specific parameters from original NEAT research</li>
 * </ul>
 *
 * <p>Function variations:
 * <ul>
 * <li><strong>Float versions</strong>: Optimized for float precision networks</li>
 * <li><strong>Double versions</strong>: Higher precision for sensitive applications</li>
 * <li><strong>Parameterized versions</strong>: Customizable function parameters</li>
 * <li><strong>Pre-configured versions</strong>: Common parameter combinations</li>
 * </ul>
 *
 * <p>Common usage patterns:
 *
 * <pre>{@code
 * // Use standard sigmoid activation
 * FeedForwardNetwork network = new FeedForwardNetwork(inputNodes, outputNodes, connections, Activations.sigmoid);
 *
 * // Custom sigmoid with different steepness
 * Function<Double, Double> customSigmoid = Activations.sigmoid(2.0);
 * FeedForwardNetwork steeperNetwork = new FeedForwardNetwork(inputNodes, outputNodes, connections, customSigmoid);
 *
 * // Hyperbolic tangent for outputs in [-1, 1] range
 * FeedForwardNetwork tanhNetwork = new FeedForwardNetwork(inputNodes, outputNodes, connections, Activations.tanh);
 *
 * // Linear activation for regression problems
 * FeedForwardNetwork linearNetwork = new FeedForwardNetwork(inputNodes,
 *         outputNodes,
 *         connections,
 *         Activations.identity);
 *
 * // Float versions for memory efficiency
 * Function<Float, Float> floatSigmoid = Activations.sigmoidFloat;
 * }</pre>
 *
 * <p>Activation function characteristics:
 * <ul>
 * <li><strong>Sigmoid</strong>: Smooth, bounded [0,1], good for binary classification</li>
 * <li><strong>Tanh</strong>: Smooth, bounded [-1,1], zero-centered, often preferred over sigmoid</li>
 * <li><strong>Linear</strong>: Unbounded, preserves gradients, suitable for regression</li>
 * <li><strong>Identity</strong>: No transformation, useful for pass-through connections</li>
 * </ul>
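 *
 * <p>As a quick illustration of these characteristics, using the pre-configured constants declared in this class
 * (results shown are approximate):
 * <pre>{@code
 * Activations.sigmoid.apply(0.0d);  // 0.5  -- midpoint of the (0, 1) range
 * Activations.tanh.apply(0.0d);     // 0.0  -- zero-centered
 * Activations.identity.apply(3.0d); // 3.0  -- unchanged
 * }</pre>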
 *
 * <p>Performance considerations:
 * <ul>
 * <li><strong>Float vs Double</strong>: float versions use less memory and may be faster on some hardware</li>
 * <li><strong>Function references</strong>: the pre-configured constants avoid creating a new function object per use</li>
 * <li><strong>Mathematical operations</strong>: the implementations are thin wrappers around {@link Math#exp} and {@link Math#tanh}</li>
 * <li><strong>Branch-free evaluation</strong>: the functions contain no conditional logic, keeping evaluation cheap and predictable</li>
 * </ul>
 *
 * <p>Integration with NEAT evolution:
 * <ul>
 * <li><strong>Network evaluation</strong>: Applied to hidden and output nodes during forward propagation</li>
 * <li><strong>Fitness computation</strong>: Affects network behavior and resulting fitness</li>
 * <li><strong>Gradient flow</strong>: Function choice impacts learning and evolution dynamics</li>
 * <li><strong>Problem matching</strong>: Different problems benefit from different activation functions</li>
 * </ul>
 *
 * @see FeedForwardNetwork
 * @see NeatChromosome
 * @see Function
 */
public class Activations {

	private Activations() {
	}

	/**
	 * Creates a linear activation function with specified slope and bias for float values.
	 *
	 * <p>The linear function computes: f(x) = a * x + b
	 *
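	 * <p>For example, a float-valued function with slope 0.5 and bias 1.0 (parameter values chosen purely for
	 * illustration):
	 * <pre>{@code
	 * Function<Float, Float> scale = Activations.linearFloat(0.5f, 1.0f);
	 * float y = scale.apply(2.0f); // 0.5f * 2.0f + 1.0f = 2.0f
	 * }</pre>
	 *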
	 * @param a the slope parameter
	 * @param b the bias parameter
	 * @return a linear activation function
	 */
	public static Function<Float, Float> linearFloat(final float a, final float b) {
		return (x) -> a * x + b;
	}

	/**
	 * Creates a linear activation function with specified slope and bias for double values.
	 *
	 * <p>The linear function computes: f(x) = a * x + b
	 *
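	 * <p>For example, a double-valued function with slope 2.0 and bias -1.0 (parameter values chosen purely for
	 * illustration):
	 * <pre>{@code
	 * Function<Double, Double> affine = Activations.linear(2.0d, -1.0d);
	 * double y = affine.apply(3.0d); // 2.0 * 3.0 - 1.0 = 5.0
	 * }</pre>
	 *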
	 * @param a the slope parameter
	 * @param b the bias parameter
	 * @return a linear activation function
	 */
	public static Function<Double, Double> linear(final double a, final double b) {
		return (x) -> a * x + b;
	}

	/**
	 * Creates a sigmoid activation function with specified steepness for float values.
	 *
	 * <p>The sigmoid function computes: f(x) = 1 / (1 + exp(-a * x))
	 * <p>Output range: (0, 1)
	 *
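	 * <p>For example, with the default steepness of 1.0 and the steepness of 4.9 used in the original NEAT paper
	 * (results are approximate):
	 * <pre>{@code
	 * Activations.sigmoidFloat(1.0f).apply(0.0f); // 0.5f
	 * Activations.sigmoidFloat(4.9f).apply(1.0f); // ~0.99f
	 * }</pre>
	 *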
	 * @param a the steepness parameter (higher values create steeper transitions)
	 * @return a sigmoid activation function
	 */
	public static Function<Float, Float> sigmoidFloat(final float a) {
		return (x) -> 1.0f / (1.0f + (float) Math.exp(-a * x));
	}

	/**
	 * Creates a sigmoid activation function with specified steepness for double values.
	 *
	 * <p>The sigmoid function computes: f(x) = 1 / (1 + exp(-a * x))
	 * <p>Output range: (0, 1)
	 *
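	 * <p>For example, with steepness 1.0 and a steeper variant with steepness 2.0 (values chosen for illustration,
	 * results are approximate):
	 * <pre>{@code
	 * Activations.sigmoid(1.0d).apply(0.0d); // 0.5
	 * Activations.sigmoid(2.0d).apply(1.0d); // ~0.88
	 * }</pre>
	 *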
	 * @param a the steepness parameter (higher values create steeper transitions)
	 * @return a sigmoid activation function
	 */
	public static Function<Double, Double> sigmoid(final double a) {
		return (x) -> 1.0d / (1.0d + Math.exp(-a * x));
	}

	/** Standard sigmoid activation function for float values (steepness = 1.0). */
	public static Function<Float, Float> sigmoidFloat = sigmoidFloat(1.0f);

	/** Standard sigmoid activation function for double values (steepness = 1.0). */
	public static Function<Double, Double> sigmoid = sigmoid(1.0d);

	/** Identity activation function for float values (f(x) = x). */
	public static Function<Float, Float> identityFloat = linearFloat(1.0f, 0.0f);

	/** Identity activation function for double values (f(x) = x). */
	public static Function<Double, Double> identity = linear(1.0d, 0.0d);

	/** Hyperbolic tangent activation function for float values. Output range: (-1, 1). */
	public static Function<Float, Float> tanhFloat = (x) -> (float) Math.tanh(x);

	/** Hyperbolic tangent activation function for double values. Output range: (-1, 1). */
	public static Function<Double, Double> tanh = (x) -> Math.tanh(x);

	/** Sigmoid activation function with steepness 4.9 as used in the original NEAT paper (float version). */
	public static Function<Float, Float> neatPaperFloat = sigmoidFloat(4.9f);

	/** Sigmoid activation function with steepness 4.9 as used in the original NEAT paper (double version). */
	public static Function<Double, Double> neatPaper = sigmoid(4.9d);
}