import numpy as np

# Identity (linear) activation function
def activation_function(x):
    return x  # The network is a single linear layer

# Xavier (Glorot) initialization for the weights
def initialize_weights(input_size, output_size):
    return np.random.randn(input_size, output_size) * np.sqrt(2 / (input_size + output_size))

# Min-max scale an input vector to [-1, 1].
# Constant vectors (max == min) would divide by zero and yield NaN,
# so map them to the zero vector instead. Note that this makes all
# constant rows (e.g. all zeros and all ones) indistinguishable.
def normalize_input(input_vector):
    input_vector = np.asarray(input_vector, dtype=float)
    value_range = np.max(input_vector) - np.min(input_vector)
    if value_range == 0:
        return np.zeros_like(input_vector)
    return 2 * (input_vector - np.min(input_vector)) / value_range - 1
# Training function: a single linear layer trained by per-sample
# gradient descent with momentum and inverse-time learning-rate decay
def train_neural_network(input_data, target_data, learning_rate=0.1, max_epochs=2000, error_threshold=1e-5):
    input_size = len(input_data[0])
    output_size = len(target_data[0])

    # Initialize weights and biases
    weights = initialize_weights(input_size, output_size)
    biases = np.zeros(output_size)

    prev_weight_update = np.zeros_like(weights)
    prev_bias_update = np.zeros_like(biases)

    # Inverse-time decay: lr_t = lr_0 / (1 + k * t), computed from the
    # initial rate so the decay does not compound across epochs
    initial_learning_rate = learning_rate
    decay_factor = 0.001

    for epoch in range(max_epochs):
        total_error = 0.0
        for i in range(len(input_data)):
            input_vector = input_data[i]
            target_vector = np.asarray(target_data[i], dtype=float)

            # Normalize the input vector
            normalized_input = normalize_input(input_vector)

            # Forward pass
            output = np.dot(normalized_input, weights) + biases
            output = activation_function(output)

            # Squared error for this sample
            error = target_vector - output
            total_error += np.sum(error ** 2)

            # Gradient of the loss L = sum((t - y)^2) with y = xW + b:
            # dL/dW = -2 * outer(x, t - y) and dL/db = -2 * (t - y)
            weight_gradient = -2 * np.outer(normalized_input, error)
            bias_gradient = -2 * error

            # Momentum-based update: step AGAINST the gradient to descend the loss
            weight_update = -learning_rate * weight_gradient + 0.9 * prev_weight_update
            bias_update = -learning_rate * bias_gradient + 0.9 * prev_bias_update

            # Update weights and biases
            weights += weight_update
            biases += bias_update

            prev_weight_update = weight_update
            prev_bias_update = bias_update

        # Dynamic learning rate decay
        learning_rate = initial_learning_rate / (1 + decay_factor * epoch)

        # Early stopping: check if the error is below the threshold
        if total_error < error_threshold:
            print(f"Converged at epoch {epoch} with error: {total_error}")
            break

        # Optional: print status every 100 epochs
        if epoch % 100 == 0:
            print(f"Epoch {epoch}, Error: {total_error}")

    return weights, biases
# Test the network with the provided truth tables
def test_neural_network(weights, biases, input_data):
    predictions = []
    for input_vector in input_data:
        normalized_input = normalize_input(input_vector)
        output = np.dot(normalized_input, weights) + biases
        output = activation_function(output)
        predictions.append(output)
    return predictions
# Define the truth tables
input_data = [
    [0, 0, 0, 0],
    [0, 0, 0, 1],
    [0, 0, 1, 0],
    [0, 0, 1, 1],
    [0, 1, 0, 0],
    [0, 1, 0, 1],
    [0, 1, 1, 0],
    [0, 1, 1, 1],
    [1, 0, 0, 0],
    [1, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 1, 1],
    [1, 1, 0, 0],
    [1, 1, 0, 1],
    [1, 1, 1, 0],
    [1, 1, 1, 1]
]
# Corresponding targets: the identity mapping (each target equals its input)
target_data = [
    [0, 0, 0, 0],
    [0, 0, 0, 1],
    [0, 0, 1, 0],
    [0, 0, 1, 1],
    [0, 1, 0, 0],
    [0, 1, 0, 1],
    [0, 1, 1, 0],
    [0, 1, 1, 1],
    [1, 0, 0, 0],
    [1, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 1, 1],
    [1, 1, 0, 0],
    [1, 1, 0, 1],
    [1, 1, 1, 0],
    [1, 1, 1, 1]
]
# Train the neural network
learning_rate = 0.1
max_epochs = 2000
error_threshold = 1e-5

weights, biases = train_neural_network(input_data, target_data, learning_rate, max_epochs, error_threshold)

# Test the neural network
predictions = test_neural_network(weights, biases, input_data)

# Display the results
for i, (input_vector, target_vector, prediction) in enumerate(zip(input_data, target_data, predictions)):
    print(f"Table {i+1}: Input: {input_vector}, Target: {target_vector}, Prediction: {prediction}, Error: {np.abs(np.array(target_vector) - np.array(prediction))}")
Output of the original run, before the fixes above:

Epoch 0, Error: nan
Epoch 100, Error: nan
Epoch 200, Error: nan
Epoch 300, Error: nan
Epoch 400, Error: nan
Epoch 500, Error: nan
Epoch 600, Error: nan
Epoch 700, Error: nan
Epoch 800, Error: nan
Epoch 900, Error: nan
Epoch 1000, Error: nan
Epoch 1100, Error: nan
Epoch 1200, Error: nan
Epoch 1300, Error: nan
Epoch 1400, Error: nan
Epoch 1500, Error: nan
Epoch 1600, Error: nan
Epoch 1700, Error: nan
Epoch 1800, Error: nan
Epoch 1900, Error: nan
Table 1: Input: [0, 0, 0, 0], Target: [0, 0, 0, 0], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
Table 2: Input: [0, 0, 0, 1], Target: [0, 0, 0, 1], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
Table 3: Input: [0, 0, 1, 0], Target: [0, 0, 1, 0], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
Table 4: Input: [0, 0, 1, 1], Target: [0, 0, 1, 1], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
Table 5: Input: [0, 1, 0, 0], Target: [0, 1, 0, 0], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
Table 6: Input: [0, 1, 0, 1], Target: [0, 1, 0, 1], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
Table 7: Input: [0, 1, 1, 0], Target: [0, 1, 1, 0], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
Table 8: Input: [0, 1, 1, 1], Target: [0, 1, 1, 1], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
Table 9: Input: [1, 0, 0, 0], Target: [1, 0, 0, 0], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
Table 10: Input: [1, 0, 0, 1], Target: [1, 0, 0, 1], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
Table 11: Input: [1, 0, 1, 0], Target: [1, 0, 1, 0], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
Table 12: Input: [1, 0, 1, 1], Target: [1, 0, 1, 1], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
Table 13: Input: [1, 1, 0, 0], Target: [1, 1, 0, 0], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
Table 14: Input: [1, 1, 0, 1], Target: [1, 1, 0, 1], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
Table 15: Input: [1, 1, 1, 0], Target: [1, 1, 1, 0], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
Table 16: Input: [1, 1, 1, 1], Target: [1, 1, 1, 1], Prediction: [nan nan nan nan], Error: [nan nan nan nan]
./prog.py:13: RuntimeWarning: invalid value encountered in true_divide
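The NaN values trace back to that RuntimeWarning: line 13 of the original
prog.py is the unguarded min-max expression in normalize_input, which divides
zero by zero for the constant rows [0, 0, 0, 0] and [1, 1, 1, 1]. One
NaN-tainted sample then poisons the shared weights, so every later forward
pass returns NaN. A minimal standalone reproduction of the failure mode,
assuming only NumPy:

import numpy as np

v = np.array([0.0, 0.0, 0.0, 0.0])
with np.errstate(invalid="ignore"):  # silence the same RuntimeWarning
    scaled = 2 * (v - v.min()) / (v.max() - v.min()) - 1  # 0/0 -> nan
print(scaled)                    # [nan nan nan nan]
print(scaled @ np.ones((4, 4)))  # nan propagates through the forward pass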