Python code

Code shown in the introductory video about neural networks in Python

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam

# Data
PSA = np.array([3.8,3.4,2.9,2.8,2.7,2.1,1.6,2.5,2.0,1.7,1.4,1.2,0.9,0.8])
Group = np.array(['C','C','C','C','C','C','C','H','H','H','H','H','H','H'])
target = np.where(Group == 'C', 1, 0)

tf.random.set_seed(915)  # seed TensorFlow's RNG; random.seed() does not affect Keras weight initialization
# Build the neural network model
model = Sequential()
model.add(Dense(1, input_dim=1, activation='sigmoid'))

# Compile the model
model.compile(optimizer=Adam(learning_rate=0.01), loss='binary_crossentropy')

# Train the model
model.fit(PSA, target, epochs=3000, verbose=1)

# Show the final weights
print(model.get_weights())

# Use the network to predict the class probability for a new PSA value
x1 = np.array([2])
print(model.predict(x1, verbose=0))
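
The single neuron above amounts to a logistic regression. As a quick check (not shown in the video code), the prediction can be reproduced by hand from the fitted weight and bias:

# Reproduce the prediction manually (a check, not from the video code)
w, b = model.get_weights()   # kernel has shape (1, 1), bias has shape (1,)
z = w[0, 0] * 2 + b[0]       # linear combination for PSA = 2
p = 1 / (1 + np.exp(-z))     # sigmoid; should match model.predict above
print(p)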


Code for an autoencoder that removes noise from images:

import numpy as np
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import mnist
import matplotlib.pyplot as plt

# Load the MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Filter the dataset to include only '2's
x_train_2s = x_train[y_train == 2]
x_test_2s = x_test[y_test == 2]

# Normalize the data
x_train_2s = x_train_2s.astype('float32') / 255.
x_test_2s = x_test_2s.astype('float32') / 255.

# Reshape the data to include the channel dimension
x_train_2s = np.reshape(x_train_2s, (len(x_train_2s), 28, 28, 1))
x_test_2s = np.reshape(x_test_2s, (len(x_test_2s), 28, 28, 1))

# Add noise to the images
noise_factor = 0.3
x_train_noisy = x_train_2s + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train_2s.shape) 
x_test_noisy = x_test_2s + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test_2s.shape)

# Make sure all values are between 0 and 1
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)

# Build the autoencoder
input_img = layers.Input(shape=(28, 28, 1))

# Encoder
x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)
x = layers.MaxPooling2D((2, 2), padding='same')(x)
x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(x)
encoded = layers.MaxPooling2D((2, 2), padding='same')(x)

# Decoder
x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(encoded)
x = layers.UpSampling2D((2, 2))(x)
x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = layers.UpSampling2D((2, 2))(x)
decoded = layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

autoencoder = models.Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')

# Train the autoencoder
autoencoder.fit(x_train_noisy, x_train_2s, epochs=50, batch_size=128)

# Use the autoencoder to denoise the test images
decoded_imgs = autoencoder.predict(x_test_noisy)

n = 4
plt.figure(figsize=(6, 8))
for i in range(n):
    # Display original
    plt.subplot(n, 3, 3*i + 1)
    plt.imshow(x_test_2s[i].reshape(28, 28))
    plt.title("Original")
    plt.axis('off')

    # Display noisy
    plt.subplot(n, 3, 3*i + 2)
    plt.imshow(x_test_noisy[i].reshape(28, 28))
    plt.title("Noisy")
    plt.axis('off')

    # Display denoised
    plt.subplot(n, 3, 3*i + 3)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.title("Denoised")
    plt.axis('off')

plt.show()
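
As a rough quality check (not part of the original code), the mean squared error against the clean test images shows how much noise the autoencoder removed:

# Rough quality check (not part of the original code)
mse_noisy = np.mean((x_test_noisy - x_test_2s) ** 2)
mse_denoised = np.mean((decoded_imgs - x_test_2s) ** 2)
print("MSE noisy vs. clean:   ", mse_noisy)
print("MSE denoised vs. clean:", mse_denoised)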


Code for a recurrent neural network (RNN). Note that this code is for educational purposes only; it is not intended for predicting the stock market.

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, SimpleRNN

# Data
y = np.array([9, 7, 6, 10, 8, 7, 11, 9, 6, 12, 10, 7, 11, 9, 7])
# Normalize the data to [0, 1]
yn = (y - np.min(y)) / (np.max(y) - np.min(y))
train_size = 10
train = yn[0:train_size]
# Prepare training data: sliding windows of `step` values predicting the next value
X_train = []
y_train = []
step = 4
for i in range(step, len(train)):
    X_train.append(train[i-step:i])
    y_train.append(train[i])

X_train = np.reshape(X_train, (len(X_train), step, 1))  # (samples, time steps, features)
y_train = np.reshape(y_train, (len(y_train), 1))
# Set seed for reproducibility (random.seed() alone does not affect Keras)
tf.random.set_seed(40)
# Define the RNN model
model = Sequential()
model.add(SimpleRNN(units=5, input_shape=(step, 1), activation="sigmoid"))
model.add(Dense(units=1, activation="sigmoid"))
model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()
# Train the model
history = model.fit(X_train, y_train, epochs=1000)
# Plot the training loss
plt.figure()
plt.plot(history.history['loss'])
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Training Loss')
plt.show()

# Predict on training data
y_pred_train = model.predict(X_train)
y_pred_train = y_pred_train.flatten()
# Prepare validation data
valid = yn[train_size-step:len(yn)]
X_valid = []
y_valid = []
for i in range(step, len(valid)):
    X_valid.append(valid[i-step:i])
    y_valid.append(valid[i])

X_valid = np.reshape(X_valid, (len(X_valid), step, 1))
y_valid = np.reshape(y_valid, (len(y_valid), 1))

# Predict on validation data
y_pred_valid = model.predict(X_valid)
y_pred_valid = y_pred_valid.flatten()

# Set time intervals
t1 = np.arange(1, len(yn) + 1)
t2 = np.arange(step+1, train_size + 1)
t3 = np.arange(step+y_pred_train.shape[0]+1, len(yn) + 1)
plt.figure(figsize=(8, 5))
plt.plot(t1, yn, linestyle="-", marker="o", color="blue", label="Actual values")
plt.plot(t2, y_pred_train, linestyle="-", marker="o", color="red", label="Predicted values (training)")
plt.plot(t3, y_pred_valid, linestyle="-", marker="o", color="green", label="Predicted values (validation)")
plt.xlabel('Time Step')
plt.ylabel('Normalized Values')
plt.title('Actual vs Predicted Values')
plt.legend()
plt.show()
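
As a sketch of how the trained network could be used beyond the plot (not part of the video code), the last `step` normalized values give a one-step-ahead forecast, which can be mapped back to the original scale:

# One-step-ahead forecast (a sketch, not part of the video code)
x_last = yn[-step:].reshape(1, step, 1)
y_next_n = model.predict(x_last, verbose=0)[0, 0]
y_next = y_next_n * (np.max(y) - np.min(y)) + np.min(y)  # undo the normalization
print(y_next)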

Code for a convolutional neural network (CNN).

import numpy as np
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

zero = [1,1,1,1,0, 1,0,0,1,0, 1,0,0,1,0, 1,0,0,1,0, 1,1,1,1,0]
one = [0,0,1,0,0, 0,0,1,0,0, 0,0,1,0,0, 0,0,1,0,0, 0,0,1,0,0]
two = [1,1,1,1,0, 0,0,0,1,0, 1,1,1,1,0, 1,0,0,0,0, 1,1,1,1,0]
three = [1,1,1,1,0, 0,0,0,1,0, 0,1,1,1,0, 0,0,0,1,0, 1,1,1,1,0]
four = [1,0,0,1,0, 1,0,0,1,0, 1,1,1,1,0, 0,0,0,1,0, 0,0,0,1,0]
five = [1,1,1,1,0, 1,0,0,0,0, 1,1,1,1,0, 0,0,0,1,0, 1,1,1,1,0]
six = [1,1,1,1,0, 1,0,0,0,0, 1,1,1,1,0, 1,0,0,1,0, 1,1,1,1,0]
seven = [1,1,1,1,0, 0,0,0,1,0, 0,0,1,0,0, 0,1,0,0,0, 0,1,0,0,0]
eight = [1,1,1,1,0, 1,0,0,1,0, 1,1,1,1,0, 1,0,0,1,0, 1,1,1,1,0]
nine = [1,1,1,1,0, 1,0,0,1,0, 1,1,1,1,0, 0,0,0,1,0, 0,0,0,1,0]

X = np.array([zero, one, two, three, four, five, six, seven, eight, nine])
X = X.reshape(10, 5, 5, 1)  # (samples, height, width, channels), as Conv2D expects

y = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
y = to_categorical(y, 10)

plt.imshow(X[3].reshape(5, 5))
plt.show()

tf.random.set_seed(1)  # seed TensorFlow's RNG; random.seed() does not affect Keras weight initialization
model = Sequential([
    Conv2D(filters=1,
           kernel_size=(2, 2),
           strides=(1, 1),
           padding='valid',
           input_shape=(5, 5, 1),
           use_bias=False,
           activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(10, activation='softmax')
])
model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X, y, epochs=1000, batch_size=1, verbose=1)

# Predict the class probabilities for the first digit
print(model.predict(X[[0]], verbose=0))
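
Since the network has a single 2x2 filter trained without a bias, its learned kernel can be inspected directly. A minimal check (not shown in the video):

# Inspect the learned 2x2 convolution kernel (a check, not from the video code)
kernel = model.layers[0].get_weights()[0]   # shape (2, 2, 1, 1)
print(kernel[:, :, 0, 0])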