Deep Learning and AI Superhero

Chapter 6: Recurrent Neural Networks (RNNs) and LSTMs

Practical Exercises Chapter 6

Exercise 1: Implement a Simple RNN for Sequence Classification

Task: Implement a simple RNN to classify sequences of numbers. Use synthetic data where each sequence is classified as positive if the sum of the elements is above a threshold, and negative otherwise.

Solution:

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

# Generate synthetic data (binary classification based on sequence sum)
def generate_data(num_samples=1000, sequence_length=10, threshold=5):
    X = torch.randint(0, 3, (num_samples, sequence_length)).float()
    y = (X.sum(dim=1) > threshold).float()
    return X, y

# Define the RNN model
class SimpleRNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(SimpleRNN, self).__init__()
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out, _ = self.rnn(x)
        out = self.fc(out[:, -1, :])  # Use the output from the last time step
        return out

# Hyperparameters
input_size = 1
hidden_size = 16
output_size = 1
learning_rate = 0.001
epochs = 5

# Generate data
X, y = generate_data()
X = X.unsqueeze(-1)  # Add input size dimension
dataset = TensorDataset(X, y)
dataloader = DataLoader(dataset, batch_size=32, shuffle=True)

# Initialize model, loss function, and optimizer
model = SimpleRNN(input_size, hidden_size, output_size)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Training loop
for epoch in range(epochs):
    running_loss = 0.0
    for inputs, labels in dataloader:
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs.squeeze(), labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print(f"Epoch {epoch+1}, Loss: {running_loss / len(dataloader)}")

# Example prediction
with torch.no_grad():
    example_seq = torch.tensor([[0, 1, 2, 0, 1, 2, 1, 0, 2, 1]]).float().unsqueeze(-1)
    output = model(example_seq)
    print("Predicted output:", torch.sigmoid(output))

In this exercise:

  • We created synthetic sequences where the sum of elements determines the class.
  • A simple RNN was used to classify sequences as positive or negative.
  • The model was trained using binary cross-entropy loss and evaluated with an example sequence; a quick accuracy check is sketched below.
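
As a quick optional check (a sketch, not part of the original solution), the trained model's accuracy on the training data can be estimated by reusing the model, X, and y objects defined above:

# Rough accuracy check on the training data (assumes model, X, and y from above)
with torch.no_grad():
    logits = model(X)  # X already has shape (num_samples, sequence_length, 1)
    preds = (torch.sigmoid(logits.squeeze(-1)) > 0.5).float()
    accuracy = (preds == y).float().mean().item()
print(f"Training accuracy: {accuracy:.3f}")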

Exercise 2: Implement an LSTM for Text Generation

Task: Train an LSTM on character-level text data to generate new text. Use a simple dataset like Shakespeare’s text.

Solution:

import tensorflow as tf
import numpy as np

# Load dataset (for simplicity, we use a small string for text generation)
text = "To be, or not to be, that is the question."

# Preprocess the data
vocab = sorted(set(text))
char_to_idx = {char: idx for idx, char in enumerate(vocab)}
idx_to_char = np.array(vocab)
text_as_int = np.array([char_to_idx[c] for c in text])

# Create input-output pairs
seq_length = 10
examples_per_epoch = len(text) // (seq_length + 1)  # each example consumes seq_length + 1 characters
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)

def split_input_target(chunk):
    input_text = chunk[:-1]
    target_text = chunk[1:]
    return input_text, target_text

# Keep the batch small: the toy text yields only a few sequences, and a batch of
# 32 with drop_remainder=True would leave the dataset empty
dataset = sequences.map(split_input_target).batch(2, drop_remainder=True)

# Define the LSTM model
class LSTMTextGenerator(tf.keras.Model):
    def __init__(self, vocab_size, embed_size, lstm_units):
        super(LSTMTextGenerator, self).__init__()
        self.embedding = tf.keras.layers.Embedding(vocab_size, embed_size)
        self.lstm = tf.keras.layers.LSTM(lstm_units, return_sequences=True, return_state=True)
        self.fc = tf.keras.layers.Dense(vocab_size)

    def call(self, inputs, states=None, return_state=False):
        x = self.embedding(inputs)
        output, state_h, state_c = self.lstm(x, initial_state=states)
        logits = self.fc(output)
        if return_state:
            return logits, [state_h, state_c]
        return logits  # return only the logits during training so model.fit works

# Hyperparameters
vocab_size = len(vocab)
embed_size = 64
lstm_units = 128

# Instantiate the model
model = LSTMTextGenerator(vocab_size, embed_size, lstm_units)

# Loss function and optimizer
def loss_fn(labels, logits):
    return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)

model.compile(optimizer='adam', loss=loss_fn)

# Train the model
model.fit(dataset, epochs=10)

# Text generation function
def generate_text(model, start_string, num_generate=100):
    input_eval = [char_to_idx[s] for s in start_string]
    input_eval = tf.expand_dims(input_eval, 0)

    generated_text = []
    states = None
    for _ in range(num_generate):
        predictions, states = model(input_eval, states=states, return_state=True)
        predictions = tf.squeeze(predictions, 0)

        predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy()
        input_eval = tf.expand_dims([predicted_id], 0)

        generated_text.append(idx_to_char[predicted_id])

    return start_string + ''.join(generated_text)

# Generate text
generated_text = generate_text(model, start_string="To be")
print("Generated text:", generated_text)

In this exercise:

  • We used a character-level LSTM to generate text. The model was trained on a small sequence from Shakespeare’s text.
  • The model was trained to predict the next character based on previous ones.
  • After training, we generated new text using the LSTM; a sketch for scaling up to the full Shakespeare corpus follows this list.
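
To scale the exercise up to real data, a sketch like the one below can replace the toy string. The download URL is the one used in the official TensorFlow text-generation tutorial; treat it as an assumption and adjust it if the file has moved.

# Optional: load the full Shakespeare corpus instead of the toy string (assumed URL)
path = tf.keras.utils.get_file(
    "shakespeare.txt",
    "https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt",
)
text = open(path, "rb").read().decode(encoding="utf-8")
# With roughly 1 MB of text there are thousands of sequences, so a longer
# seq_length (e.g., 100) and a batch size of 32 or 64 become reasonable;
# re-run the preprocessing and training steps above after replacing text.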

Exercise 3: Implement a Transformer for Sequence-to-Sequence Learning

Task: Implement a transformer model for sequence-to-sequence translation. Use dummy data to train the transformer on translating sequences from one domain to another (e.g., numbers to words).

Solution:

import torch
import torch.nn as nn

# Define a basic transformer model for sequence-to-sequence translation
class TransformerModel(nn.Module):
    def __init__(self, embed_size, num_heads, num_encoder_layers, num_decoder_layers, ff_hidden_dim, vocab_size):
        super(TransformerModel, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.transformer = nn.Transformer(
            d_model=embed_size,
            nhead=num_heads,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            dim_feedforward=ff_hidden_dim,
        )
        self.fc_out = nn.Linear(embed_size, vocab_size)

    def forward(self, src, tgt):
        # Note: nn.Transformer adds no positional information or target mask here;
        # see the positional-encoding sketch below the example output
        src_emb = self.embedding(src)
        tgt_emb = self.embedding(tgt)
        transformer_output = self.transformer(src_emb, tgt_emb)
        return self.fc_out(transformer_output)

# Example inputs (sequence_length=10, batch_size=32)
src = torch.randint(0, 100, (10, 32))  # source token IDs (stand-in for the "numbers" domain)
tgt = torch.randint(0, 100, (10, 32))  # target token IDs (stand-in for the "words" domain)

# Hyperparameters
embed_size = 64
num_heads = 8
num_encoder_layers = 6
num_decoder_layers = 6
ff_hidden_dim = 128
vocab_size = 100

# Initialize the transformer model
model = TransformerModel(embed_size, num_heads, num_encoder_layers, num_decoder_layers, ff_hidden_dim, vocab_size)

# Forward pass through the transformer
output = model(src, tgt)
print("Transformer output shape:", output.shape)

In this exercise:

  • We implemented a simple transformer for sequence-to-sequence tasks.
  • The model encodes the source sequence and decodes it to generate the target sequence.
  • We used dummy data to simulate sequence translation; a minimal training sketch follows this list.
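
The task mentions training on the dummy data, while the solution above stops at a single forward pass. A minimal training sketch under that assumption (teacher forcing with shifted targets; it omits the causal target mask and positional encodings a real translation model would need) could look like this:

import torch.optim as optim

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)

for step in range(100):
    tgt_input = tgt[:-1, :]   # decoder input: all target tokens except the last
    tgt_labels = tgt[1:, :]   # labels: all target tokens except the first
    optimizer.zero_grad()
    logits = model(src, tgt_input)  # (seq_len - 1, batch, vocab_size)
    loss = criterion(logits.reshape(-1, vocab_size), tgt_labels.reshape(-1))
    loss.backward()
    optimizer.step()
    if (step + 1) % 20 == 0:
        print(f"Step {step + 1}, Loss: {loss.item():.4f}")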

These practical exercises covered important concepts in sequence modeling using RNNs, LSTMs, and Transformers. From building a simple RNN for sequence classification to generating text with an LSTM and implementing a transformer for sequence translation, they demonstrate how powerful and versatile these architectures are for handling sequential data.
