Deep Learning with PyTorch: Code Examples

21. Defining Neural Networks: Using nn.Module

Creating a simple neural network class using nn.Module:

import torch
import torch.nn as nn

class SimpleNet(nn.Module):
    def __init__(self):
        super(SimpleNet, self).__init__()
        self.linear = nn.Linear(10, 5)  # An example layer

    def forward(self, x):
        return self.linear(x)

net = SimpleNet()
print(net)
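
A quick forward pass confirms the layer's shapes (a small usage sketch):

x = torch.randn(2, 10)  # A batch of two 10-dimensional inputs
print(net(x).shape)     # torch.Size([2, 5])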

22. Convolutional Neural Networks (CNNs)

Implementing a basic CNN for image recognition:

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)  # Input channels, output channels, kernel size
        self.pool = nn.MaxPool2d(2, 2)  # Kernel size, stride
        self.conv2 = nn.Conv2d(20, 50, 5)
        self.fc1 = nn.Linear(50 * 4 * 4, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        x = self.pool(torch.relu(self.conv1(x)))
        x = self.pool(torch.relu(self.conv2(x)))
        x = x.view(-1, 50 * 4 * 4)
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x

cnn = CNN()
print(cnn)
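
The layer sizes above assume MNIST-style inputs; a quick shape check, assuming 1-channel 28x28 images:

images = torch.randn(8, 1, 28, 28)  # Batch of 8 grayscale 28x28 images
logits = cnn(images)
print(logits.shape)  # torch.Size([8, 10])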

23. Recurrent Neural Networks (RNNs)

Example of an RNN for sequential data processing:

class SimpleRNN(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers):
        super(SimpleRNN, self).__init__()
        self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True)

    def forward(self, x):
        out, h_n = self.rnn(x)
        return out

rnn = SimpleRNN(10, 20, 2)
print(rnn)
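
With batch_first=True, the RNN expects input of shape (batch, seq_len, input_size); a short usage sketch:

x = torch.randn(3, 7, 10)  # 3 sequences, 7 time steps, 10 features per step
out = rnn(x)
print(out.shape)  # torch.Size([3, 7, 20]) -- one 20-dim hidden state per time step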

24. Long Short-Term Memory Networks (LSTMs)

Creating an LSTM for sequence prediction:

class LSTMNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers):
        super(LSTMNet, self).__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)

    def forward(self, x):
        out, (h_n, c_n) = self.lstm(x)
        return out

lstm = LSTMNet(10, 50, 1)
print(lstm)

25. Optimizer and Loss Functions

Setting up an optimizer and a loss function for training:

import torch.optim as optim

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
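
Putting the two together, a single training step looks like the following sketch, assuming `inputs` and `targets` are a batch from a data loader and `net` is the model from example 21:

optimizer.zero_grad()               # Clear gradients accumulated from the previous step
outputs = net(inputs)               # Forward pass
loss = criterion(outputs, targets)  # Targets are class indices for CrossEntropyLoss
loss.backward()                     # Backpropagate
optimizer.step()                    # Update the parameters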

26. Batch Normalization

Adding batch normalization to a network:

class NetWithBatchNorm(nn.Module):
    def __init__(self):
        super(NetWithBatchNorm, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.bn1 = nn.BatchNorm2d(20)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(20 * 12 * 12, 10)

    def forward(self, x):
        x = self.pool(torch.relu(self.bn1(self.conv1(x))))
        x = x.view(-1, 20 * 12 * 12)
        x = self.fc1(x)
        return x

bn_net = NetWithBatchNorm()
print(bn_net)
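
The fully connected layer's size assumes 28x28 inputs; a quick shape check (usage sketch):

x = torch.randn(4, 1, 28, 28)  # Batch of 4 grayscale 28x28 images
print(bn_net(x).shape)         # torch.Size([4, 10])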

27. Dropout

Implementing dropout in a neural network to prevent overfitting:

class NetWithDropout(nn.Module):
    def __init__(self):
        super(NetWithDropout, self).__init__()
        self.fc1 = nn.Linear(100, 50)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x

dropout_net = NetWithDropout()
print(dropout_net)
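
Dropout is only active in training mode, so switch modes explicitly; a short sketch:

x = torch.randn(1, 100)
dropout_net.train()
print(dropout_net(x))  # Stochastic: dropout randomly zeroes activations
dropout_net.eval()
print(dropout_net(x))  # Deterministic: dropout is disabled during evaluation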

28. Custom Layers

Creating a custom layer in PyTorch:

class CustomLayer(nn.Module):
    def __init__(self, size):
        super(CustomLayer, self).__init__()
        self.linear = nn.Linear(size, size)

    def forward(self, x):
        return torch.tanh(self.linear(x))

custom_layer = CustomLayer(10)
print(custom_layer)

29. Activation Functions

Using different activation functions:

activation_net = nn.Sequential(
    nn.Linear(10, 10),
    nn.ReLU(),
    nn.Linear(10, 10),
    nn.Sigmoid()
)
print(activation_net)
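
The same activations are also available as functions in torch.nn.functional, which is convenient inside a forward method; a short sketch:

import torch.nn.functional as F

x = torch.randn(4, 10)
print(F.relu(x).min())         # ReLU clamps negative values to zero
print(torch.sigmoid(x).max())  # Sigmoid squashes values into (0, 1)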

30. Fine-tuning Pretrained Models

Fine-tuning a pretrained model for a new task:

from torchvision import models
pretrained_model = models.resnet18(pretrained=True)
for param in pretrained_model.parameters():
    param.requires_grad = False  # Freeze all layers
# Replace the last layer
pretrained_model.fc = nn.Linear(pretrained_model.fc.in_features, 100)  # Assuming 100 classes
print(pretrained_model)
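
Since the backbone is frozen, only the new head needs to be optimized; a minimal sketch, assuming a loss criterion as in example 25:

optimizer = optim.SGD(pretrained_model.fc.parameters(), lr=0.001, momentum=0.9)
# Each training step then updates only the replaced fully connected layer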

Continuing with the remaining topics in the Deep Learning with PyTorch series:

31. Data Loaders and Transformers

Efficiently handling and transforming data for model input:

from torch.utils.data import DataLoader, Dataset
import torchvision.transforms as transforms

class CustomDataset(Dataset):
    def __init__(self, data, transform=None):
        self.data = data
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        x = self.data[index]
        if self.transform:
            x = self.transform(x)
        return x

# Example data and transformation
data = torch.randn(100, 3, 32, 32)  # Random data simulating images
transform = transforms.Compose([
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
dataset = CustomDataset(data, transform)
dataloader = DataLoader(dataset, batch_size=10)

for batch in dataloader:
    print(batch.shape)

32. Multi-GPU Training

Scaling up the training process across multiple GPUs:

model = CNN()  # Assuming CNN is defined as in the previous example
if torch.cuda.device_count() > 1:
    print("Using", torch.cuda.device_count(), "GPUs!")
    model = nn.DataParallel(model)

model.to('cuda')

33. Advanced Backpropagation Techniques

Exploring techniques to enhance gradient flow and model training stability:

# Using gradient clipping in training loop
for input, target in dataloader:
    optimizer.zero_grad()
    output = model(input)
    loss = criterion(output, target)
    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)  # Clip gradients
    optimizer.step()

34. Hyperparameter Tuning

Optimizing learning rate, batch size, and other parameters:

from torch.optim.lr_scheduler import StepLR

optimizer = optim.SGD(model.parameters(), lr=0.01)
scheduler = StepLR(optimizer, step_size=30, gamma=0.1)  # Reduce LR every 30 epochs

for epoch in range(100):
    for input, target in dataloader:
        optimizer.zero_grad()
        output = model(input)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
    scheduler.step()  # Update learning rate

35. Model Evaluation Metrics

Implementing accuracy, precision, and recall:

def calculate_accuracy(output, target):
    preds = torch.argmax(output, dim=1)
    correct = torch.eq(preds, target).sum().float()
    accuracy = (correct / target.shape[0]) * 100
    return accuracy

# Usage in training loop
accuracy = calculate_accuracy(output, target)
print(f'Accuracy: {accuracy.item()}%')
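
Precision and recall follow the same pattern of comparing predictions against targets; a per-class sketch (`num_classes` is assumed from the task at hand):

def calculate_precision_recall(output, target, num_classes):
    preds = torch.argmax(output, dim=1)
    precision, recall = [], []
    for c in range(num_classes):
        tp = ((preds == c) & (target == c)).sum().float()
        fp = ((preds == c) & (target != c)).sum().float()
        fn = ((preds != c) & (target == c)).sum().float()
        precision.append(tp / (tp + fp + 1e-8))  # Small epsilon avoids division by zero
        recall.append(tp / (tp + fn + 1e-8))
    return torch.stack(precision), torch.stack(recall)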

36. Checkpointing Models

Saving and loading model checkpoints:

# Save checkpoint
torch.save({
    'epoch': epoch,
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
    'loss': loss,
}, 'model_checkpoint.pth')

# Load checkpoint
checkpoint = torch.load('model_checkpoint.pth')
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch']
loss = checkpoint['loss']

37. Visualization of Model Training

Using TensorBoard for visualizing training progress:

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter()

for epoch in range(100):
    for input, target in dataloader:
        optimizer.zero_grad()
        output = model(input)
        loss = criterion(output, target)
        writer.add_scalar('Loss/train', loss, epoch)
        loss.backward()
        optimizer.step()

writer.close()

38. Implementing Attention Mechanisms

Adding an attention layer to improve model interpretability:

class AttentionNet(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim):
        super(AttentionNet, self).__init__()
        self.attention = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.Tanh(),
            nn.Linear(hidden_dim, 1)
        )
        self.fc = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        weights = torch.softmax(self.attention(x), dim=0)
        context = torch.sum(weights * x, dim=0)
        return self.fc(context)

attention_model = AttentionNet(10, 20, 1)
print(attention_model)
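
Here the attention weights are computed over the first dimension, so the model expects an unbatched sequence of feature vectors; a short usage sketch:

seq = torch.randn(5, 10)     # A sequence of 5 steps with 10 features each
print(attention_model(seq))  # One weighted prediction for the whole sequence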

39. Generative Adversarial Networks (GANs)

Creating a simple GAN to generate new data instances:

class Generator(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(Generator, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size),
            nn.Tanh()
        )

    def forward(self, z):
        return self.fc(z)

class Discriminator(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(Discriminator, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_size, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        return self.fc(x)

# Instantiate models
generator = Generator(100, 256, 784)  # Example for generating MNIST-like data
discriminator = Discriminator(784, 256)
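
A minimal sketch of one adversarial training step, assuming `real_data` is a batch of flattened 28x28 images scaled to [-1, 1]:

criterion = nn.BCELoss()
g_opt = optim.Adam(generator.parameters(), lr=2e-4)
d_opt = optim.Adam(discriminator.parameters(), lr=2e-4)

batch_size = real_data.size(0)
real_labels = torch.ones(batch_size, 1)
fake_labels = torch.zeros(batch_size, 1)

# Discriminator step: distinguish real images from generated ones
noise = torch.randn(batch_size, 100)
fake_data = generator(noise)
d_loss = criterion(discriminator(real_data), real_labels) + \
         criterion(discriminator(fake_data.detach()), fake_labels)
d_opt.zero_grad()
d_loss.backward()
d_opt.step()

# Generator step: try to make the discriminator label fakes as real
g_loss = criterion(discriminator(fake_data), real_labels)
g_opt.zero_grad()
g_loss.backward()
g_opt.step()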

40. Transformer Models

Implementing a basic transformer model for NLP tasks:

import math
from torch.nn import TransformerEncoder, TransformerEncoderLayer

class TransformerModel(nn.Module):
    def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
        super(TransformerModel, self).__init__()
        self.model_type = 'Transformer'
        self.ninp = ninp
        self.pos_encoder = PositionalEncoding(ninp, dropout)  # Assumes a PositionalEncoding module is defined
        self.encoder = nn.Embedding(ntoken, ninp)
        encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        self.decoder = nn.Linear(ninp, ntoken)

    def forward(self, src, src_mask=None):
        src = self.encoder(src) * math.sqrt(self.ninp)
        src = self.pos_encoder(src)
        output = self.transformer_encoder(src, src_mask)
        output = self.decoder(output)
        return output

# Example transformer configuration
transformer_model = TransformerModel(ntoken=1000, ninp=512, nhead=8, nhid=2048, nlayers=6)
print(transformer_model)

These examples round out the overview of core deep learning techniques and their implementations in PyTorch, providing a broad toolkit for research and development across domains.

Moving on to the advanced topics in PyTorch:

41. Distributed Training

Implementing distributed training to scale across multiple nodes and GPUs:

import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

def setup(rank, world_size):
    dist.init_process_group("nccl", rank=rank, world_size=world_size)

def cleanup():
    dist.destroy_process_group()

class DistributedModel(nn.Module):
    def __init__(self):
        super(DistributedModel, self).__init__()
        self.layer = nn.Linear(10, 10)

    def forward(self, x):
        return self.layer(x)

def train(rank, world_size):
    setup(rank, world_size)
    model = DistributedModel().to(rank)
    ddp_model = DDP(model, device_ids=[rank])
    # Training loop
    cleanup()

# Example usage with 2 GPUs: run train(rank, world_size) in one process per GPU,
# e.g. torch.multiprocessing.spawn(train, args=(2,), nprocs=2)

42. Model Quantization

Reducing model size and inference time by quantizing a PyTorch model:

model = models.resnet18(pretrained=True)
model.eval()

# Quantize model
quantized_model = torch.quantization.quantize_dynamic(
    model, {nn.Linear}, dtype=torch.qint8
)
print(quantized_model)

43. Model Pruning

Pruning a neural network to remove unnecessary weights:

from torch.nn.utils import prune

def prune_model(model, amount=0.3):
    # Assumes a model with conv1, conv2, and fc1 layers, such as the CNN from example 22
    parameters_to_prune = (
        (model.conv1, 'weight'), (model.conv2, 'weight'), (model.fc1, 'weight')
    )
    for module, param in parameters_to_prune:
        prune.l1_unstructured(module, param, amount=amount)

prune_model(cnn)  # Using the CNN instance from example 22
print(cnn)
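
Pruning adds weight_orig and weight_mask parameters to each pruned module; once you are satisfied, the reparameterization can be made permanent with prune.remove. A short sketch:

sparsity = float(torch.sum(cnn.conv1.weight == 0)) / cnn.conv1.weight.nelement()
print(f'conv1 sparsity: {sparsity:.2%}')
prune.remove(cnn.conv1, 'weight')  # Fold the mask into the weight tensor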

44. Deploying to Production

Example of preparing a PyTorch model for deployment using TorchScript:

class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        self.layer = nn.Linear(10, 5)

    def forward(self, x):
        return torch.relu(self.layer(x))

model = MyModel()
scripted_model = torch.jit.script(model)
scripted_model.save("model_scripted.pt")

45. Federated Learning

Implementing a simple federated learning setup:

# Assuming setup for multiple clients is done
def train_on_client(data, model):
    # Train model on client's data
    return model.state_dict()  # return model updates

import copy

def federated_average(models):
    # Average the state_dicts collected from all clients
    with torch.no_grad():
        average_model = copy.deepcopy(models[0])
        for model_dict in models[1:]:
            for name, param in model_dict.items():
                average_model[name] += param
        for name in average_model:
            average_model[name] /= len(models)
    return average_model

# Simulate federated learning
client_models = [train_on_client(data, model) for data in client_datasets]
global_model = federated_average(client_models)

46. Graph Neural Networks (GNNs)

Example of creating a GNN using PyTorch Geometric:

import torch.nn.functional as F
import torch_geometric.nn as geom_nn

class GNNModel(nn.Module):
    def __init__(self):
        super(GNNModel, self).__init__()
        # `dataset` is assumed to be a PyTorch Geometric dataset (e.g. Planetoid/Cora)
        self.conv1 = geom_nn.GCNConv(dataset.num_node_features, 16)
        self.conv2 = geom_nn.GCNConv(16, dataset.num_classes)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = torch.relu(self.conv1(x, edge_index))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.conv2(x, edge_index)
        return torch.log_softmax(x, dim=1)

gnn_model = GNNModel()
print(gnn_model)

47. Reinforcement Learning

Creating a simple RL agent using PyTorch:

import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Categorical

class Policy(nn.Module):
    def __init__(self):
        super(Policy, self).__init__()
        self.fc = nn.Linear(4, 2)

    def forward(self, x):
        x = F.relu(self.fc(x))
        return Categorical(logits=x)

policy = Policy()
optimizer = optim.Adam(policy.parameters(), lr=1e-2)

# Example RL loop (REINFORCE-style), assuming a Gym environment such as CartPole
state = env.reset()
for t in range(1000):
    action_probs = policy(torch.from_numpy(state).float())
    action = action_probs.sample()
    next_state, reward, done, _ = env.step(action.item())
    optimizer.zero_grad()
    loss = -action_probs.log_prob(action) * reward
    loss.backward()
    optimizer.step()
    if done:
        break
    state = next_state

Continuing with more advanced topics in PyTorch:

48. Probabilistic Programming with PyTorch

Using Pyro, a probabilistic programming framework built on PyTorch:

import pyro
import pyro.distributions as dist

def model(data):
    alpha = torch.tensor(10.0)
    beta = torch.tensor(10.0)
    f = pyro.sample("latent_fairness", dist.Beta(alpha, beta))
    with pyro.plate("data", len(data)):
        pyro.sample("obs", dist.Bernoulli(f), obs=data)
    return f

data = torch.tensor([1.0, 0.0, 1.0, 1.0, 0.0])
fairness = model(data)
print(fairness)
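
Calling the model directly just draws a sample from the prior; to actually infer the latent fairness from the observed data, one would typically run stochastic variational inference. A minimal sketch, assuming an automatic guide:

from pyro.infer import SVI, Trace_ELBO
from pyro.infer.autoguide import AutoNormal
from pyro.optim import Adam

guide = AutoNormal(model)
svi = SVI(model, guide, Adam({"lr": 0.01}), loss=Trace_ELBO())
for step in range(1000):
    svi.step(data)
print(guide.median())  # Posterior estimate of latent_fairness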

49. Advanced Custom Autograd Functions

Creating custom autograd functions for specific gradient computations:

class MyReLU(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return input.clamp(min=0)

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        grad_input = grad_output.clone()
        grad_input[input < 0] = 0
        return grad_input

tensor = torch.tensor([-1.0, 2.0, -0.5, 4.0], requires_grad=True)
output = MyReLU.apply(tensor)
output.backward(torch.tensor([1.0, 1.0, 1.0, 1.0]))
print(tensor.grad)
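
torch.autograd.gradcheck can numerically verify a custom backward; it requires double-precision inputs. A short sketch:

test_input = torch.randn(6, dtype=torch.double, requires_grad=True)
print(torch.autograd.gradcheck(MyReLU.apply, (test_input,)))  # True if analytic and numeric gradients match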

50. Deep Reinforcement Learning

Implementing a deep Q-network (DQN) using PyTorch:

import torch.nn.functional as F
import random
import gym

class DQN(nn.Module):
    def __init__(self, inputs, outputs):
        super(DQN, self).__init__()
        self.fc1 = nn.Linear(inputs, 50)
        self.fc2 = nn.Linear(50, 50)
        self.fc3 = nn.Linear(50, outputs)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

env = gym.make('CartPole-v1')
model = DQN(env.observation_space.shape[0], env.action_space.n)
optimizer = optim.Adam(model.parameters(), lr=1e-4)

# Sample training loop
state = torch.tensor(env.reset(), dtype=torch.float32)
for step in range(1000):
    q_values = model(state)
    action = torch.argmax(q_values).item()
    next_state, reward, done, _ = env.step(action)
    next_state = torch.tensor(next_state, dtype=torch.float32)

    # Simple target: reward + gamma * max(next_q_values), detached so gradients
    # flow only through the current Q-value estimate
    next_q_values = model(next_state)
    target_q_value = reward + 0.99 * torch.max(next_q_values).detach()
    loss = F.mse_loss(q_values[action], target_q_value)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if done:
        break
    state = next_state

51. Neural Architecture Search (NAS)

Using NAS frameworks in PyTorch to automate model design:

# Hypothetical example, specific NAS tools would provide their own interfaces
import nni  # Neural Network Intelligence, a toolkit for NAS
from nni.algorithms.nas.pytorch import DartsTrainer

model = MyModel()  # Assuming a model compatible with DARTS
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.025)

trainer = DartsTrainer(
    model,
    loss=criterion,
    metrics=lambda output, target: accuracy(output, target),
    optimizer=optimizer,
    num_epochs=50,
    dataset_train=train_dataset,
    dataset_valid=val_dataset
)
trainer.train()

52. 3D Image Processing

Handling 3D medical imaging data with PyTorch:

import torchio as tio

# Example loading a 3D image
subject = tio.Subject(
    mri=tio.ScalarImage('path_to_3d_image.nii'),  # MRI file
    segmentation=tio.LabelMap('path_to_segmentation.nii')  # Segmentation file
)
transform = tio.RandomAffine()  # Example transformation
transformed_subject = transform(subject)

53. Meta Learning

Implementing a model for few-shot learning using the meta-learning approach:

class MetaLearner(nn.Module):
    def __init__(self):
        super(MetaLearner, self).__init__()
        self.fc1 = nn.Linear(784, 256)  # Example for MNIST
        self.fc2 = nn.Linear(256, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x, fast_weights=None):
        x = x.view(-1, 784)  # Flatten MNIST images
        if fast_weights is None:
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            x = self.fc3(x)
        else:
            x = F.relu(F.linear(x, *fast_weights['fc1']))
            x = F.relu(F.linear(x, *fast_weights['fc2']))
            x = F.linear(x, *fast_weights['fc3'])
        return x

learner = MetaLearner()

Continuing with additional advanced topics in PyTorch, focusing on a mix of specialized applications and techniques:

54. Multi-Task Learning

Developing a network that can handle multiple tasks simultaneously:

class MultiTaskModel(nn.Module):
    def __init__(self):
        super(MultiTaskModel, self).__init__()
        self.shared_layer = nn.Linear(100, 50)
        self.task1_layer = nn.Linear(50, 10)  # Task 1 might predict categories
        self.task2_layer = nn.Linear(50, 1)  # Task 2 might predict a continuous value

    def forward(self, x):
        x = torch.relu(self.shared_layer(x))
        out1 = self.task1_layer(x)
        out2 = self.task2_layer(x)
        return out1, out2

model = MultiTaskModel()
print(model)
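
The two heads are usually trained with a combined loss; a sketch, assuming `y_class` holds class indices and `y_value` holds regression targets for a batch `x`:

out1, out2 = model(x)
loss = nn.CrossEntropyLoss()(out1, y_class) + nn.MSELoss()(out2.squeeze(1), y_value)
loss.backward()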

55. Adversarial Training

Incorporating adversarial training to improve model robustness against adversarial attacks:

import torchattacks

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = CNN().to(device)  # Reusing the CNN architecture from example 22
atk = torchattacks.PGD(model, eps=0.3, alpha=0.01, steps=40)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

for images, labels in train_loader:
    images, labels = images.to(device), labels.to(device)
    # Generate adversarial images
    adv_images = atk(images, labels)
    outputs = model(adv_images)
    loss = nn.CrossEntropyLoss()(outputs, labels)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

56. Autoencoders and Variational Autoencoders (VAEs)

Building an autoencoder and a variational autoencoder for unsupervised learning tasks:

class Autoencoder(nn.Module):
    def __init__(self):
        super(Autoencoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(784, 400),
            nn.ReLU(),
            nn.Linear(400, 20)
        )
        self.decoder = nn.Sequential(
            nn.Linear(20, 400),
            nn.ReLU(),
            nn.Linear(400, 784),
            nn.Sigmoid()
        )

    def forward(self, x):
        x = self.encoder(x.view(-1, 784))
        x = self.decoder(x)
        return x.view(-1, 28, 28)

# Variational Autoencoder
class VAE(nn.Module):
    def __init__(self):
        super(VAE, self).__init__()
        self.fc1 = nn.Linear(784, 400)
        self.fc21 = nn.Linear(400, 20)  # Mean
        self.fc22 = nn.Linear(400, 20)  # Log variance
        self.fc3 = nn.Linear(20, 400)
        self.fc4 = nn.Linear(400, 784)

    def encode(self, x):
        h1 = torch.relu(self.fc1(x))
        return self.fc21(h1), self.fc22(h1)

    def reparameterize(self, mu, logvar):
        std = torch.exp(0.5*logvar)
        eps = torch.randn_like(std)
        return mu + eps*std

    def decode(self, z):
        h3 = torch.relu(self.fc3(z))
        return torch.sigmoid(self.fc4(h3))

    def forward(self, x):
        mu, logvar = self.encode(x.view(-1, 784))
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar

ae_model = Autoencoder()
vae_model = VAE()
print(ae_model, vae_model)
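
The VAE is trained with the standard objective of reconstruction error plus a KL-divergence term; a sketch, assuming `x` contains flattened images scaled to [0, 1]:

import torch.nn.functional as F

def vae_loss(recon_x, x, mu, logvar):
    bce = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())  # KL(q(z|x) || N(0, I))
    return bce + kld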

57. Multimodal Learning

Creating models that can process and integrate information from multiple types of data sources:

class MultimodalNet(nn.Module):
    def __init__(self):
        super(MultimodalNet, self).__init__()
        self.embedding = nn.Embedding(1000, 100)
        self.text_lstm = nn.LSTM(100, 50, batch_first=True)
        self.image_module = nn.Sequential(
            nn.Conv2d(3, 16, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.fusion_layer = nn.Linear(50 + 16*14*14, 100)  # Assuming 28x28 input images
        self.classifier = nn.Linear(100, 10)

    def forward(self, text, image):
        embedded = self.embedding(text)
        _, (h_n, _) = self.text_lstm(embedded)
        text_features = h_n[-1]  # Last hidden state: (batch, 50)
        image_features = self.image_module(image).view(-1, 16*14*14)
        combined_features = torch.cat((text_features, image_features), dim=1)
        combined_features = torch.relu(self.fusion_layer(combined_features))
        output = self.classifier(combined_features)
        return output

multimodal_model = MultimodalNet()
print(multimodal_model)

58. Optimization Algorithms

Exploring different optimization techniques beyond SGD and Adam:

# Using RMSprop as an example
optimizer = torch.optim.RMSprop(model.parameters(), lr=0.01, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False)
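
Other built-in optimizers follow the same interface, so they can be swapped in with a single line; for example:

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=0.01)  # Adam with decoupled weight decay
optimizer = torch.optim.Adagrad(model.parameters(), lr=0.01)                   # Per-parameter adaptive learning rates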

59. Natural Language Understanding

Implementing a model for comprehensive NLP tasks such as question answering:

class NLUModel(nn.Module):
    def __init__(self):
        super(NLUModel, self).__init__()
        self.embedding = nn.Embedding(10000, 300)
        self.lstm = nn.LSTM(300, 100, batch_first=True)
        self.classifier = nn.Linear(100, 50)  # Example for classification

    def forward(self, input_ids):
        x = self.embedding(input_ids)
        x, _ = self.lstm(x)
        x = x[:, -1, :]  # Get the output from the last LSTM cell
        logits = self.classifier(x)
        return logits

nlu_model = NLUModel()
print(nlu_model)

60. PyTorch and ONNX

Exporting PyTorch models to the ONNX format for compatibility with other deep learning frameworks:

import torch.onnx

class SimpleModel(nn.Module):
    def __init__(self):
        super(SimpleModel, self).__init__()
        self.linear = nn.Linear(10, 5)

    def forward(self, x):
        return torch.relu(self.linear(x))

model = SimpleModel()
dummy_input = torch.randn(10, 10)
torch.onnx.export(model, dummy_input, "model.onnx", verbose=True)
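
The exported graph can be validated before handing it to another runtime; a short sketch, assuming the onnx package is installed:

import onnx

onnx_model = onnx.load("model.onnx")
onnx.checker.check_model(onnx_model)  # Raises if the graph is malformed
print(onnx.helper.printable_graph(onnx_model.graph))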

61. Integration with Flask

Creating a web API using Flask to serve PyTorch models:

from flask import Flask, request, jsonify
import torch

app = Flask(__name__)
model = SimpleModel()
model.load_state_dict(torch.load('model.pth'))
model.eval()

@app.route('/predict', methods=['POST'])
def predict():
    data = request.get_json()
    inputs = torch.tensor(data['input'])
    with torch.no_grad():
        prediction = model(inputs)
    return jsonify({'prediction': prediction.tolist()})

if __name__ == '__main__':
    app.run(debug=True)
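
A client can then query the endpoint with a JSON payload; a short sketch using the requests library:

import requests

resp = requests.post('http://localhost:5000/predict', json={'input': [[0.1] * 10]})
print(resp.json())  # {'prediction': [[...]]}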

62. Using PyTorch with Docker

Containerizing a PyTorch application using Docker for easy deployment:

# Use an official PyTorch runtime as a parent image
FROM pytorch/pytorch:1.7.1-cuda11.0-cudnn8-runtime

# Set the working directory in the container
WORKDIR /usr/src/app

# Copy the current directory contents into the container at /usr/src/app
COPY . .

# Install any needed packages specified in requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Run app.py when the container launches
CMD ["python", "app.py"]

63. PyTorch and Mobile Deployment

Using TorchScript to prepare and deploy PyTorch models on mobile devices:

model = SimpleModel()
scripted_model = torch.jit.script(model)  # Convert to TorchScript
scripted_model.save('model_mobile.pt')    # Save the scripted model
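
Recent PyTorch versions also provide a mobile-specific optimization pass; a sketch, assuming a version that ships torch.utils.mobile_optimizer:

from torch.utils.mobile_optimizer import optimize_for_mobile

mobile_model = optimize_for_mobile(scripted_model)         # Fuses and optimizes ops for mobile runtimes
mobile_model._save_for_lite_interpreter('model_lite.ptl')  # Format expected by the PyTorch Lite interpreter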

64. PyTorch and Cloud Platforms

Example of using PyTorch on cloud platforms like AWS, integrating with S3 for data storage:

import io
import boto3
import torch
from torch.utils.data import Dataset

class S3Dataset(Dataset):
    def __init__(self, s3_bucket, s3_keys):
        self.s3_bucket = s3_bucket
        self.s3_keys = s3_keys
        self.s3_client = boto3.client('s3')

    def __len__(self):
        return len(self.s3_keys)

    def __getitem__(self, idx):
        response = self.s3_client.get_object(Bucket=self.s3_bucket, Key=self.s3_keys[idx])
        data = torch.load(io.BytesIO(response['Body'].read()))  # Wrap in a seekable buffer for torch.load
        return data

s3_dataset = S3Dataset('my-s3-bucket', ['data1.pt', 'data2.pt'])

65. TorchServe for Model Serving

Setting up TorchServe to serve a PyTorch model, allowing easy model deployment:

# First, install TorchServe
pip install torchserve torch-model-archiver

# Create a TorchServe model archive
torch-model-archiver --model-name my_model --version 1.0 --model-file model.py --serialized-file model.pth --handler my_handler.py

# Start the TorchServe server with the model
torchserve --start --model-store model_store --models my_model.mar

66. Connecting PyTorch with Apache Kafka

Integrating PyTorch with Kafka for real-time data processing and inference:

from kafka import KafkaConsumer
import json
import torch

consumer = KafkaConsumer(
    'topic_name',
    bootstrap_servers=['localhost:9092'],
    value_deserializer=lambda m: json.loads(m.decode('utf-8'))
)

model = SimpleModel()
model.eval()

for message in consumer:
    data = torch.tensor(message.value['data'])
    with torch.no_grad():
        output = model(data)
    print(output)

67. PyTorch in Robotics

Using PyTorch for developing robotics applications, such as motion planning and object recognition:

# Example of a neural network for robotic gripper control
class RoboticArmModel(nn.Module):
    def __init__(self):
        super(RoboticArmModel, self).__init__()
        self.fc1 = nn.Linear(3, 128)  # 3D position inputs
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(