我有这个训练循环
def train(dataloader, model, loss_fn, optimizer):
    """Run one training epoch: forward pass, loss, backward pass, and
    optimizer step for every batch in `dataloader`.

    Prints the running loss every 100 batches.
    NOTE(review): relies on a module-level `device` — confirm it is defined.
    """
    n_samples = len(dataloader.dataset)
    model.train()
    for step, (X, y) in enumerate(dataloader):
        # Move the batch onto the training device.
        # NOTE(review): torch.stack assumes X and y arrive as sequences of
        # tensors; a default DataLoader collate already returns stacked
        # tensors — confirm against the actual dataloader.
        X = torch.stack(X).to(device)
        y = torch.stack(y).to(device)

        # Forward pass and loss computation.
        prediction = model(X)
        batch_loss = loss_fn(prediction, y)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()

        # Periodic progress report.
        if step % 100 == 0:
            current = step * len(X)
            print(f"loss: {batch_loss.item():>7f} [{current:>5d}/{n_samples:>5d}]")
Run Code Online (Sandbox Code Playground)
和这个lstm:
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
class BELT_LSTM(nn.Module):
def __init__(self, input_size, hidden_size, num_layers):
super (BELT_LSTM, self).__init__()
self.hidden_size = …
Run Code Online (Sandbox Code Playground)