Bra*_*roy 3 python machine-learning keras tensorflow pytorch
I am converting Keras code into PyTorch because I am more familiar with the latter than the former. However, I found that it is not learning (or only barely).
Below I have provided almost all of my PyTorch code, including the initialisation code, so that you can try it out yourself. The only thing you would have to provide yourself are the word embeddings (I'm sure you can find many word2vec models online). The first input file should be a file with tokenised text, the second input file a file with floating-point numbers, one per line. Because I have provided all the code, this question may seem huge and too broad. However, my question is specific enough, I think: what is wrong in my model or training loop that causes my model to not or barely improve? (See the results below.)
I have tried to provide many comments where applicable, and I have also provided the shape transformations, so you do not have to run the code to see what is going on. The data preparation methods are not important to inspect.
The most important parts are the forward method of RegressorNet and the training loop of RegressionRNN (admittedly, those names were badly chosen). I think the mistake is somewhere in there.
from pathlib import Path
import time
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
import gensim
from scipy.stats import pearsonr
from LazyTextDataset import LazyTextDataset
class RegressorNet(nn.Module):
    def __init__(self, hidden_dim, embeddings=None, drop_prob=0.0):
        super(RegressorNet, self).__init__()
        self.hidden_dim = hidden_dim
        self.drop_prob = drop_prob

        # Load pretrained w2v model, but freeze it: don't retrain it.
        self.word_embeddings = nn.Embedding.from_pretrained(embeddings)
        self.word_embeddings.weight.requires_grad = False
        self.w2v_rnode = nn.GRU(embeddings.size(1), hidden_dim, bidirectional=True, dropout=drop_prob)

        self.dropout = nn.Dropout(drop_prob)
        self.linear = nn.Linear(hidden_dim * 2, 1)
        # LeakyReLU rather than ReLU so that we don't get stuck in dead nodes
        self.lrelu = nn.LeakyReLU()

    def forward(self, batch_size, sentence_input):
        # shape sizes for:
        # * batch_size 128
        # * embeddings of dim 146
        # * hidden dim of 200
        # * sentence length of 20

        # sentence_input: torch.Size([128, 20])
        # Get word2vec vector representation
        embeds = self.word_embeddings(sentence_input)
        # embeds: torch.Size([128, 20, 146])

        # embeds.view(-1, batch_size, embeds.size(2)): torch.Size([20, 128, 146])
        # Input vectors into GRU, only keep track of output
        w2v_out, _ = self.w2v_rnode(embeds.view(-1, batch_size, embeds.size(2)))
        # w2v_out: torch.Size([20, 128, 400])

        # Leaky ReLU it
        w2v_out = self.lrelu(w2v_out)

        # Dropout some nodes
        if self.drop_prob > 0:
            w2v_out = self.dropout(w2v_out)
        # w2v_out: torch.Size([20, 128, 400])
        # w2v_out[-1, :, :]: torch.Size([128, 400])

        # Only use the last output of a sequence! Supposedly that cell outputs the final information
        regression = self.linear(w2v_out[-1, :, :])
        # regression: torch.Size([128, 1])

        return regression
class RegressionRNN:
    def __init__(self, train_files=None, test_files=None, dev_files=None):
        print('Using torch ' + torch.__version__)

        self.datasets, self.dataloaders = RegressionRNN._set_data_loaders(train_files, test_files, dev_files)
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        self.model = self.w2v_vocab = self.criterion = self.optimizer = self.scheduler = None

    @staticmethod
    def _set_data_loaders(train_files, test_files, dev_files):
        # labels must be the last input file
        datasets = {
            'train': LazyTextDataset(train_files) if train_files is not None else None,
            'test': LazyTextDataset(test_files) if test_files is not None else None,
            'valid': LazyTextDataset(dev_files) if dev_files is not None else None
        }
        dataloaders = {
            'train': DataLoader(datasets['train'], batch_size=128, shuffle=True, num_workers=4) if train_files is not None else None,
            'test': DataLoader(datasets['test'], batch_size=128, num_workers=4) if test_files is not None else None,
            'valid': DataLoader(datasets['valid'], batch_size=128, num_workers=4) if dev_files is not None else None
        }

        return datasets, dataloaders

    @staticmethod
    def prepare_lines(data, split_on=None, cast_to=None, min_size=None, pad_str=None, max_size=None, to_numpy=False,
                      list_internal=False):
        """ Converts the string input (line) to an applicable format. """
        out = []
        for line in data:
            line = line.strip()
            if split_on:
                line = line.split(split_on)
                line = list(filter(None, line))
            else:
                line = [line]

            if cast_to is not None:
                line = [cast_to(l) for l in line]

            if min_size is not None and len(line) < min_size:
                # pad line up to a number of tokens
                line += (min_size - len(line)) * ['@pad@']
            elif max_size and len(line) > max_size:
                line = line[:max_size]

            if list_internal:
                line = [[item] for item in line]

            if to_numpy:
                line = np.array(line)

            out.append(line)

        if to_numpy:
            out = np.array(out)

        return out

    def prepare_w2v(self, data):
        idxs = []
        for seq in data:
            tok_idxs = []
            for word in seq:
                # For every word, get its index in the w2v model.
                # If it doesn't exist, use @unk@ (available in the model).
                try:
                    tok_idxs.append(self.w2v_vocab[word].index)
                except KeyError:
                    tok_idxs.append(self.w2v_vocab['@unk@'].index)
            idxs.append(tok_idxs)
        idxs = torch.tensor(idxs, dtype=torch.long)

        return idxs

    def train(self, epochs=10):
        valid_loss_min = np.Inf
        train_losses, valid_losses = [], []
        for epoch in range(1, epochs + 1):
            epoch_start = time.time()

            train_loss, train_results = self._train_valid('train')
            valid_loss, valid_results = self._train_valid('valid')

            # Calculate Pearson correlation between prediction and target
            try:
                train_pearson = pearsonr(train_results['predictions'], train_results['targets'])
            except FloatingPointError:
                train_pearson = "Could not calculate Pearsonr"

            try:
                valid_pearson = pearsonr(valid_results['predictions'], valid_results['targets'])
            except FloatingPointError:
                valid_pearson = "Could not calculate Pearsonr"

            # calculate average losses
            train_loss = np.mean(train_loss)
            valid_loss = np.mean(valid_loss)

            train_losses.append(train_loss)
            valid_losses.append(valid_loss)

            # print training/validation statistics
            print(f'----------\n'
                  f'Epoch {epoch} - completed in {(time.time() - epoch_start):.0f} seconds\n'
                  f'Training Loss: {train_loss:.6f}\t Pearson: {train_pearson}\n'
                  f'Validation loss: {valid_loss:.6f}\t Pearson: {valid_pearson}')

            # validation loss has decreased
            if valid_loss <= valid_loss_min and train_loss > valid_loss:
                print(f'!! Validation loss decreased ({valid_loss_min:.6f} --> {valid_loss:.6f}). Saving model ...')
                valid_loss_min = valid_loss

            if train_loss <= valid_loss:
                print('!! Training loss is lte validation loss. Might be overfitting!')

            # Optimise with scheduler
            if self.scheduler is not None:
                self.scheduler.step(valid_loss)

        print('Done training...')

    def _train_valid(self, do):
        """ Do training or validating. """
        if do not in ('train', 'valid'):
            raise ValueError("Use 'train' or 'valid' for 'do'.")

        results = {'predictions': np.array([]), 'targets': np.array([])}
        losses = np.array([])

        self.model = self.model.to(self.device)
        if do == 'train':
            self.model.train()
            torch.set_grad_enabled(True)
        else:
            self.model.eval()
            torch.set_grad_enabled(False)

        for batch_idx, data in enumerate(self.dataloaders[do], 1):
            # 1. Data prep
            sentence = data[0]
            target = data[-1]
            curr_batch_size = target.size(0)

            # Returns list of tokens, possibly padded @pad@
            sentence = self.prepare_lines(sentence, split_on=' ', min_size=20, max_size=20)
            # Converts tokens into w2v IDs as a Tensor
            sent_w2v_idxs = self.prepare_w2v(sentence)
            # Converts output to Tensor of floats
            target = torch.Tensor(self.prepare_lines(target, cast_to=float))

            # Move input to device
            sent_w2v_idxs, target = sent_w2v_idxs.to(self.device), target.to(self.device)

            # 2. Predictions
            pred = self.model(curr_batch_size, sentence_input=sent_w2v_idxs)
            loss = self.criterion(pred, target)

            # 3. Optimise during training
            if do == 'train':
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

            # 4. Save results
            pred = pred.detach().cpu().numpy()
            target = target.cpu().numpy()

            results['predictions'] = np.append(results['predictions'], pred, axis=None)
            results['targets'] = np.append(results['targets'], target, axis=None)
            losses = np.append(losses, float(loss))

        torch.set_grad_enabled(True)

        return losses, results
if __name__ == '__main__':
    HIDDEN_DIM = 200

    # Load embeddings from pretrained gensim model
    embed_p = Path('path-to.w2v_model').resolve()
    w2v_model = gensim.models.KeyedVectors.load_word2vec_format(str(embed_p))
    # add a padding token with only zeros
    w2v_model.add(['@pad@'], [np.zeros(w2v_model.vectors.shape[1])])
    embed_weights = torch.FloatTensor(w2v_model.vectors)

    # Text files are used as input. Every line is one datapoint.
    # *.tok.low.*: tokenized (space-separated) sentences
    # *.cross: one floating point number per line, which we are trying to predict
    regr = RegressionRNN(train_files=(r'train.tok.low.en',
                                      r'train.cross'),
                         dev_files=(r'dev.tok.low.en',
                                    r'dev.cross'),
                         test_files=(r'test.tok.low.en',
                                     r'test.cross'))
    regr.w2v_vocab = w2v_model.vocab
    regr.model = RegressorNet(HIDDEN_DIM, embed_weights, drop_prob=0.2)
    regr.criterion = nn.MSELoss()
    regr.optimizer = optim.Adam(list(regr.model.parameters())[0:], lr=0.001)
    regr.scheduler = optim.lr_scheduler.ReduceLROnPlateau(regr.optimizer, 'min', factor=0.1, patience=5, verbose=True)

    regr.train(epochs=100)
For LazyTextDataset, you can refer to the class below.
from torch.utils.data import Dataset
import linecache


class LazyTextDataset(Dataset):
    def __init__(self, paths):
        # labels are in the last path
        self.paths, self.labels_path = paths[:-1], paths[-1]

        with open(self.labels_path, encoding='utf-8') as fhin:
            lines = 0
            for line in fhin:
                if line.strip() != '':
                    lines += 1

            self.num_entries = lines

    def __getitem__(self, idx):
        data = [linecache.getline(p, idx + 1) for p in self.paths]
        label = linecache.getline(self.labels_path, idx + 1)

        return (*data, label)

    def __len__(self):
        return self.num_entries
As I wrote before, I am trying to convert a Keras model to PyTorch. The original Keras code does not use an embedding layer, and uses pre-built word2vec vectors per sentence as input. In the model below there is no embedding layer. The Keras summary looks like this (I don't have access to the base model setup).
Layer (type) Output Shape Param # Connected to
====================================================================================================
bidirectional_1 (Bidirectional) (200, 400) 417600
____________________________________________________________________________________________________
dropout_1 (Dropout) (200, 800) 0 merge_1[0][0]
____________________________________________________________________________________________________
dense_1 (Dense) (200, 1) 801 dropout_1[0][0]
====================================================================================================
The problem is that, given the same input, the Keras model does work and reaches a +0.5 Pearson correlation between the predicted and the actual labels. The PyTorch model above, however, does not seem to work at all. To give you an idea, here are the loss (mean squared error) and Pearson (correlation coefficient, p-value) after the first epoch:
Epoch 1 - completed in 11 seconds
Training Loss: 1.684495 Pearson: (-0.0006077809280690612, 0.8173368901481127)
Validation loss: 1.708228 Pearson: (0.017794288315261794, 0.4264098054188664)
And after the 100th epoch:
Epoch 100 - completed in 11 seconds
Training Loss: 1.660194 Pearson: (0.0020315421756790806, 0.4400929436716754)
Validation loss: 1.704910 Pearson: (-0.017288118524826892, 0.4396865964324158)
The loss is plotted below (when you look at the Y-axis, you can see that the improvement is minimal).
A final indicator that something may be off is that for my 140K lines of input, each epoch only takes 10 seconds on my GTX 1080TI. I feel that that is not much, and I would guess that the optimisation is not working/running. I cannot figure out why, though. The issue is probably in my training loop or in the model itself, but I cannot find it.
Again, something must be going wrong, because:
- the Keras model does perform well;
- training 140K sentences is "too fast";
- there is almost no improvement after training.
What am I missing? The issue is most likely located in the training loop or in the network structure.
TL;DR: use permute instead of view when swapping axes; see the end of this answer for an intuition about the difference between the two.
Regarding RegressorNet (the neural network model): there is no need to freeze the embedding layer if you are using from_pretrained. As the documentation states, it does not use gradient updates.
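For example, a minimal sketch (the weight matrix here is a random placeholder): the freeze argument of from_pretrained defaults to True, so the extra requires_grad line in your __init__ is redundant:
import torch
from torch import nn

# Placeholder for the real word2vec weight matrix (vocab_size x embedding_dim)
embed_weights = torch.randn(1000, 146)

# freeze=True is the default: the embedding will not receive gradient updates
frozen_embeddings = nn.Embedding.from_pretrained(embed_weights, freeze=True)

print(frozen_embeddings.weight.requires_grad)  # False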
This part:
self.w2v_rnode = nn.GRU(embeddings.size(1), hidden_dim, bidirectional=True, dropout=drop_prob)
In particular, passing dropout without providing num_layers is completely pointless, since no dropout can be applied to a shallow one-layer network.
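As a small sketch (dimensions taken from your shape comments): recurrent dropout only acts between stacked layers, so it only has an effect together with num_layers greater than 1:
from torch import nn

# dropout here is a no-op (PyTorch even warns about it), because there is only one layer
gru_shallow = nn.GRU(146, 200, bidirectional=True, dropout=0.2)

# dropout is actually applied between the two stacked layers
gru_stacked = nn.GRU(146, 200, num_layers=2, bidirectional=True, dropout=0.2)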
BUG AND MAIN ISSUE: in your forward function you are using view instead of permute, here:
w2v_out, _ = self.w2v_rnode(embeds.view(-1, batch_size, embeds.size(2)))
See this answer and the respective documentation for each of those functions, and try to use this line instead:
w2v_out, _ = self.w2v_rnode(embeds.permute(1, 0, 2))
You may also consider using the batch_first=True argument when creating w2v_rnode; then you do not have to permute the axes at all.
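A minimal sketch of that alternative (the sizes are simply the ones from your shape comments):
import torch
from torch import nn

embedding_dim, hidden_dim, batch_size, seq_len = 146, 200, 128, 20

# batch_first=True makes the GRU expect input of shape (batch, seq, features)
gru = nn.GRU(embedding_dim, hidden_dim, bidirectional=True, batch_first=True)

embeds = torch.randn(batch_size, seq_len, embedding_dim)  # as returned by nn.Embedding
out, hidden = gru(embeds)  # no view/permute needed

print(out.shape)     # torch.Size([128, 20, 400])
print(hidden.shape)  # torch.Size([2, 128, 200])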
Also, check the documentation of torch.nn.GRU: you are after the last step of the sequence, not after all of the sequences you have there, so you are probably after something like:
_, last_hidden = self.w2v_rnode(embeds.permute(1, 0, 2))
But I think this part is fine otherwise.
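To make that concrete, here is a small sketch (my own illustration of how the result could feed the linear layer, not part of your original code): for a bidirectional single-layer GRU, last_hidden has shape (2, batch, hidden_dim), one slice per direction, so you would concatenate both directions before the regression layer:
import torch
from torch import nn

hidden_dim, batch_size = 200, 128
gru = nn.GRU(146, hidden_dim, bidirectional=True, batch_first=True)
linear = nn.Linear(hidden_dim * 2, 1)

embeds = torch.randn(batch_size, 20, 146)
_, last_hidden = gru(embeds)  # last_hidden: torch.Size([2, 128, 200])

# Concatenate the final forward and backward hidden states of each example
features = torch.cat((last_hidden[0], last_hidden[1]), dim=1)  # torch.Size([128, 400])
regression = linear(features)  # torch.Size([128, 1])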
Regarding RegressionRNN and the data preparation: no offence, but prepare_lines is very unreadable and seems very hard to maintain as well, not to mention spotting an eventual bug (I suppose it lies somewhere in there).
First of all, it looks like you are padding manually. Please don't do it that way; use torch.nn.utils.rnn.pad_sequence to work with batches!
In essence, you first encode each word in every sentence as an index into the embedding (as you do in prepare_w2v), and after that you use torch.nn.utils.rnn.pad_sequence and torch.nn.utils.rnn.pack_padded_sequence, or torch.nn.utils.rnn.pack_sequence if the lines are already sorted by length.
Proper batching: this part is very important and it looks like you are not doing it at all (and it is likely the second error in your implementation).
PyTorch's RNN cells take their input not as padded tensors, but as torch.nn.utils.rnn.PackedSequence objects. This is an efficient object that stores the indices specifying the unpadded length of each sequence.
See more information on this topic here, here, and in many other blog posts around the web.
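A small sketch of what packing looks like in practice (the sentences and indices are toy values, just to illustrate the idea):
import torch
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence

# Three "sentences" of different lengths, already converted to embedding indices
sentences = [torch.tensor([4, 8, 15, 16]), torch.tensor([23, 42]), torch.tensor([7, 7, 7])]
lengths = torch.tensor([len(s) for s in sentences])

# Sort by decreasing length, which pack_padded_sequence expects by default
lengths, sorter = lengths.sort(descending=True)
sentences = [sentences[i] for i in sorter]

padded = pad_sequence(sentences, batch_first=True)               # shape: (3, 4), padded with zeros
packed = pack_padded_sequence(padded, lengths, batch_first=True)

print(packed.data)         # flat tensor without the padding entries
print(packed.batch_sizes)  # how many sequences are still "alive" at each time step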
The first sequence in the batch has to be the longest, and all other sequences have to be provided in decreasing length. That means you either sort each batch (and your targets) by sequence length up front, or you sort the batch, push it through the network and unsort it afterwards to match your targets. Either way is fine; it's your call which seems more intuitive to you. What I like to do is more or less the following, hope it helps:
1. Create a regular torch.utils.data.Dataset returning a single sentence per __getitem__, as a tuple of features (torch.Tensor) and a label (a single value); it looks like you are doing that as well.
2. Create a custom collate_fn to be used with torch.utils.data.DataLoader, which is responsible for sorting and padding each batch in this scenario (+ it returns the length of each sentence, to be passed on to the neural network).
3. Inside the neural network's forward method, use torch.nn.utils.rnn.pack_sequence / pack_padded_sequence on the sorted and padded features (do it after the embedding!) to push them through the RNN layer.
Speaking of the third point, here is a sample implementation of collate_fn; you should get the idea:
import torch


def length_sort(features):
    # Get length of each sentence in the batch
    sentences_lengths = torch.tensor(list(map(len, features)))
    # Get indices which sort the sentences based on descending length
    _, sorter = sentences_lengths.sort(descending=True)
    # Pad batch as you have the lengths and sorter saved already
    padded_features = torch.nn.utils.rnn.pad_sequence(features, batch_first=True)
    return padded_features, sentences_lengths, sorter


def pad_collate_fn(batch):
    # DataLoader returns the batch like that unluckily, check it on your own
    features, labels = (
        [element[0] for element in batch],
        [element[1] for element in batch],
    )
    padded_features, sentences_lengths, sorter = length_sort(features)
    # Sort features and labels by length accordingly
    sorted_padded_features, sorted_labels = (
        padded_features[sorter],
        torch.tensor(labels)[sorter],
    )
    # Return the lengths in the same (sorted) order as the features and labels
    return sorted_padded_features, sorted_labels, sentences_lengths[sorter]
Use a collate_fn like this one with your DataLoaders and you should be just about fine (maybe with minor adjustments, so it is essential that you understand the idea behind it).
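For completeness, a hedged, self-contained sketch of how the pieces could fit together with pad_collate_fn from above (the tiny vocabulary, dimensions and dataset below are made up purely for illustration):
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence

# Toy stand-in for the word2vec matrix: 10 "words" of dimension 146, row 0 reserved for padding
embed_weights = torch.randn(10, 146)
embed_weights[0] = 0.0

# Toy dataset: (tensor of word indices, float label) per item, with variable lengths
toy_data = [(torch.tensor([1, 2, 3, 4]), 0.5),
            (torch.tensor([5, 6]), -1.2),
            (torch.tensor([7, 8, 9]), 0.1)]

loader = DataLoader(toy_data, batch_size=3, collate_fn=pad_collate_fn)

embedding = nn.Embedding.from_pretrained(embed_weights)
gru = nn.GRU(146, 200, bidirectional=True, batch_first=True)

for features, labels, lengths in loader:
    embedded = embedding(features)                                     # (batch, seq, 146)
    packed = pack_padded_sequence(embedded, lengths, batch_first=True)
    _, last_hidden = gru(packed)                                       # padding steps are skipped
    # last_hidden: (2, batch, 200) -> concatenate and feed into the regression head as before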
Training loop: this is a great place for a lot of small errors; you may want to minimise them by using PyTorch Ignite. I am having an incredibly hard time going through your Tensorflow-like-Estimator-like training loop (e.g. things like self.model = self.w2v_vocab = self.criterion = self.optimizer = self.scheduler = None). Please don't do it this way: separate each task (data creation, data loading, data preparation, model setup, training loop, logging) into its own respective module. All in all, there is a reason why PyTorch/Keras is more readable and sanity-preserving than Tensorflow.
Make the first row of your embeddings equal to a vector of zeros: by default, torch.nn.functional.embedding expects the first row to be used for padding. Hence, you should start your unique indexing for each word at 1, or specify padding_idx with a different value (though I strongly discourage that approach; it is confusing at best).
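A small sketch of that convention (my own illustration, with made-up shapes): keep index 0 as an all-zero padding row and point the embedding at it via padding_idx:
import torch
from torch import nn

# Pretend word2vec matrix for 5 real words, 146 dimensions each
w2v_vectors = torch.randn(5, 146)

# Prepend an all-zero row at index 0, reserved for @pad@
weights = torch.cat((torch.zeros(1, 146), w2v_vectors), dim=0)

# Real word indices therefore start at 1; index 0 stays at zero and marks padding
embedding = nn.Embedding.from_pretrained(weights, padding_idx=0)

print(embedding(torch.tensor([0, 3])))  # the first of the two returned rows is all zeros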
I hope this answer helps you at least a little bit; if something is unclear, leave a comment below and I will try to explain it from a different perspective or in more detail.
Some final remarks: this code is not reproducible, nor is the question specific. We do not have the data you are using, nor do we have your word vectors, the random seed is not fixed, and so on.
PS. One last thing: check the performance of your model on a really small subset of your data (say, 96 examples); if it does not converge there, it is very likely that you indeed have a bug in your code.
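A hedged sketch of that sanity check (the model and data below are placeholders, not your setup): repeatedly fit one tiny batch and watch whether the loss drops towards zero:
import torch
from torch import nn, optim

# Tiny synthetic regression problem standing in for ~96 real examples
features = torch.randn(96, 20)
targets = torch.randn(96, 1)

model = nn.Sequential(nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 1))
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

for step in range(500):
    optimizer.zero_grad()
    loss = criterion(model(features), targets)
    loss.backward()
    optimizer.step()
    if step % 100 == 0:
        print(step, loss.item())

# If the loss does not get close to zero on a set this small,
# something in the model or the training loop is almost certainly broken.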
About the epoch times: they are probably off (due to the missing sorting and packing, I suppose); given correct and efficient implementations, Keras and PyTorch times are usually quite similar (if I understood that part of your question correctly).
This simple example shows the difference between permute() and view(). The first one swaps axes, while the second one does not change the memory layout; it just chunks the array into the desired shape (if possible).
import torch
a = torch.tensor([[1, 2], [3, 4], [5, 6]])
print(a)
print(a.permute(1, 0))
print(a.view(2, 3))
The output will be:
tensor([[1, 2],
[3, 4],
[5, 6]])
tensor([[1, 3, 5],
[2, 4, 6]])
tensor([[1, 2, 3],
[4, 5, 6]])
reshape is almost like view; it was added for people coming from numpy, so it is easier and more natural for them, but it has one important difference:
- view never copies data and works only on contiguous memory (so after a permutation like the one above your data may not be contiguous, and hence access to it may be slower);
- reshape can copy the data if needed, so it also works for non-contiguous arrays.
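A small illustration of that difference (my own example): after permute the tensor is no longer contiguous, so view refuses to work, while reshape (or .contiguous().view(...)) still does:
import torch

a = torch.tensor([[1, 2], [3, 4], [5, 6]])
b = a.permute(1, 0)               # only changes the strides, not the underlying storage

print(b.is_contiguous())          # False

print(b.reshape(3, 2))            # works: copies the data into a new contiguous layout
print(b.contiguous().view(3, 2))  # equivalent workaround for view

# b.view(3, 2)                    # would raise a RuntimeError: view requires contiguous memory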