如何安装gdb从而能一路debug from python to C/C++
全流程梳理pytorch 多分类建模
代码
二元分类问题的Loss设定的注意事项:
代码3
为什么pytorch对beginner更友好
解读
# Two alternative ways to backpropagate (illustrative — not meant to run
# back-to-back, since the graph is freed after the first backward call):
# with no argument y must be a scalar; for a non-scalar y an explicit
# gradient tensor of matching shape is required.
y.backward()
y.backward(torch.FloatTensor(x.size()))  # fixed: snippet was missing the closing ')'
如何展示处理中间层的输入和输出值
# Attach hooks to inspect conv2's intermediate values; printnorm and
# printgradnorm are user-defined callbacks defined elsewhere in the file.
net.conv2.register_forward_hook(printnorm)  # runs after every forward pass through conv2
net.conv2.register_backward_hook(printgradnorm)  # runs when gradients w.r.t. conv2 are computed
如何查看某一层的parameters 代码文档
# Inspect a module's learnable parameters (assumes this runs inside an
# nn.Module method, where `self` is the network).
# NOTE(review): despite the name, self.parameters() yields ALL of the
# module's parameters, not only conv2's — confirm intent against the notes.
conv2_param_list = list(self.parameters())
len(conv2_param_list)       # idiomatic len(...) instead of .__len__()
conv2_param_list[0].size()  # shape of the first parameter tensor
transfer_learning_tutorial
如何叠加多个图片transformation 代码文档
# Per-split preprocessing pipelines (reconstructed: the transcription dropped
# every closing `])` and the dict's closing `}`).  The mean/std values are the
# standard ImageNet normalization statistics.
# NOTE(review): RandomSizedCrop and Scale are old torchvision names (renamed
# RandomResizedCrop / Resize in later releases) — kept to match the notes' API.
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomSizedCrop(224),    # augmentation: random crop, resized to 224
        transforms.RandomHorizontalFlip(),  # augmentation: flip half the images
        transforms.ToTensor(),              # HWC uint8 -> CHW float in [0, 1]
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]),
    'val': transforms.Compose([
        transforms.Scale(256),              # deterministic resize for evaluation
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]),
}
ImageFolder如何将图片folder转化成模型数据格式
# Build one ImageFolder dataset per split; class labels are inferred from the
# sub-directory names under <data_dir>/<split>/.
data_dir = '/Users/Natsume/Desktop/data/hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])  # split-specific preprocessing pipeline
for x in ['train', 'val']}
如何将变量信息注入到时间序列的颜色中
# Draw the close-price series segment by segment, coloring each segment by an
# external variable (reconstructed: loop indentation was lost in transcription).
ax1 = plt.subplot2grid((2, 1), (0, 0), colspan=1, rowspan=1)
ax1.set_title("original close price with mv_avg_volume window %d" % vol_window)
# Assumes xy is a sequence of (x, y) points and color_data supplies one value
# per consecutive segment — TODO confirm against the code that builds them.
for start, stop, col in zip(xy[:-1], xy[1:], color_data):
    x, y = zip(start, stop)  # unzip the two endpoints into x-coords and y-coords
    ax1.plot(x, y, color=uniqueish_color3(col))
如何使用dataloader来做批量和随机
# One DataLoader per split: batches of 4, reshuffled each epoch, loaded by
# 4 worker subprocesses.  NOTE(review): the name is misspelled ("dataloders")
# but is kept, since later snippets in these notes refer to this spelling.
dataloders = {x: torch.utils.data.DataLoader(
image_datasets[x], batch_size=4, shuffle=True, num_workers=4)
for x in ['train', 'val']}
给一个小批量图做plotting
def imshow(inp, title=None):
    """Display a (C, H, W) image tensor that was normalized with ImageNet
    mean/std (e.g. the output of torchvision.utils.make_grid).

    Args:
        inp: torch tensor of shape (C, H, W).
        title: optional plot title.
    """
    inp = inp.numpy().transpose((1, 2, 0))  # CHW -> HWC, as matplotlib expects
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean  # undo Normalize
    # Fixed: denormalization can push values slightly outside [0, 1], which
    # makes matplotlib warn and clip; clip explicitly instead.
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # give the GUI event loop a moment to actually draw
# Grab a single batch from the training loader, tile it into one grid image,
# and display it with the predicted class names as the title.
inputs, classes = next(iter(dataloders['train']))
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
直接调用著名模型及其参数来用
# Download (if not cached) and load ResNet-18 with ImageNet-pretrained weights.
model_ft = models.resnet18(pretrained=True)
调用的著名模型内部构造
# Same constructor call; printing model_ft afterwards shows its layer structure.
model_ft = models.resnet18(pretrained=True)
量身修改训练好的高级模型
# Replace the final fully-connected layer with a fresh 2-class head.
# num_ftrs is presumably model_ft.fc.in_features — TODO confirm.
model_ft.fc = nn.Linear(num_ftrs, 2)
调试优化算法的LR的用途用法结构
# Decay every parameter group's learning rate by gamma (x0.1) every 7 epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
train_model定制训练函数的结构
# Skeleton of train_model's triple loop (reconstructed: indentation was lost
# in transcription and the inner loop body was elided, leaving an empty suite).
for epoch in range(num_epochs):
    for phase in ['train', 'val']:  # one training pass, then one validation pass
        for data in dataloders[phase]:
            # -0.26s val
            pass  # per-batch forward / backward / optimizer step goes here
scheduler.step和model.train用法
def step(self, epoch=None):
    """Advance the scheduler by one epoch and write the freshly computed
    learning rate(s) into every parameter group of the wrapped optimizer.
    (Reconstructed: indentation was lost in transcription.)

    Args:
        epoch: epoch index to jump to; defaults to last_epoch + 1.
    """
    if epoch is None:
        epoch = self.last_epoch + 1
    self.last_epoch = epoch
    # get_lr() returns one rate per parameter group, in matching order.
    for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
        param_group['lr'] = lr
def get_lr(self):
    """Return the step-decayed learning rate for every parameter group:
    base_lr * gamma ** (last_epoch // step_size).
    (Reconstructed: indentation was lost in transcription.)
    """
    return [base_lr * self.gamma ** (self.last_epoch // self.step_size)
            for base_lr in self.base_lrs]
def train(self, mode=True):
    """Set the module, and all child modules recursively, to training mode.

    Only affects modules whose behavior differs between training and
    evaluation, such as Dropout or BatchNorm.  (Fixed: the transcribed
    docstring was never closed and indentation was lost.)

    Args:
        mode: True for training mode, False for evaluation mode.

    Returns:
        self, so calls can be chained.
    """
    self.training = mode
    for module in self.children():
        module.train(mode)
    return self
借用的高级模型的大部分参数如何保持不变
# Freeze this parameter so backward/optimizer steps leave it unchanged
# (typically executed inside `for param in model.parameters():`).
param.requires_grad = False
训练完成后画一个批量的图
# Show the model's predictions for one batch in a grid of subplots
# (reconstructed: loop indentation was lost in transcription).
# num_images, images_so_far, preds and class_names are assumed to come from
# the enclosing visualize-predictions routine — TODO confirm.
for j in range(inputs.size()[0]):
    images_so_far += 1
    ax = plt.subplot(num_images // 2, 2, images_so_far)
    ax.axis('off')
    ax.set_title('predicted: {}'.format(class_names[preds[j]]))
    imshow(inputs.cpu().data[j])
如何构建自己的dataset class
# Wrap pre-built feature/target tensors in a Dataset, then batch and shuffle
# them with a DataLoader.
from torch.utils.data import TensorDataset, DataLoader
# .data unwraps a Variable into its underlying tensor (pre-0.4 PyTorch API).
train_dataset = TensorDataset(train_features.data, train_targets.data)
train_loader = DataLoader(train_dataset, batch_size=64,
shuffle=True, num_workers=1)
让pytorch使用tensorboard 代码文档
让pytorch使用tensorboard
1. torchvision.datasets.MNIST()
1. iter(data_loader): 构建iterator
2. tensor.view == np.reshape
3. argmax.squeeze() 去除(n, m, 1)中的1
4. tensor.float(): 改变type
5. logger:
plot curves: loss, acc are scalar;
plot histogram: params, grads, np.array;
plot images: from tensor to (m, h, w)
AI-challenger stock
数据处理准备 代码文档 冗长解读
模型1:训练代码流程 Training代码 冗长解读
模型2:Predict代码 解读