Advanced use of the PyTorch library: from data preparation to visualization

PyTorch is a modern open-source machine learning library developed by Facebook. Like other popular libraries such as TensorFlow and Keras, PyTorch lets you harness the processing power of graphics cards, automatically build a computation graph, differentiate it, and evaluate it. But unlike those libraries, it offers more flexible functionality because it uses a dynamic computation graph.
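To make the dynamic graph idea concrete, here is a minimal autograd sketch (an illustration of ours, not code from the article): the graph is recorded as the operations execute and can be differentiated immediately afterwards.

import torch

# The graph is built on the fly as the operations run
x = torch.tensor([2.0, 3.0], requires_grad=True)
y = (x ** 2).sum()   # y = x1^2 + x2^2
y.backward()         # differentiate the recorded graph
print(x.grad)        # tensor([4., 6.]) -- dy/dx = 2*x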





In this article we will walk through a typical PyTorch workflow from start to finish: generating and preparing data, wrapping it in PyTorch's Dataset and DataLoader abstractions, defining a simple fully connected network, writing a reusable training loop with early stopping, and visualizing both the loss curves and the resulting decision boundary. The classification problem itself is deliberately simple, so that we can focus on PyTorch.





First, import the libraries we will need:





import torch
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
import copy
import datetime as dt
import pandas as pd
import numpy as np
      
      



We generate the data with the make_moons function from sklearn, which produces two interleaving half-moons. The amount of Gaussian noise added to the points is controlled by the noise parameter:





X, y = make_moons(n_samples=150, random_state=33, noise=0.2)
      
      



Let's put the data into a pandas.DataFrame and look at the first rows:





df = pd.DataFrame(X, columns=['x1', 'x2'])
df['target'] = y
df.head(5)
      
      



We split the sample into training and test parts in a 6:4 ratio using train_test_split from sklearn:





X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=33)
      
      



Let's visualize both parts:





fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12,5))

ax1.scatter(X_train[:,0][y_train == 0], X_train[:,1][y_train == 0], c='red', marker='o', edgecolors = 'black', alpha = 0.6)
ax1.scatter(X_train[:,0][y_train == 1], X_train[:,1][y_train == 1], c='green', marker = 'o', edgecolors = 'black', alpha = 0.6)
ax1.set_title('Train', fontsize=20)

ax2.scatter(X_test[:,0][y_test == 0], X_test[:,1][y_test == 0], c='red', marker='o', edgecolors = 'black', alpha = 0.6)
ax2.scatter(X_test[:,0][y_test == 1], X_test[:,1][y_test == 1], c='green', marker = 'o', edgecolors = 'black', alpha = 0.6)
ax2.set_title('Test', fontsize=20);
      
      



Next we declare a MyDataset class inherited from torch.utils.data.Dataset. It must override the __len__ and __getitem__ methods, which return the size of the dataset and an individual (object, label) pair, respectively:





class MyDataset(Dataset):

    def __init__(self, X, y):
        self.X = torch.Tensor(X)              # features as a float32 tensor
        self.y = torch.from_numpy(y).float()  # labels as a float32 tensor

    def __len__(self):
        # number of objects in the dataset
        return self.X.shape[0]

    def __getitem__(self, index):
        # a single (object, label) pair
        return (self.X[index], self.y[index])
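As a quick sanity check (our own illustrative snippet, not part of the original listing), we can wrap the training data and index into it:

train_ds = MyDataset(X_train, y_train)
print(len(train_ds))  # 90 objects in the training part
print(train_ds[0])    # (tensor with features x1 and x2, label tensor)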
      
      



Now the network itself. As the activation function we use ReLU, and the output is passed through a sigmoid. The first layer takes the two features x1 and x2 into 50 neurons. The second layer maps 50 neurons to 50. The third takes 50 neurons to a single value between 0 and 1 (the predicted probability of class «1»).





import torch.nn as nn

ReLU = nn.ReLU()

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(2, 50)   # 2 input features -> 50 neurons
        self.fc2 = nn.Linear(50, 50)  # 50 -> 50
        self.fc3 = nn.Linear(50, 1)   # 50 -> 1 output value

    def forward(self, x):
        x = ReLU(self.fc1(x))
        x = ReLU(self.fc2(x))
        x = torch.sigmoid(self.fc3(x))  # squash the output into (0, 1)
        return x.view(-1)               # flatten to a 1-D batch of probabilities
      
      



Let's create an instance of the network and print its structure:





net = Net()
print(net)
      
      



Net(
  (fc1): Linear(in_features=2, out_features=50, bias=True)
  (fc2): Linear(in_features=50, out_features=50, bias=True)
  (fc3): Linear(in_features=50, out_features=1, bias=True)
)
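As a small aside (not in the original listing), the trainable parameters can be counted to double-check the architecture:

# (2*50 + 50) + (50*50 + 50) + (50*1 + 1) = 2751
n_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print(n_params)  # 2751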
      
      



Now let's write a Trainer class with an interface similar to sklearn estimators: it exposes fit and predict methods, records the loss on the training and validation sets, and supports early stopping. The docstring describes all of its parameters.





class Trainer():
    """
    Parameters:
        dataset: Dataset class used to wrap the data
        net: the neural network to train
        loss_f: loss function
        learning_rate: learning rate
        epoch_amount: maximum number of epochs
        batch_size: batch size
        max_batches_per_epoch: maximum number of batches per epoch
                                    (None means all batches are used)
        device: device to compute on ('cpu' or 'cuda:*')
        early_stopping: number of epochs without improvement on the
                        validation set after which training is stopped
        optim: optimizer
        scheduler: learning rate scheduler
        permutate: whether to shuffle the training data each epoch

    Attributes:
        start_model: the network as it was passed in
        best_model: the model that achieved the lowest loss
                    on the validation set
        train_loss: mean loss on the training set for each epoch,
                    in chronological order
        val_loss: mean loss on the validation set for each epoch,
                  in chronological order

    Methods:
        fit: train the network
        predict: predict with the best model

    """
    def __init__(self,  dataset, net, loss_f, learning_rate=1e-3, 
                epoch_amount=10, batch_size=12, 
                max_batches_per_epoch=None,
                device='cpu', early_stopping=10, 
                optim=torch.optim.Adam, 
                scheduler=None, permutate=True):
        
        self.loss_f = loss_f
        self.learning_rate = learning_rate
        self.epoch_amount = epoch_amount
        self.batch_size = batch_size
        self.max_batches_per_epoch = max_batches_per_epoch
        self.device = device
        self.early_stopping = early_stopping
        self.optim = optim
        self.scheduler = scheduler
        self.permutate = permutate
        self.dataset = dataset
        self.start_model = net
        self.best_model = net

        self.train_loss = []
        self.val_loss = []

    def predict(self, X):
        return self.best_model(X)

    def fit(self, X_train, X_test, y_train, y_test):

        Net = self.start_model
            
        device = torch.device(self.device)

        Net.to(self.device)

        optimizer = self.optim(Net.parameters(), lr=self.learning_rate)
        
        if self.scheduler is not None:
            scheduler = self.scheduler(optimizer)

        train = self.dataset(X_train, y_train)
        val = self.dataset(X_test, y_test)  

        train = DataLoader(train, batch_size=self.batch_size, shuffle=self.permutate) 
        val = DataLoader(val, batch_size=self.batch_size, shuffle=False)

        best_val_loss = float('inf') # lowest loss on the validation
                                     # set observed so far
        best_ep = 0                  # epoch at which the best
                                     # validation loss was achieved

        for epoch in range(self.epoch_amount): 
            start = dt.datetime.now()
            print(f'Epoch: {epoch}', end=' ')
            Net.train()
            mean_loss = 0
            batch_n = 0

            for batch_X, target in train:
                if self.max_batches_per_epoch is not None:
                    if batch_n >= self.max_batches_per_epoch:
                        break
                optimizer.zero_grad()

                batch_X = batch_X.to(self.device)
                target = target.to(self.device)

                predicted_values = Net(batch_X)
                loss = self.loss_f(predicted_values, target)
                loss.backward()
                optimizer.step()

                mean_loss += float(loss)
                batch_n += 1
        
            mean_loss /= batch_n
            self.train_loss.append(mean_loss)
            print(f'Loss_train: {mean_loss}, {dt.datetime.now() - start} ')

            Net.eval()
            mean_loss = 0
            batch_n = 0

            with torch.no_grad():
                for batch_X, target in val:
                    if self.max_batches_per_epoch is not None:
                        if batch_n >= self.max_batches_per_epoch:
                            break
                    batch_X = batch_X.to(self.device)
                    target = target.to(self.device)

                    predicted_values = Net(batch_X)
                    loss = self.loss_f(predicted_values, target)

                    mean_loss += float(loss)
                    batch_n += 1
        
            mean_loss /= batch_n
            self.val_loss.append(mean_loss)
            print(f'Loss_val: {mean_loss}')

            if mean_loss < best_val_loss:
                self.best_model = copy.deepcopy(Net)  # keep a snapshot of the weights, not a reference
                best_val_loss = mean_loss
                best_ep = epoch
            elif epoch - best_ep > self.early_stopping:
                print(f'Validation loss has not improved for {self.early_stopping} epochs. Stopping training...')
                break
            if self.scheduler is not None:
                scheduler.step()
            print()
      
      



Everything the class needs is passed to __init__. The train_loss and val_loss lists accumulate the mean loss for each epoch on the training and validation sets, so the training history can be plotted later.





The predict method returns the predictions of the best model found so far. The fit method does the actual training. First we choose the device on which the computations will run ('cpu' for the processor or 'cuda:*', where * is the index of the GPU) and move the network to it.
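For example, a common pattern (our assumption, not part of the original code) is to fall back to the CPU when no GPU is present:

# Pick the first GPU if CUDA is available, otherwise the CPU
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'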





Then we create the optimizer; any optimizer from torch.optim will do, and torch.optim.Adam is used by default. A scheduler from torch.optim.lr_scheduler can optionally be supplied to change the learning rate during training.
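Note that Trainer calls self.scheduler(optimizer), so the scheduler must be passed as a callable that takes only the optimizer. A sketch of how one might do this with functools.partial (our suggestion; the step size and gamma values are arbitrary):

from functools import partial

# A StepLR created this way halves the learning rate every 100 epochs;
# pass it to Trainer as params['scheduler'] = scheduler
scheduler = partial(torch.optim.lr_scheduler.StepLR, step_size=100, gamma=0.5)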





Next, the training and validation data are wrapped in dataset objects (instances of MyDataset in our case).





These datasets are passed to torch.utils.data.DataLoader, which splits the data into batches (and shuffles them if requested). DataLoader accepts any object implementing the torch.utils.data.Dataset interface (such as our MyDataset) and yields the batches as torch.Tensor objects.
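To see what the loader actually yields, a short illustrative snippet (ours, not from the article):

loader = DataLoader(MyDataset(X_train, y_train), batch_size=12, shuffle=True)
batch_X, batch_y = next(iter(loader))
print(batch_X.shape, batch_y.shape)  # torch.Size([12, 2]) torch.Size([12])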





Now the training loop itself. At the start of each epoch the network is switched into training mode with Net.train() (this matters for layers such as dropout and batch normalization, which behave differently during training and inference). Then we iterate over the batches.





For every batch from the train dataloader we first call optimizer.zero_grad(). This is necessary because PyTorch accumulates gradients by default rather than overwriting them. We then run the forward pass, compute the loss, call loss.backward() to backpropagate, and optimizer.step() to update the weights.
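In isolation, one iteration of that inner loop looks like this (a sketch reusing net from above and batch_X, batch_y from the DataLoader snippet):

optimizer = torch.optim.SGD(net.parameters(), lr=1e-2)
loss_f = nn.BCELoss()

optimizer.zero_grad()          # reset the accumulated gradients
pred = net(batch_X)            # forward pass
loss = loss_f(pred, batch_y)   # scalar loss for the batch
loss.backward()                # backpropagation
optimizer.step()               # weight update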





After the training phase, the network is switched into evaluation mode with Net.eval() and run on the validation set inside torch.no_grad(), so no gradients are computed and memory is saved. If the mean validation loss is the best seen so far, the current model is stored in best_model.

Training runs for at most epoch_amount epochs, but stops earlier if the validation loss has not improved for more than early_stopping epochs in a row.
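The corresponding evaluation pattern, shown standalone (a sketch; our Net has no dropout or batch normalization, but switching modes is good practice anyway):

net.eval()               # inference behaviour for mode-dependent layers
with torch.no_grad():    # no graph is recorded, which saves memory
    val_pred = net(batch_X)
net.train()              # back to training mode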





Now that everything is ready, let's train the network.





As the loss function we take binary cross-entropy, torch.nn.BCELoss, and as the optimizer, stochastic gradient descent, torch.optim.SGD.





Set the parameters and call fit:





params = {
    'dataset': MyDataset,
    'net': net,
    'epoch_amount': 1000, 
    'learning_rate': 1e-2,
    'early_stopping': 25,
    'loss_f': nn.BCELoss(),
    'optim': torch.optim.SGD,
}

clf = Trainer(**params)
clf.fit(X_train, X_test, y_train, y_test)
      
      








Let's see how the loss on the training and validation sets changed during training:





def plot_loss(Loss_train, Loss_val):
    plt.figure(figsize=(12, 5))
    plt.plot(range(len(Loss_train)), Loss_train, color='orange', label='train', linestyle='--')
    plt.plot(range(len(Loss_val)), Loss_val, color='blue', marker='o', label='val')
    plt.legend()
    plt.show()

plot_loss(clf.train_loss, clf.val_loss)
      
      



Finally, let's draw the decision boundary learned by the network on top of the train and test points:





def make_meshgrid(x1, x2, h=.02):
    x1_min, x1_max = x1.min() - 2, x1.max() + 2
    x2_min, x2_max = x2.min() - 2, x2.max() + 2
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, h), np.arange(x2_min, x2_max, h))
    return xx1, xx2

def plot_contours(ax, xx1, xx2, **params):
    C = clf.predict(torch.Tensor(np.c_[xx1.ravel(), xx2.ravel()])).detach().numpy()
    C = C.reshape(xx1.shape)
    out = ax.contourf(xx1, xx2, C, **params)
    return out

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12,5))

xx1, xx2 = make_meshgrid(X[:, 0], X[:, 1])  # all x1 values and all x2 values

ax1.scatter(X_train[:,0][y_train == 0], X_train[:,1][y_train == 0], c='red', marker='o', edgecolors = 'black', alpha = 0.6)
ax1.scatter(X_train[:,0][y_train == 1], X_train[:,1][y_train == 1], c='green', marker = 'o', edgecolors = 'black', alpha = 0.6)
ax1.set_title('Train', fontsize=20)
plot_contours(ax1, xx1, xx2, alpha=0.2)

ax2.scatter(X_test[:,0][y_test == 0], X_test[:,1][y_test == 0], c='red', marker='o', edgecolors = 'black', alpha = 0.6)
ax2.scatter(X_test[:,0][y_test == 1], X_test[:,1][y_test == 1], c='green', marker = 'o', edgecolors = 'black', alpha = 0.6)
ax2.set_title('Test', fontsize=20)
plot_contours(ax2, xx1, xx2, alpha=0.2);
      
      



In one region of the plane the network predicts class «1», in the other class «0», and the transition between them is smooth, since the output of the sigmoid changes continuously. The filled contours show this predicted probability over both the training and the test points.
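To put a number on what the plots show, the test accuracy can be computed from the predicted probabilities (an illustrative snippet of ours, not from the original article):

with torch.no_grad():
    probs = clf.predict(torch.Tensor(X_test))
preds = (probs > 0.5).float().numpy()  # threshold the probabilities at 0.5
print('Test accuracy:', (preds == y_test).mean())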







