TP_IA/main.py
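
"""Train a small fully connected classifier on the CIFAR-10 dataset.

Training runs epoch after epoch, checkpointing to model.pth, until the
model reaches 90% accuracy on the test set.
"""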


from os.path import isfile

import torch
from numpy import prod
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor

device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device} device")

def get_data(batch_size: int = 64):
    # Download training data from open datasets.
    training_data = datasets.CIFAR10(
        root="/home/flifloo/IA/data",
        train=True,
        download=True,
        transform=ToTensor(),
    )

    # Download test data from open datasets.
    testing_data = datasets.CIFAR10(
        root="/home/flifloo/IA/data",
        train=False,
        download=True,
        transform=ToTensor(),
    )

    # Create data loaders.
    train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
    test_dataloader = DataLoader(testing_data, batch_size=batch_size, shuffle=True)
    return train_dataloader, test_dataloader

def generate_layers(inp: int, output: int):
    layers = 2
    conns = (inp + output) * 2
    stack = [nn.Linear(inp, conns), nn.ReLU()]
    print(f"input: {inp}, output: {output}, layers: {layers}, conns: {conns}")
    print("Generating stack...")
    for _ in range(layers):
        stack.append(nn.Linear(conns, conns))
        stack.append(nn.ReLU())
    # No activation after the output layer: CrossEntropyLoss expects raw logits.
    stack.append(nn.Linear(conns, output))
    print("Stack generated")
    return stack
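
# For CIFAR-10, generate_layers builds 3072 -> 6164 -> 6164 -> 6164 -> 10:
# inp = 3*32*32 = 3072, output = 10 classes, conns = (3072 + 10) * 2 = 6164.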

# Define model
class NeuralNetwork(nn.Module):
    def __init__(self, stack):
        super().__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(*stack)

    def forward(self, x):
        # Flatten each 3x32x32 image to a 3072-dim vector before the MLP.
        return self.linear_relu_stack(self.flatten(x))

def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    model.train()  # test() switches to eval mode, so switch back here
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # Compute prediction error
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")

def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    # Average the loss over batches and the accuracy over samples.
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
    return correct

def training():
    train_data, test_data = get_data()
    stack = generate_layers(prod(test_data.dataset.data[0].shape), len(test_data.dataset.classes))
    model = NeuralNetwork(stack).to(device)

    if isfile("model.pth"):
        print("Loading model from save")
        model.load_state_dict(torch.load("model.pth", map_location=device))

    print(model)

    loss_fn = nn.CrossEntropyLoss()
    # The learning rate trades off over- vs. under-fitting.
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=.9)

    # Keep training, checkpointing after every epoch, until the model
    # reaches 90% test accuracy.
    e = 0
    c = 0
    while c < 0.90:
        print(f"Epoch {e+1}\n-------------------------------")
        train(train_data, model, loss_fn, optimizer)
        c = test(test_data, model, loss_fn)
        torch.save(model.state_dict(), "model.pth")
        e += 1
    print("Done!")

if __name__ == '__main__':
    training()