
[Pytorch] Basic Neural Network


    Basic Neural Network Template

    import torch
    import torch.nn.functional as F
    
    # Define the model
    class NeuralNet(torch.nn.Module):
        def __init__(self, input_size, hidden_size):
            super(NeuralNet, self).__init__()
            self.input_size = input_size
            self.hidden_size = hidden_size
            self.linear_1 = torch.nn.Linear(self.input_size, self.hidden_size)
            self.linear_2 = torch.nn.Linear(self.hidden_size, 1)
        
        def forward(self, input_tensor):
            linear1 = F.relu(self.linear_1(input_tensor))
            output = torch.sigmoid(self.linear_2(linear1))  # torch.sigmoid replaces the deprecated F.sigmoid
            return output
            
    # Declare hyperparameters
    model = NeuralNet(2, 5)
    learning_rate = 0.03
    criterion = torch.nn.BCELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr = learning_rate)
    
    # Sample data (BCELoss expects targets in [0, 1], so -1 is not a valid label)
    x_test = torch.FloatTensor([1, 1])
    y_test = torch.FloatTensor([0])
    
    # Print the model output and the loss
    print(f"Model Output : {model(x_test)}")
    print(f"Calc Loss Value : {criterion(model(x_test), y_test)}")
    
    
    """
    출력
    Model Output : tensor([0.5407], grad_fn=<SigmoidBackward>)
    Calc Loss Value : 0.9411621689796448
    """

     

    DNN Model Template

    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    import torch.optim as optim
    
    # Use the GPU when available, otherwise fall back to the CPU
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.fc1 = nn.Linear(784, 256)  # 784 = 28 x 28 flattened input (e.g. MNIST)
            self.fc2 = nn.Linear(256, 128)
            self.fc3 = nn.Linear(128, 10)   # 10 class scores (raw logits)
            
        def forward(self, x):
            x = x.view(-1, 784)  # flatten (batch, 1, 28, 28) -> (batch, 784)
            x = F.relu(self.fc1(x))
            x = F.relu(self.fc2(x))
            x = self.fc3(x)
            return x
        
    model = Net().to(device)
    optimizer = optim.Adagrad(model.parameters(), lr = 0.01)
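
    Because fc3 returns raw logits, nn.CrossEntropyLoss (which applies log-softmax internally) is the usual pairing. The sketch below runs one training step and then saves the weights; the random batch and the file name dnn_mnist.pt are assumptions standing in for a real MNIST DataLoader, not part of the original snippet.

    # Hypothetical single training step (random batch stands in for MNIST data)
    criterion = nn.CrossEntropyLoss()                 # takes raw logits + integer labels
    
    x_batch = torch.randn(64, 1, 28, 28).to(device)   # fake image batch
    y_batch = torch.randint(0, 10, (64,)).to(device)  # fake class labels in [0, 10)
    
    optimizer.zero_grad()
    logits = model(x_batch)            # forward() flattens to (batch, 784) itself
    loss = criterion(logits, y_batch)
    loss.backward()
    optimizer.step()
    
    # Save and reload the learned weights (file name is illustrative)
    torch.save(model.state_dict(), "dnn_mnist.pt")
    model.load_state_dict(torch.load("dnn_mnist.pt", map_location=device))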

     
