
# Modern Convolutional Networks and Their Implementation Using PyTorch

## Sample Data Used
### Fashion-MNIST Dataset
![data](Alexnet/fashion_set_sample.png)
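All models in this repo are trained on Fashion-MNIST. A minimal sketch of loading the dataset with `torchvision` (the batch size and resize value are illustrative assumptions, not taken from the notebooks):

```python
import torch
from torchvision import datasets, transforms

# Resize the 28x28 Fashion-MNIST images so they fit AlexNet/VGG-style inputs
# (224 is an assumption; the notebooks may use a different size).
transform = transforms.Compose([
    transforms.Resize(224),
    transforms.ToTensor(),
])

train_set = datasets.FashionMNIST(root="./data", train=True, download=True, transform=transform)
test_set = datasets.FashionMNIST(root="./data", train=False, download=True, transform=transform)

train_loader = torch.utils.data.DataLoader(train_set, batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=128, shuffle=False)
```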
## AlexNet Structure
![Alexnet](Alexnet/alexnet.svg)
### Code

```python
import torch
import torch.nn as nn


class AlexNet(nn.Module):
    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x
```
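A quick sanity check of the module on a dummy single-channel 224x224 input (the 224 resize is an assumption; Fashion-MNIST images are 28x28 and need upsampling for this architecture):

```python
model = AlexNet(num_classes=10)     # 10 Fashion-MNIST classes
x = torch.randn(1, 1, 224, 224)     # dummy batch: 1 image, 1 channel
logits = model(x)
print(logits.shape)                 # expected: torch.Size([1, 10])
```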

## VGG Structure
![VGG](VGG_Blocks/VGG.svg)
### Code

```
Sequential(
  (0): Sequential(
    (0): Conv2d(1, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (1): Sequential(
    (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (2): Sequential(
    (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU()
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (3): Sequential(
    (0): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU()
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (4): Sequential(
    (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU()
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (5): Flatten()
  (6): Linear(in_features=25088, out_features=4096, bias=True)
  (7): ReLU()
  (8): Dropout(p=0.5, inplace=False)
  (9): Linear(in_features=4096, out_features=4096, bias=True)
  (10): ReLU()
  (11): Dropout(p=0.5, inplace=False)
  (12): Linear(in_features=4096, out_features=10, bias=True)
)
```
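The printout above is built from repeated convolution-plus-pooling blocks. A minimal sketch of a block constructor that would reproduce this structure (the helper name and configuration tuple are assumptions, not copied from the repo):

```python
import torch.nn as nn

def vgg_block(num_convs, in_channels, out_channels):
    """Stack `num_convs` 3x3 convolutions (each followed by ReLU), then a 2x2 max pool."""
    layers = []
    for _ in range(num_convs):
        layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))
        layers.append(nn.ReLU())
        in_channels = out_channels
    layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
    return nn.Sequential(*layers)

# (num_convs, out_channels) per block in the printed network, starting from 1 input channel.
conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))
```

With a 224x224 single-channel input, the five halving pool stages leave a 512x7x7 feature map, i.e. 25088 features, which matches the first `Linear` layer in the printout and suggests the notebook resizes inputs to 224x224.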

## NIN Structure
![NIN](Network_in_Network/NIN.svg)
## Network Used

```
Net(
  (n1): Sequential(
    (0): Conv2d(1, 96, kernel_size=(11, 11), stride=(4, 4))
    (1): ReLU()
    (2): Conv2d(96, 96, kernel_size=(1, 1), stride=(1, 1))
    (3): ReLU()
    (4): Conv2d(96, 96, kernel_size=(1, 1), stride=(1, 1))
    (5): ReLU()
  )
  (m1): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  (n2): Sequential(
    (0): Conv2d(96, 256, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (1): ReLU()
    (2): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
    (3): ReLU()
    (4): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))
    (5): ReLU()
  )
  (m2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  (n3): Sequential(
    (0): Conv2d(256, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): Conv2d(384, 384, kernel_size=(1, 1), stride=(1, 1))
    (3): ReLU()
    (4): Conv2d(384, 384, kernel_size=(1, 1), stride=(1, 1))
    (5): ReLU()
  )
  (m3): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  (dropout1): Dropout2d(p=0.5, inplace=False)
  (n4): Sequential(
    (0): Conv2d(384, 10, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): Conv2d(10, 10, kernel_size=(1, 1), stride=(1, 1))
    (3): ReLU()
    (4): Conv2d(10, 10, kernel_size=(1, 1), stride=(1, 1))
    (5): ReLU()
  )
  (avg1): AdaptiveMaxPool2d(output_size=(1, 1))
  (flat): Flatten()
)
```
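Each `n*` stage above is a NiN block: one spatial convolution followed by two 1x1 convolutions that act as a per-pixel fully connected layer. A minimal sketch of such a block constructor (the helper name is an assumption):

```python
import torch.nn as nn

def nin_block(in_channels, out_channels, kernel_size, stride, padding):
    """One NiN block: a spatial conv followed by two 1x1 convs, each with ReLU."""
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
        nn.ReLU(),
        nn.Conv2d(out_channels, out_channels, kernel_size=1),
        nn.ReLU(),
        nn.Conv2d(out_channels, out_channels, kernel_size=1),
        nn.ReLU(),
    )

# For example, the first stage (n1) in the printout corresponds to:
n1 = nin_block(1, 96, kernel_size=11, stride=4, padding=0)
```

Note that the last stage (`n4`) maps to 10 channels, one per class, so global pooling plus `Flatten` replaces the usual fully connected classifier.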

## Loss vs. Number of Epochs
![loss](Network_in_Network/loss.png)
## Prediction
![prediction](Network_in_Network/prediction.png)
# GoogLeNet
## Inception Block
![inception](GoogleNet/inception.svg)
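The printed network below lists each Inception block's four parallel paths (`p1_1`, `p2_1`/`p2_2`, `p3_1`/`p3_2`, `p4_1`/`p4_2`). A hedged sketch of a block consistent with those layer names (the channel-wise concatenation is the standard Inception behaviour, assumed here rather than copied from the repo):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class Inception(nn.Module):
    """Four parallel paths whose outputs are concatenated along the channel axis."""
    def __init__(self, in_channels, c1, c2, c3, c4):
        super().__init__()
        # Path 1: single 1x1 convolution
        self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1)
        # Path 2: 1x1 convolution followed by 3x3 convolution
        self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1)
        self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1)
        # Path 3: 1x1 convolution followed by 5x5 convolution
        self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1)
        self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2)
        # Path 4: 3x3 max pool followed by 1x1 convolution
        self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1)

    def forward(self, x):
        p1 = F.relu(self.p1_1(x))
        p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))
        p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))
        p4 = F.relu(self.p4_2(self.p4_1(x)))
        return torch.cat((p1, p2, p3, p4), dim=1)
```

For example, `Inception(192, 64, (96, 128), (16, 32), 32)` reproduces the first block in the printout, giving 64 + 128 + 32 + 32 = 256 output channels.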
## GoogLeNet Structure
![GoogLeNet](GoogleNet/GoogleNet.svg)
## Network Used

```
Sequential(
  (0): Sequential(
    (0): Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3))
    (1): ReLU()
    (2): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
  )
  (1): Sequential(
    (0): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))
    (1): ReLU()
    (2): Conv2d(64, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
  )
  (2): Sequential(
    (0): Inception(
      (p1_1): Conv2d(192, 64, kernel_size=(1, 1), stride=(1, 1))
      (p2_1): Conv2d(192, 96, kernel_size=(1, 1), stride=(1, 1))
      (p2_2): Conv2d(96, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (p3_1): Conv2d(192, 16, kernel_size=(1, 1), stride=(1, 1))
      (p3_2): Conv2d(16, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
      (p4_1): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
      (p4_2): Conv2d(192, 32, kernel_size=(1, 1), stride=(1, 1))
    )
    (1): Inception(
      (p1_1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1))
      (p2_1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1))
      (p2_2): Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (p3_1): Conv2d(256, 32, kernel_size=(1, 1), stride=(1, 1))
      (p3_2): Conv2d(32, 96, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
      (p4_1): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
      (p4_2): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))
    )
    (2): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
  )
  (3): Sequential(
    (0): Inception(
      (p1_1): Conv2d(480, 192, kernel_size=(1, 1), stride=(1, 1))
      (p2_1): Conv2d(480, 96, kernel_size=(1, 1), stride=(1, 1))
      (p2_2): Conv2d(96, 208, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (p3_1): Conv2d(480, 16, kernel_size=(1, 1), stride=(1, 1))
      (p3_2): Conv2d(16, 48, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
      (p4_1): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
      (p4_2): Conv2d(480, 64, kernel_size=(1, 1), stride=(1, 1))
    )
    (1): Inception(
      (p1_1): Conv2d(512, 160, kernel_size=(1, 1), stride=(1, 1))
      (p2_1): Conv2d(512, 112, kernel_size=(1, 1), stride=(1, 1))
      (p2_2): Conv2d(112, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (p3_1): Conv2d(512, 24, kernel_size=(1, 1), stride=(1, 1))
      (p3_2): Conv2d(24, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
      (p4_1): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
      (p4_2): Conv2d(512, 64, kernel_size=(1, 1), stride=(1, 1))
    )
    (2): Inception(
      (p1_1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1))
      (p2_1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1))
      (p2_2): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (p3_1): Conv2d(512, 24, kernel_size=(1, 1), stride=(1, 1))
      (p3_2): Conv2d(24, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
      (p4_1): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
      (p4_2): Conv2d(512, 64, kernel_size=(1, 1), stride=(1, 1))
    )
    (3): Inception(
      (p1_1): Conv2d(512, 112, kernel_size=(1, 1), stride=(1, 1))
      (p2_1): Conv2d(512, 144, kernel_size=(1, 1), stride=(1, 1))
      (p2_2): Conv2d(144, 288, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (p3_1): Conv2d(512, 32, kernel_size=(1, 1), stride=(1, 1))
      (p3_2): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
      (p4_1): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
      (p4_2): Conv2d(512, 64, kernel_size=(1, 1), stride=(1, 1))
    )
    (4): Inception(
      (p1_1): Conv2d(528, 256, kernel_size=(1, 1), stride=(1, 1))
      (p2_1): Conv2d(528, 160, kernel_size=(1, 1), stride=(1, 1))
      (p2_2): Conv2d(160, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (p3_1): Conv2d(528, 32, kernel_size=(1, 1), stride=(1, 1))
      (p3_2): Conv2d(32, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
      (p4_1): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
      (p4_2): Conv2d(528, 128, kernel_size=(1, 1), stride=(1, 1))
    )
    (5): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
  )
  (4): Sequential(
    (0): Inception(
      (p1_1): Conv2d(832, 256, kernel_size=(1, 1), stride=(1, 1))
      (p2_1): Conv2d(832, 160, kernel_size=(1, 1), stride=(1, 1))
      (p2_2): Conv2d(160, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (p3_1): Conv2d(832, 32, kernel_size=(1, 1), stride=(1, 1))
      (p3_2): Conv2d(32, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
      (p4_1): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
      (p4_2): Conv2d(832, 128, kernel_size=(1, 1), stride=(1, 1))
    )
    (1): Inception(
      (p1_1): Conv2d(832, 384, kernel_size=(1, 1), stride=(1, 1))
      (p2_1): Conv2d(832, 192, kernel_size=(1, 1), stride=(1, 1))
      (p2_2): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (p3_1): Conv2d(832, 48, kernel_size=(1, 1), stride=(1, 1))
      (p3_2): Conv2d(48, 128, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
      (p4_1): MaxPool2d(kernel_size=3, stride=1, padding=1, dilation=1, ceil_mode=False)
      (p4_2): Conv2d(832, 128, kernel_size=(1, 1), stride=(1, 1))
    )
    (2): AdaptiveMaxPool2d(output_size=(1, 1))
    (3): Flatten()
  )
  (5): Linear(in_features=1024, out_features=10, bias=True)
)
```

## Loss vs. Number of Epochs
![loss](GoogleNet/loss.png)
## Prediction
![prediction](GoogleNet/prediction.png)

# Batch Normalization
## Loss
![loss](Batch_Normalization/loss.png)
## Prediction
![prediction](Batch_Normalization/prediction.png)
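Batch normalization is typically inserted between a convolution (or linear layer) and its activation. A minimal, hedged sketch of such a network (the LeNet-style layer sizes are assumptions, not the exact model used in the notebook):

```python
import torch.nn as nn

# A small conv stack with BatchNorm2d/BatchNorm1d after each conv/linear layer,
# sized for 1x28x28 Fashion-MNIST inputs.
net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.ReLU(),
    nn.MaxPool2d(kernel_size=2, stride=2),
    nn.Flatten(),
    nn.Linear(16 * 4 * 4, 120), nn.BatchNorm1d(120), nn.ReLU(),
    nn.Linear(120, 84), nn.BatchNorm1d(84), nn.ReLU(),
    nn.Linear(84, 10),
)
```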
# DenseNet
## Model
![](DenseNet/DenseNet.svg)
## Loss
![loss](DenseNet/loss.png)
## Prediction
![](DenseNet/prediction.png)
## Training Curve
![](DenseNet/train_acc.png)
## Network

```
Sequential(
  (0): Reshape()
  (1): Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3))
  (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (3): ReLU()
  (4): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
  (5): DenseBlock(
    (net): Sequential(
      (0): Sequential(
        (0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(64, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (1): Sequential(
        (0): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(96, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (2): Sequential(
        (0): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (3): Sequential(
        (0): BatchNorm2d(160, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(160, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
    )
  )
  (6): Sequential(
    (0): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (1): ReLU()
    (2): Conv2d(192, 96, kernel_size=(1, 1), stride=(1, 1))
    (3): AvgPool2d(kernel_size=2, stride=2, padding=0)
  )
  (7): DenseBlock(
    (net): Sequential(
      (0): Sequential(
        (0): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(96, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (1): Sequential(
        (0): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (2): Sequential(
        (0): BatchNorm2d(160, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(160, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (3): Sequential(
        (0): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(192, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
    )
  )
  (8): Sequential(
    (0): BatchNorm2d(224, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (1): ReLU()
    (2): Conv2d(224, 112, kernel_size=(1, 1), stride=(1, 1))
    (3): AvgPool2d(kernel_size=2, stride=2, padding=0)
  )
  (9): DenseBlock(
    (net): Sequential(
      (0): Sequential(
        (0): BatchNorm2d(112, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(112, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (1): Sequential(
        (0): BatchNorm2d(144, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(144, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (2): Sequential(
        (0): BatchNorm2d(176, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(176, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (3): Sequential(
        (0): BatchNorm2d(208, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(208, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
    )
  )
  (10): Sequential(
    (0): BatchNorm2d(240, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (1): ReLU()
    (2): Conv2d(240, 120, kernel_size=(1, 1), stride=(1, 1))
    (3): AvgPool2d(kernel_size=2, stride=2, padding=0)
  )
  (11): DenseBlock(
    (net): Sequential(
      (0): Sequential(
        (0): BatchNorm2d(120, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(120, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (1): Sequential(
        (0): BatchNorm2d(152, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(152, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (2): Sequential(
        (0): BatchNorm2d(184, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(184, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
      (3): Sequential(
        (0): BatchNorm2d(216, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (1): ReLU()
        (2): Conv2d(216, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      )
    )
  )
  (12): BatchNorm2d(248, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (13): ReLU()
  (14): AdaptiveMaxPool2d(output_size=(1, 1))
  (15): Flatten()
  (16): Linear(in_features=248, out_features=10, bias=True)
)
```
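The printout shows the two building pieces: a dense block whose input channel count grows by 32 after every inner BN-ReLU-Conv unit (the channel concatenation that causes this growth is the standard DenseNet behaviour, assumed here), and a transition layer that shrinks the channels with a 1x1 convolution and downsamples with average pooling. A minimal sketch consistent with that structure (helper names are assumptions):

```python
import torch
import torch.nn as nn

def conv_block(in_channels, out_channels):
    """BN -> ReLU -> 3x3 Conv, as in each inner unit of the printed DenseBlock."""
    return nn.Sequential(
        nn.BatchNorm2d(in_channels), nn.ReLU(),
        nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
    )

class DenseBlock(nn.Module):
    """Each unit's output is concatenated with its input along the channel axis."""
    def __init__(self, num_convs, in_channels, growth_rate=32):
        super().__init__()
        self.net = nn.Sequential(*[
            conv_block(in_channels + i * growth_rate, growth_rate)
            for i in range(num_convs)
        ])

    def forward(self, x):
        for blk in self.net:
            y = blk(x)
            x = torch.cat((x, y), dim=1)  # concatenation drives the 64 -> 96 -> 128 -> ... growth
        return x

def transition_block(in_channels, out_channels):
    """BN -> ReLU -> 1x1 Conv (channel reduction) -> 2x2 average pooling."""
    return nn.Sequential(
        nn.BatchNorm2d(in_channels), nn.ReLU(),
        nn.Conv2d(in_channels, out_channels, kernel_size=1),
        nn.AvgPool2d(kernel_size=2, stride=2),
    )
```

For example, `DenseBlock(4, 64)` reproduces the first printed block (64 input channels growing to 192), and `transition_block(192, 96)` matches the transition layer that follows it.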