Reference (Netscope visualization): https://ethereon.github.io/netscope/#/preset/vgg-16

The implementation simply follows the layer diagram at the link above: five convolutional stages (2, 2, 3, 3, and 3 conv layers respectively, each stage ending in a 2x2 max pool with stride 2), followed by three fully connected layers.


Paper: https://arxiv.org/pdf/1409.1556.pdf
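One sizing detail the code depends on: with the standard 224x224 input, each of the five 2x2/stride-2 max pools halves the spatial resolution (224 → 112 → 56 → 28 → 14 → 7), so the last conv stage emits 512 x 7 x 7 = 25088 features per image, which fixes the input width of the first fully connected layer.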

#include <torch/torch.h>

// Define the VGG-16 network as a torch::nn::Module.
struct Net : torch::nn::Module {
    Net() {
        // Register each submodule so its parameters are visible to
        // optimizers, serialization, and to(device).
        conv1_1 = register_module("conv1_1", torch::nn::Conv2d(torch::nn::Conv2dOptions(3, 64, { 3,3 }).padding(1)));
        conv1_2 = register_module("conv1_2", torch::nn::Conv2d(torch::nn::Conv2dOptions(64, 64, { 3,3 }).padding(1)));
        conv2_1 = register_module("conv2_1", torch::nn::Conv2d(torch::nn::Conv2dOptions(64, 128, { 3,3 }).padding(1)));
        conv2_2 = register_module("conv2_2", torch::nn::Conv2d(torch::nn::Conv2dOptions(128, 128, { 3,3 }).padding(1)));
        conv3_1 = register_module("conv3_1", torch::nn::Conv2d(torch::nn::Conv2dOptions(128, 256, { 3,3 }).padding(1)));
        conv3_2 = register_module("conv3_2", torch::nn::Conv2d(torch::nn::Conv2dOptions(256, 256, { 3,3 }).padding(1)));
        conv3_3 = register_module("conv3_3", torch::nn::Conv2d(torch::nn::Conv2dOptions(256, 256, { 3,3 }).padding(1)));
        conv4_1 = register_module("conv4_1", torch::nn::Conv2d(torch::nn::Conv2dOptions(256, 512, { 3,3 }).padding(1)));
        conv4_2 = register_module("conv4_2", torch::nn::Conv2d(torch::nn::Conv2dOptions(512, 512, { 3,3 }).padding(1)));
        conv4_3 = register_module("conv4_3", torch::nn::Conv2d(torch::nn::Conv2dOptions(512, 512, { 3,3 }).padding(1)));
        conv5_1 = register_module("conv5_1", torch::nn::Conv2d(torch::nn::Conv2dOptions(512, 512, { 3,3 }).padding(1)));
        conv5_2 = register_module("conv5_2", torch::nn::Conv2d(torch::nn::Conv2dOptions(512, 512, { 3,3 }).padding(1)));
        conv5_3 = register_module("conv5_3", torch::nn::Conv2d(torch::nn::Conv2dOptions(512, 512, { 3,3 }).padding(1)));

        fc1 = register_module("fc1", torch::nn::Linear(512 * 7 * 7, 4096));
        fc2 = register_module("fc2", torch::nn::Linear(4096, 4096));
        fc3 = register_module("fc3", torch::nn::Linear(4096, 1000));
    }

    // Implement the forward pass: five conv stages, then the classifier.
    torch::Tensor forward(torch::Tensor x) {
        // Stage 1: 3 -> 64 channels; the 2x2 max pool halves the spatial size.
        x = conv1_1->forward(x);
        x = torch::relu(x);
        x = conv1_2->forward(x);
        x = torch::relu(x);
        x = torch::max_pool2d(x, { 2,2 }, { 2,2 });

        // Stage 2: 64 -> 128 channels.
        x = conv2_1->forward(x);
        x = torch::relu(x);
        x = conv2_2->forward(x);
        x = torch::relu(x);
        x = torch::max_pool2d(x, { 2,2 }, { 2,2 });

        // Stage 3: 128 -> 256 channels, three convs from here on.
        x = conv3_1->forward(x);
        x = torch::relu(x);
        x = conv3_2->forward(x);
        x = torch::relu(x);
        x = conv3_3->forward(x);
        x = torch::relu(x);
        x = torch::max_pool2d(x, { 2,2 }, { 2,2 });

        // Stage 4: 256 -> 512 channels.
        x = conv4_1->forward(x);
        x = torch::relu(x);
        x = conv4_2->forward(x);
        x = torch::relu(x);
        x = conv4_3->forward(x);
        x = torch::relu(x);
        x = torch::max_pool2d(x, { 2,2 }, { 2,2 });

        // Stage 5: 512 -> 512 channels.
        x = conv5_1->forward(x);
        x = torch::relu(x);
        x = conv5_2->forward(x);
        x = torch::relu(x);
        x = conv5_3->forward(x);
        x = torch::relu(x);
        x = torch::max_pool2d(x, { 2,2 }, { 2,2 });

        // Flatten for the classifier: 512 x 7 x 7 = 25088 features per image.
        x = x.view({ x.size(0), -1 });

        // Classifier: FC-4096, FC-4096 (each with ReLU + dropout), then FC-1000.
        x = fc1->forward(x);
        x = torch::relu(x);
        x = torch::dropout(x, 0.5, is_training());

        x = fc2->forward(x);
        x = torch::relu(x);
        x = torch::dropout(x, 0.5, is_training());

        x = fc3->forward(x);

        // Log-probabilities over the 1000 classes; pairs with nll_loss.
        x = torch::log_softmax(x, 1);

        return x;
    }

    // Submodule handles; default-initialized empty and created in the constructor.
    torch::nn::Conv2d conv1_1{ nullptr };
    torch::nn::Conv2d conv1_2{ nullptr };
    torch::nn::Conv2d conv2_1{ nullptr };
    torch::nn::Conv2d conv2_2{ nullptr };
    torch::nn::Conv2d conv3_1{ nullptr };
    torch::nn::Conv2d conv3_2{ nullptr };
    torch::nn::Conv2d conv3_3{ nullptr };
    torch::nn::Conv2d conv4_1{ nullptr };
    torch::nn::Conv2d conv4_2{ nullptr };
    torch::nn::Conv2d conv4_3{ nullptr };
    torch::nn::Conv2d conv5_1{ nullptr };
    torch::nn::Conv2d conv5_2{ nullptr };
    torch::nn::Conv2d conv5_3{ nullptr };
    torch::nn::Linear fc1{ nullptr };
    torch::nn::Linear fc2{ nullptr };
    torch::nn::Linear fc3{ nullptr };
};
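To sanity-check the module, a minimal driver like the one below (my own sketch, not part of the original post; it assumes Net is defined in the same translation unit and LibTorch is linked) runs one random 224x224 image through the network. Since forward() ends in log_softmax, torch::nll_loss is the matching training criterion.

#include <torch/torch.h>
#include <iostream>

int main() {
    auto net = std::make_shared<Net>();

    // One dummy RGB image at the 224x224 resolution VGG-16 expects.
    torch::Tensor input = torch::randn({ 1, 3, 224, 224 });
    torch::Tensor output = net->forward(input);
    std::cout << output.sizes() << std::endl;  // Prints [1, 1000].

    // log_softmax output + nll_loss together form cross-entropy.
    torch::Tensor target = torch::randint(0, 1000, { 1 }, torch::kLong);
    torch::Tensor loss = torch::nll_loss(output, target);
    std::cout << loss.item<float>() << std::endl;
    return 0;
}

Because every submodule is registered in the constructor, net->parameters() returns the full parameter list, so the model can be handed directly to an optimizer such as torch::optim::SGD for training.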