网络结构:1输入层+2隐藏层+1输出层

from tensorflow.examples.tutorials.mnist import input_data
# Download (on first run) and load MNIST into "home/wmy".
# NOTE(review): relative path -- "/home/wmy" may have been intended; confirm.
mnist = input_data.read_data_sets("home/wmy", one_hot=True)

import tensorflow as tf

# Hyperparameters.
learning_rate = 0.001
train_epochs = 20
batch_size = 64

# Network sizes: 784 input pixels, two 100-unit hidden layers, 10 classes
# (1 input layer + 2 hidden layers + 1 output layer).
n_input = 784
n_hidden1 = 100
n_hidden2 = 100
n_classes = 10

# Placeholders for input features and one-hot labels.
# Fix: name the input placeholder 'input_x' so the restore script can
# fetch it with get_tensor_by_name('input_x:0').
x = tf.placeholder(tf.float32, shape=[None, n_input], name='input_x')
y = tf.placeholder(tf.float32, shape=[None, n_classes])

# Initialize weights and biases from a standard normal distribution.
weights = {'h1': tf.Variable(tf.random_normal([n_input, n_hidden1])),
           'h2': tf.Variable(tf.random_normal([n_hidden1, n_hidden2])),
           'out': tf.Variable(tf.random_normal([n_hidden2, n_classes]))}

biases = {'b1': tf.Variable(tf.random_normal([n_hidden1])),
          'b2': tf.Variable(tf.random_normal([n_hidden2])),
          'out': tf.Variable(tf.random_normal([n_classes]))}
# Forward pass of the MLP.
def inference(input_x):
    """Build the forward graph: two ReLU hidden layers + linear output.

    Args:
        input_x: float32 tensor of shape [None, n_input] (flattened images).

    Returns:
        Unnormalized class logits of shape [None, n_classes].
    """
    # Bug fix: the original used the global placeholder `x` here instead of
    # the `input_x` parameter, silently ignoring the argument.
    layer_1 = tf.nn.relu(tf.matmul(input_x, weights['h1']) + biases['b1'])
    layer_2 = tf.nn.relu(tf.matmul(layer_1, weights['h2']) + biases['b2'])
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer

logits = inference(x)
# Softmax probabilities. Fix: name the op 'output' so the restore script
# can fetch it with get_tensor_by_name('output:0').
prediction = tf.nn.softmax(logits, name='output')

# Mean cross-entropy loss over the batch, minimized with Adam.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss)  # single training step op

# Accuracy: fraction of samples whose predicted class matches the label.
predict_num = tf.argmax(prediction, 1)
real_num = tf.argmax(y, 1)
pre_correct = tf.equal(predict_num, real_num)
accuracy = tf.reduce_mean(tf.cast(pre_correct, tf.float32))

# Variable initializer and checkpoint saver for the session below.
init = tf.global_variables_initializer()
saver = tf.train.Saver()
#初始化 #开启一个会话 with tf.Session() as sess: sess.run(init) total_batch = int(mnist.train.num_examples / batch_size) for epoch in range(train_epochs): for batch in range(total_batch): batch_x, batch_y = mnist.train.next_batch(batch_size) sess.run(train_op, feed_dict={x:batch_x, y:batch_y}) if epoch % 10 == 0: loss_, acc = sess.run([loss, accuracy], feed_dict={x:batch_x, y:batch_y}) print("epoch {}, loss {:.4f}, acc {:.3f}".format(epoch, loss_, acc)) #每10步计算一次准确度 saver.save(sess,"/models",write_meta_graph=True) print("optimizer finished!") #计算测试集的准确度 test_acc = sess.run(predict_num, feed_dict={x:mnist.test.images[0:10,:]}) print('predict_num', test_acc) print('real_num',sess.run(tf.argmax(mnist.test.labels [0:10,:],1)))

运行该程序,将下载MNIST数据集到指定目录,训练20个轮次,输出10个数字的测试结果

SRE实战 互联网时代守护先锋,助力企业售后服务体系运筹帷幄!一键直达领取阿里云限量特价优惠。

tensorflow训练MNIST 随笔 第1张

 

保存模型:

在会话前先定义一个saver类,saver=tf.train.Saver()

在会话中,训练模型结束后,保存模型。

saver.save(sess, "mnist", write_meta_graph=True)

调用模型:

import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
import os
mnist = input_data.read_data_sets("home/wmy", one_hot=True)


# Reset the default graph so the imported meta-graph is the only graph.
tf.reset_default_graph()

with tf.Session() as sess:
    # Alternative SavedModel loading API (unused):
    # tf.saved_model.loader.load(sess,[tf.saved_model.tag_constants.TRAINING],'./mnist')
    # Rebuild the graph structure from ./mnist.meta, then restore variable
    # values from the newest checkpoint in the current directory.
    import_meta = tf.train.import_meta_graph("./mnist.meta")
    import_meta.restore(sess,tf.train.latest_checkpoint("./"))
    print("导入模型成功")
    # Fetch tensors by name. NOTE(review): this requires ops named
    # 'input_x' and 'output' to exist in the saved graph -- confirm the
    # training script named them.
    input_x = sess.graph.get_tensor_by_name('input_x:0')  # input placeholder
    # input_y=sess.graph.get_tensor_by_name('placeholder_1:0')   # labels (unused)
    output = sess.graph.get_tensor_by_name('output:0')

    # Predict classes for the first 10 test images and print them next to
    # the true labels for a quick sanity check.
    test_acc = sess.run(tf.argmax(output, 1), feed_dict={input_x: mnist.test.images[0:10, :]})
    print('predict_num', test_acc)
    print('real_num', sess.run(tf.argmax(mnist.test.labels[0:10, :], 1)))

运行程序,得到如下结果:

tensorflow训练MNIST 随笔 第2张

模型导入成功,预测结果正确。

 

扫码关注我们
微信号:SRE实战
拒绝背锅 运筹帷幄