Environment: Python 3.6, TensorFlow 1.6
1. Linear regression
import tensorflow as tf
import numpy as np

"""
Workflow: generate random samples x of shape (2, 100), build y of shape (1, 100)
from a known w (1, 2) and bias b, then train W and b to recover w and b.
"""

"""Data preparation: generate random data with numpy."""
normal_data = np.random.rand(2, 100)  # random samples
random_data = normal_data.reshape(2, 100)
# print(random_data)
# random_data.dtype = np.float32  # Wrong!!! reassigning .dtype does not convert the data type
x_data = random_data.astype(np.float32)  # astype() is the correct way to convert the dtype
# print(x_data)
a_data = np.array([0.100, 0.200]).reshape(1, 2)  # reshape to (1, 2)
y_data = np.dot(a_data, x_data) + 0.300  # np.dot is matrix multiplication; the bias is added by broadcasting
# print(y_data)

"""Build a linear model with TensorFlow."""
b = tf.Variable(0.0, dtype=np.float32)  # scalar variable; 0.0 once initialized
uniform_data = tf.random_uniform([1, 2], -1.0, 1.0)  # values uniformly distributed in [-1, 1); could serve as an initializer for W
W = tf.Variable(np.array([0.1000001, 0.20000015], dtype=np.float32).reshape(1, 2), dtype=np.float32)  # a (1, 2) variable
y = tf.matmul(W, x_data) + b  # matmul is matrix multiplication; this only adds a tensor to the graph, no value is computed yet
# print(y)

"""Solve the linear model."""
# Loss function: mean squared error
loss = tf.reduce_mean(tf.square(y - y_data))
# Choose gradient descent
# learning_rate = 1e-3
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 100000, 0.96, staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)  # gradient descent optimizer
# Training objective: minimize the loss
train = optimizer.minimize(loss)

############################################################
# Solving the task above with tf
# 1. Initialize variables: a mandatory tf step; once variables are declared they must be initialized before use
init = tf.global_variables_initializer()

# Let TensorFlow allocate GPU memory on demand
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# 2. Launch the graph
with tf.Session(config=config) as sess:
    sess.run(init)
    # print(sess.run(W))
    # print(sess.run(b))
    # print(x_data)
    # print(sess.run(y))
    # 3. Iterate: repeatedly run the train op that minimizes the loss, fitting the plane
    for step in range(0, 20000):
        sess.run(train)
        if step % 20 == 0:
            los = sess.run(y - y_data)  # residuals (unused)
            W_value = W.eval(sess)
            b_value = b.eval(sess)
            print(sess.run(learning_rate))  # stays at 0.1: global_step is never incremented, so the decay never kicks in
            print(step, W_value, b_value)

# Best fit: W ≈ [[0.100 0.200]], b ≈ 0.300
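Note that the learning rate printed inside the loop stays at 0.1: exponential_decay only decays as global_step grows, and nothing above ever increments global_step. A minimal sketch of the fix, using a small stand-in loss of my own rather than the loss from the section, is to pass global_step to minimize(), which then advances the counter on every training step:

import tensorflow as tf

# Hypothetical stand-in loss just to show the wiring; in the section above it would be
# tf.reduce_mean(tf.square(y - y_data)).
w = tf.Variable(1.0)
loss = tf.square(w - 3.0)

global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(0.1, global_step,
                                           decay_steps=100, decay_rate=0.96, staircase=True)
# Passing global_step to minimize() makes each training step increment it,
# which is what drives the exponential decay.
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(300):
        sess.run(train)
        if step % 100 == 0:
            print(step, sess.run(global_step), sess.run(learning_rate))  # learning rate now shrinks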
2. Summing and multiplying constants
import tensorflow as tf

input1 = tf.constant(2.0)
input2 = tf.constant(3.0)
input3 = tf.constant(5.0)

intermd = tf.add(input1, input2)
mul = tf.multiply(intermd, input3)

with tf.Session() as sess:
    print(sess.run(intermd))
    print(sess.run(mul))
    result = sess.run([mul, intermd])
    print(result)
    print(type(result))
    print(type(result[0]))
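The same graph can also be written with Python operators, which TensorFlow 1.x overloads to the corresponding ops; a single sess.run with a list of fetches evaluates the graph once and returns both values. A small sketch with the same constants as above:

import tensorflow as tf

input1 = tf.constant(2.0)
input2 = tf.constant(3.0)
input3 = tf.constant(5.0)

intermd = input1 + input2   # same graph op as tf.add(input1, input2)
mul = intermd * input3      # same graph op as tf.multiply(intermd, input3)

with tf.Session() as sess:
    mul_val, intermd_val = sess.run([mul, intermd])  # one pass over the graph for both fetches
    print(mul_val, intermd_val)  # 25.0 5.0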
3. Variables and constants
import tensorflow as tf

# Create a variable, initialized to 0
state = tf.Variable(0, name="counter")

# Create an op whose effect is to increase state by 1
one = tf.constant(1)  # a literal 1 works just as well
new_value = tf.add(state, 1)
update = tf.assign(state, new_value)  # op that writes the new value back into the variable

# After launching the graph, run the update op
with tf.Session() as sess:
    # Once the graph is built, variables must be initialized before use
    sess.run(tf.global_variables_initializer())
    # Check the initial value of state
    print(sess.run(state))
    for _ in range(3):
        print(sess.run(update))  # each run of update increments state, printing 1, 2, 3;
                                 # running new_value alone would always return 1, because state would never change
    print(sess.run(state))
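As an aside, the add-then-assign pair can be collapsed with tf.assign_add, which increments the variable in place. A minimal sketch of the same counter, assuming the TF 1.x API:

import tensorflow as tf

state = tf.Variable(0, name="counter")
update = tf.assign_add(state, 1)   # equivalent to tf.assign(state, tf.add(state, 1))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        print(sess.run(update))    # prints 1, 2, 3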
4. Common operations
import tensorflow as tf

"""Square every element of the following vector and sum the squares
(taking the mean of the squares is the stated goal; the code below stops at the sum)."""
h_vec = tf.constant([1.0, 2.0, 3.0, 4.0], dtype=tf.float32)

"""First compute the sum of the squares."""
tens_sq_sum = tf.Variable(initial_value=0, dtype=tf.float32)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    constant = sess.run(h_vec)
    constant_length = len(constant)  # number of elements
    for i in range(constant_length):
        num = constant[i]
        sq_num = tf.square(num)
        tmp_value = tf.add(tens_sq_sum, sq_num)
        tens_sq_sum = tf.assign(tens_sq_sum, tmp_value)  # note: this adds new ops to the graph on every iteration
    print(sess.run(tens_sq_sum))  # 30.0

"""Another way: pull the squared values into numpy and sum them in Python."""
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sq = sess.run(tf.square(h_vec))
    length = len(sq)  # renamed from len/sum to avoid shadowing the Python built-ins
    total = 0
    for i in range(length):
        total = total + sq[i]
    print(total)  # 30.0
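The stated goal (the mean of the squared elements) needs no Python loop at all: tf.square followed by tf.reduce_sum or tf.reduce_mean does it inside the graph. A short sketch:

import tensorflow as tf

h_vec = tf.constant([1.0, 2.0, 3.0, 4.0], dtype=tf.float32)
sq_sum = tf.reduce_sum(tf.square(h_vec))    # 1 + 4 + 9 + 16 = 30
sq_mean = tf.reduce_mean(tf.square(h_vec))  # 30 / 4 = 7.5

with tf.Session() as sess:
    print(sess.run([sq_sum, sq_mean]))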
5. InteractiveSession
import tensorflow as tf

a = tf.constant(1.0)
b = tf.constant(2.0)
c = a + b

# The two forms below are equivalent
with tf.Session():
    print(c.eval())

sess = tf.InteractiveSession()
print(c.eval())
sess.close()

a = tf.constant(1.0)
b = tf.constant(2.0)
c = tf.Variable(3.0)
d = a + b

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

###################
# This is wrong:
# print(a.run())
# print(d.run())
###################
# This is correct:
print(a.eval())
print(d.eval())

# run() is mainly for ops; eval() is for tensors
x = tf.Variable(1.2)
# print(x.eval())  # not initialized yet, cannot be used
x.initializer.run()  # x.initializer is an op, and ops are executed with run()
print(x.eval())
sess.close()

h_sum = tf.Variable(0.0, dtype=tf.float32)
# h_vec = tf.random_normal(shape=([10]))
h_vec = tf.constant([1.0, 2.0, 3.0, 4.0])
# Add every element of h_vec to h_sum; dividing by the element count would then give the mean
# The value to be added
h_add = tf.placeholder(tf.float32)
# The value after the addition
h_new = tf.add(h_sum, h_add)
# Op that writes h_new back into h_sum
update = tf.assign(h_sum, h_new)

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
print('h_sum =', h_sum.eval())
print("vec = ", h_vec.eval())
print("vec = ", h_vec[0].eval())

for _ in range(4):
    update.eval(feed_dict={h_add: h_vec[_].eval()})
    print('h_sum =', h_sum.eval())
sess.close()
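eval() and initializer.run() with no session argument only work when a default session is installed, which InteractiveSession does on construction; a plain Session gives the same behaviour inside an as_default() block. A minimal sketch with example values of my own, not taken from the section above:

import tensorflow as tf

x = tf.Variable(1.2)
y = x * 2.0

sess = tf.Session()
with sess.as_default():      # makes sess the default session inside this block
    x.initializer.run()      # ops are executed with run()
    print(y.eval())          # tensors are evaluated with eval(); prints 2.4
sess.close()                 # as_default() does not close the session, so close it explicitly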
6. Feeding values with placeholders
import tensorflow as tf

input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1, input2)

with tf.Session() as sess:
    print(sess.run([output], feed_dict={input1: [7.0], input2: [2.0]}))

with tf.Session() as sess:
    result = sess.run(output, feed_dict={input1: 7.0, input2: 2.0})
    print(type(result))
    print(result)
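A placeholder can also be given an explicit shape so that mismatched feeds fail early; the shape=[None] below ("a 1-D vector of any length") is my own illustrative choice, not part of the original code. A small sketch:

import tensorflow as tf

input1 = tf.placeholder(tf.float32, shape=[None])   # 1-D vector of any length
input2 = tf.placeholder(tf.float32, shape=[None])
output = tf.multiply(input1, input2)                # element-wise product

with tf.Session() as sess:
    print(sess.run(output, feed_dict={input1: [1.0, 2.0, 3.0],
                                      input2: [4.0, 5.0, 6.0]}))  # [ 4. 10. 18.]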