import tensorflow as tf
import numpy as np
import csv
np.set_printoptions(threshold=200)
class Dataset:
    def __init__(self, data, label):
        self._index_in_epoch = 0
        self._epochs_completed = 0
        self._data = data
        self._label = label
        self._num_examples = data.shape[0]

    @property
    def data(self):
        return self._data

    @property
    def label(self):
        return self._label

    def next_batch(self, batch_size, shuffle=True):
        start = self._index_in_epoch
        # Shuffle once at the start of the very first epoch.
        if start == 0 and self._epochs_completed == 0 and shuffle:
            idx = np.arange(0, self._num_examples)  # all possible indexes
            np.random.shuffle(idx)                  # shuffle the indexes
            self._data = self.data[idx]
            self._label = self.label[idx]
        # Go to the next batch.
        if start + batch_size > self._num_examples:
            # The batch crosses the epoch boundary: take the remaining samples,
            # reshuffle, and top the batch up from the start of the next epoch.
            self._epochs_completed += 1
            rest_num_examples = self._num_examples - start
            data_rest_part = self.data[start:self._num_examples]
            label_rest_part = self.label[start:self._num_examples]
            if shuffle:
                idx0 = np.arange(0, self._num_examples)
                np.random.shuffle(idx0)
                self._data = self.data[idx0]
                self._label = self.label[idx0]
            start = 0
            # Handles the case where the number of samples is not an integer
            # multiple of batch_size.
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            data_new_part = self._data[start:end]
            label_new_part = self._label[start:end]
            return (np.concatenate((data_rest_part, data_new_part), axis=0),
                    np.concatenate((label_rest_part, label_new_part), axis=0))
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._data[start:end], self._label[start:end]
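# Quick sanity check for Dataset.next_batch (a sketch with made-up toy arrays,
# not part of the original data; kept commented out):
# _toy = Dataset(np.arange(10).reshape(5, 2), np.arange(10).reshape(5, 2))
# _bx, _by = _toy.next_batch(3)  # first 3 shuffled samples
# _bx, _by = _toy.next_batch(3)  # remaining 2 samples + 1 from the reshuffled next epoch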
birth_data = []
with open("mdata.csv") as csvfile:
    csv_reader = csv.reader(csvfile)  # read mdata.csv with csv.reader
    # birth_header = next(csv_reader)  # read the column headers from the first row
    for row in csv_reader:  # collect every row of the csv file in birth_data
        birth_data.append(row)
birth_data = [[float(x) for x in row] for row in birth_data]  # convert the values from string to float
birth_data = np.array(birth_data)  # turn the list into an array so the structure is easy to inspect
print(birth_data.shape)  # check the shape with .shape
data = [[row[0], row[1]] for row in birth_data]   # first two columns are the inputs
label = [[row[2], row[3]] for row in birth_data]  # next two columns are the targets
# label = [[row[3]] for row in birth_data]
data = np.array(data)
label = np.array(label)
print(data.shape)
print(label.shape)
t_data = Dataset(data, label)
tf.reset_default_graph()
def weight_variable(shape, num):
    w = tf.get_variable(name="weight_%d" % num, shape=shape,
                        initializer=tf.random_normal_initializer(stddev=1.0, dtype=tf.float32))
    return w

def bias_variable(shape, num):
    b = tf.get_variable(name="bias_%d" % num, shape=shape,
                        initializer=tf.random_normal_initializer(stddev=1.0, dtype=tf.float32))
    return b
x = tf.placeholder(dtype=np.float32, shape=(None, 2), name="input_x")
y_ = tf.placeholder(dtype=np.float32, shape=(None, 2), name="input_y")
keep_prob = tf.placeholder(tf.float32, name="input_keep_prob")  # fed below, but no dropout layer actually uses it
w1 = weight_variable([2, 10], 1)
b1 = bias_variable([10], 1)
w2 = weight_variable([10, 5], 2)
b2 = bias_variable([5], 2)
w3 = weight_variable([5, 2], 3)
b3 = bias_variable([2], 3)
graph = tf.get_default_graph()
# Fully connected network: 2 inputs -> 10 -> 5 -> 2 outputs, linear output layer.
layer1 = tf.nn.sigmoid(tf.add(tf.matmul(x, w1), b1))
layer2 = tf.nn.sigmoid(tf.add(tf.matmul(layer1, w2), b2))
y = tf.add(tf.matmul(layer2, w3), b3)
loss = tf.nn.l2_loss(y - y_)  # not sure which loss function to use here
# loss = tf.losses.mean_squared_error(labels=y_, predictions=y)
train_op = tf.train.GradientDescentOptimizer(learning_rate=0.00000001).minimize(loss)  # learning rate not tuned either
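# A possible alternative (a sketch, not the original setup): use the
# mean-squared-error loss commented out above together with Adam, which is
# usually less sensitive to the exact learning rate than plain gradient descent.
# loss = tf.losses.mean_squared_error(labels=y_, predictions=y)
# train_op = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)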
# Element-wise "accuracy": the fraction of output components that lie within
# 0.01 of their targets.
y_abs = tf.abs(y - y_)
less_equal = tf.less_equal(y_abs, 0.01)
one = tf.ones_like(less_equal)
zero = tf.zeros_like(less_equal)
output = tf.where(less_equal, one, zero)
acc = tf.reduce_mean(tf.cast(output, tf.float32))  # this accuracy check is also questionable
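# The five lines above reduce to a single expression; a simpler equivalent,
# plus mean absolute error as an extra regression diagnostic (sketch, commented out):
# acc = tf.reduce_mean(tf.cast(tf.less_equal(tf.abs(y - y_), 0.01), tf.float32))
# mae = tf.reduce_mean(tf.abs(y - y_))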
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    epochs = 100
    batch_size = 500000
    for epoch in range(epochs):
        # One pass over the whole dataset per epoch.
        for step, i in enumerate(range(0, data.shape[0] - batch_size, batch_size)):
            x_batch, y_batch = t_data.next_batch(batch_size)
            _, accuracy = sess.run([train_op, acc],
                                   feed_dict={x: x_batch, y_: y_batch, keep_prob: 1.0})
            if step % 5 == 0:
                print("acc={0}".format(accuracy))
    # Quick check on a small batch drawn from the same data (no separate test split here).
    x_t, y_t = t_data.next_batch(50)
    print("test_acc={0}".format(acc.eval(feed_dict={x: x_t, y_: y_t, keep_prob: 1.0})))
    print(x_t)
    print(y.eval(feed_dict={x: x_t, y_: y_t, keep_prob: 1.0}))