[Tensorflow] Simple RNN Coding with TensorFlow

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
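# MNIST digit classification with a single-layer LSTM, written against the
# pre-1.0 TensorFlow API (tf.nn.rnn / tf.nn.rnn_cell).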
 
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 
learning_rate = 0.001
training_iters = 100000
batch_size = 128
display_step = 10
 
n_input = 28
n_steps = 28
n_hidden = 128
n_classes = 10
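# each 28x28 image is fed row by row: n_steps = 28 rows (time steps),
# each an n_input = 28-pixel vector; n_hidden is the LSTM state size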
 
x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
 
weights = tf.Variable(tf.random_normal([n_hidden, n_classes]))
biases = tf.Variable(tf.random_normal([n_classes]))
 
x = tf.transpose(x, [1, 0, 2])  # make the data time-major: (n_steps, batch_size, n_input)
'''
Two ways to use tf.transpose:

x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x)  # [[1, 4]
                 #  [2, 5]
                 #  [3, 6]]

# with a 3-D tensor, perm gives the new axis order, e.g.
# x = tf.constant([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
tf.transpose(x, perm=[0, 2, 1])  # [[[1,  4],
                                 #   [2,  5],
                                 #   [3,  6]],
                                 #  [[7, 10],
                                 #   [8, 11],
                                 #   [9, 12]]]
'''
x = tf.reshape(x, [-1, n_input])
x = tf.split(0, n_steps, x)
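# shape flow: (batch_size, n_steps, n_input)
#   -> transpose -> (n_steps, batch_size, n_input)
#   -> reshape   -> (n_steps * batch_size, n_input)
#   -> split     -> a list of n_steps tensors of shape (batch_size, n_input),
#                   which is the input format tf.nn.rnn expects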
 
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
outputs, states = tf.nn.rnn(lstm_cell, x, dtype = tf.float32)
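# outputs is a list of n_steps tensors; take the output at the last time step
# and apply a linear layer to get the class logits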
pred = tf.matmul(outputs[-1], weights) + biases
 
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
train = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
 
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
 
init = tf.initialize_all_variables()
 
with tf.Session() as sess:
    sess.run(init)
    step = 1
 
    while step * batch_size < training_iters:
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        batch_x = batch_x.reshape((batch_size, n_steps, n_input))
 
        sess.run(train, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("step: %d, loss: %f, acc: %f" % (step, loss, acc))
 
        step += 1
    print("train complete!")
 
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
    test_label = mnist.test.labels[:test_len]
    print("test accuracy: ", sess.run(accuracy, feed_dict={x: test_data, y: test_label}))
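The listing above uses the pre-1.0 TensorFlow RNN API (tf.nn.rnn, tf.split(0, ...), tf.initialize_all_variables), so it will not run as-is on recent releases. For reference only, here is a minimal sketch of the same idea in the tf.keras API of TensorFlow 2.x; the Keras MNIST loader, the single training epoch, and the sparse-label loss are assumptions of this sketch, not part of the original code.

import tensorflow as tf

# load MNIST; each 28x28 image is treated as a 28-step sequence of 28-pixel rows
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

model = tf.keras.Sequential([
    tf.keras.layers.LSTM(128, input_shape=(28, 28)),  # 128 hidden units, as above
    tf.keras.layers.Dense(10)                         # logits for the 10 digit classes
])

model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"])

model.fit(x_train, y_train, batch_size=128, epochs=1)  # epochs=1 is an arbitrary choice
print("test accuracy:", model.evaluate(x_test, y_test, batch_size=128)[1])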