

TensorFlow Training - Day 2

1. TensorFlow Example (Beginner)

# -*- coding: utf-8 -*-
import tensorflow as tf

#hello ='Hello, TensorFlow!!'
#print(hello)

hello = tf.constant('Hello, TensorFlow!!')
print(hello)
Out : Tensor("Const_1:0", shape=(), dtype=string)

sess = tf.Session()

print(hello.eval(session=sess))
Out : b'Hello, TensorFlow!!'

sess.run(hello)
Out: b'Hello, TensorFlow!!'

tf.constant : creates a constant tensor (printing it shows only the tensor's metadata; the value comes out when the session runs it)

sess = tf.Session() : creates a session
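
A small variation (my own sketch, not from the course notes): the session can also be opened with a with-block so it closes automatically.

# -*- coding: utf-8 -*-
import tensorflow as tf

hello = tf.constant('Hello, TensorFlow!!')

# the with-block closes the session automatically when it exits
with tf.Session() as sess:
    print(sess.run(hello))    # b'Hello, TensorFlow!!'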

# -*- coding: utf-8 -*-
import tensorflow as tf

va = tf.Variable(5.0, name='va')
pa = tf.placeholder(tf.float32, name='pa')
#print(pa)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(va.eval(sess))
#print(pa.eval(sess))    #error
# always an error: pa is a placeholder and no value has been fed for it
t = pa + 1.0
print(t.eval(session=sess, feed_dict={pa:8.5} ))

print('-----------------')

ta = tf.placeholder(tf.float32, 3)
tb = tf.placeholder(tf.float32, 1)
tc = tf.multiply(ta, tb)
print(sess.run(tc, feed_dict={ta:[1.0,2.0,3.3], tb:[3]}))
print('-----------------')

- tf.Variable(tf.zeros(5, dtype=tf.int32)) lets you fix the shape (and dtype) of an array-valued variable.

- tf.constant and tf.Variable differ in how their values are stored: a constant's value is baked into the graph, while a Variable holds no value until it is initialized in a session.

- When building a computation graph, the nodes must be tensors, and they should be given names (a short sketch follows below).
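
A minimal sketch of those points (the names c and v here are my own, not from the course code):

import tensorflow as tf

# a constant's value is stored in the graph definition itself
c = tf.constant([1, 2, 3], dtype=tf.int32, name='c')

# a Variable gets its shape from the initializer, but holds no value
# until the initializer actually runs inside a session
v = tf.Variable(tf.zeros(5, dtype=tf.int32), name='v')

with tf.Session() as sess:
    print(sess.run(c))                           # [1 2 3]
    sess.run(tf.global_variables_initializer())  # allocates/initializes v
    print(sess.run(v))                           # [0 0 0 0 0]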

# https://www.tensorflow.org/api_docs/python/tf/

import tensorflow as tf

vl = tf.local_variables()   # returns the (currently empty) list of local variables
vl = [[1,10],20,30]         # vl is then rebound to a plain Python list
print(vl)

vg = tf.Variable(tf.zeros(3, dtype=tf.int32), name='vg')
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(sess.run(vg))

td = tf.zeros((3,2))
print(sess.run(td))    # [[0. 0.][0. 0.][0. 0.]]

ta = tf.placeholder(tf.float32, (2,2))
tb = tf.placeholder(tf.float32, (1,2))
tc = tf.multiply(ta, tb)
print(sess.run(tc, feed_dict={ta:[[1.0,2.0],[3.,4.]], tb:[[1.1,2.3]]}))
sess.close()

To use TensorBoard, open the Anaconda prompt and activate the virtual environment I set up.

(base) C:\Users\kosta>activate tutorial

(tutorial) C:\Users\kosta>cd C:\AI_CTF\ai-dl1-master

(tutorial) C:\AI_CTF\ai-dl1-master>dir

(tutorial) C:\AI_CTF\ai-dl1-master\ch03>tensorboard --logdir=./tb1
TensorBoard 1.13.1 at http://DESKTOP-LUQ23MA:6006 (Press CTRL+C to quit)

 

# tensorboard  ex5
import tensorflow as tf

tf.reset_default_graph()

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

add = tf.add(X, Y)
mul = tf.multiply(X, Y)

# step 1: select the nodes to summarize
add_hist = tf.summary.scalar('add_scalar', add)
mul_hist = tf.summary.scalar('mul_scalar', mul)

# step 2: merge the summaries. Either of the two lines works.
merged = tf.summary.merge_all()
# merged = tf.summary.merge([add_hist, mul_hist])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # step 3: create the writer
    writer = tf.summary.FileWriter('./tb5', sess.graph)

    for step in range(100):
        # step 4: run the merged summary and record it for this step
        summary = sess.run(merged, feed_dict={X: step * 1.0, Y: 2.0})
        writer.add_summary(summary, step)

If TensorBoard does not open properly, use an absolute path for the log directory when creating the writer (and when starting the server).
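
For example (a sketch of that tip, reusing the tb5 directory from the example above; os.path.abspath is my addition):

import os
import tensorflow as tf

log_dir = os.path.abspath('./tb5')   # resolve the log directory to an absolute path

with tf.Session() as sess:
    writer = tf.summary.FileWriter(log_dir, sess.graph)
    writer.close()

# then start TensorBoard with the same absolute path:
#   tensorboard --logdir=<that absolute path>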

 

2. Boston House Price Prediction

# TensorFlow Linear Regression Example
import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt 
from sklearn import datasets

tf.reset_default_graph()

boston = datasets.load_boston()
boston_slice = [x[5] for x in boston.data]  # use only the 6th feature (RM, number of rooms)

data_x = np.array(boston_slice).reshape(-1,1)
data_y = boston.target.reshape(-1,1) 

n_sample = data_x.shape[0]  # (506,1) -> 506
x = tf.placeholder(tf.float32, shape=(n_sample, 1), name='x')  #feature
y = tf.placeholder(tf.float32, shape=(n_sample, 1), name='y')  #target
w = tf.Variable(tf.zeros((1,1)), name='weights') 
b = tf.Variable(tf.zeros((1,1)), name='bias')

y_pred = tf.matmul(x,w) + b  # model

loss = tf.sqrt(tf.reduce_mean(tf.square(y_pred - y)))  # loss function (RMSE)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)  # optimizer class
train_op = optimizer.minimize(loss)  # training op: find the minimum of the loss
summary_op = tf.summary.scalar('loss', loss)  # summary op: records how the loss changes, for visualization

def plot_graph(y, fout):
    '''Plot the data: input = feature values, output = house prices'''
    plt.scatter(data_x.reshape(1, -1)[0], boston.target.reshape(1, -1)[0])
    plt.plot(data_x.reshape(1, -1)[0], y.reshape(1, -1)[0])
    plt.savefig(fout)
    plt.clf()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    summary_writer = tf.summary.FileWriter('./ex7', sess.graph)
    y_pred_before = sess.run(y_pred, {x: data_x})
    plot_graph(y_pred_before, 'before.png')

    # update the weights and bias 8000 times with the training op
    for i in range(8000):
        loss_rst, summary_rst, _ = sess.run([loss, summary_op, train_op], feed_dict={x: data_x, y: data_y})
        summary_writer.add_summary(summary_rst, i)
        if i % 100 == 0:
            print('loss : %4.4f' % loss_rst)  # loss_rst is already a scalar
            y_pred_after = sess.run(y_pred, {x: data_x})

    y_pred_after = sess.run(y_pred, {x: data_x})
    plot_graph(y_pred_after, 'after.png')
    
    print('Predict Score => ')
    test_x = [6.575, 6.421, 7.001]
    test_y = test_x * w + b    # broadcasting: (3,) * (1,1) + (1,1) -> shape (1, 3)
    print(sess.run(test_y))
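
The broadcast above gives a 1x3 result. As a variation consistent with how y_pred was defined (my own sketch, run inside the same with-block right after the line above), the test inputs can be reshaped into a column and pushed through the same matmul form:

    test_x_col = np.array([[6.575], [6.421], [7.001]], dtype=np.float32)  # shape (3, 1), one row per house
    test_y_col = tf.matmul(test_x_col, w) + b                             # same form as the model: X*w + b
    print(sess.run(test_y_col))                                           # one predicted price per row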