Mount Google Drive

Open In Colab

from google.colab import drive
drive.mount('/gdrive', force_remount=True)
# /gdrive/My Drive/ (folder name)
Mounted at /gdrive
# Set up packages so the environment can render frames as images
!apt-get install python-opengl -y
!apt install xvfb -y
!pip install pyvirtualdisplay
!pip install pyglet  # note: pyglet (gym's rendering backend), not the unrelated "piglet" templating package
Reading package lists... Done
Building dependency tree
Reading state information... Done
Setting up python-opengl (3.1.0+dfsg-1) ...
Setting up xvfb (2:1.19.6-1ubuntu4.4) ...
Successfully installed EasyProcess-0.3 pyvirtualdisplay-0.2.5
# Import the required modules
import tensorflow as tf
import gym
import cv2
from pyvirtualdisplay import Display
from IPython import display
import matplotlib.pyplot as plt
from collections import deque
import numpy as np
import random
%matplotlib inline
Display().start()
xdpyinfo was not found, X start can not be checked! Please install xdpyinfo!
<Display cmd_param=['Xvfb', '-br', '-nolisten', 'tcp', '-screen', '0', '1024x768x24', ':1001'] cmd=['Xvfb', '-br', '-nolisten', 'tcp', '-screen', '0', '1024x768x24', ':1001'] oserror=None return_code=None stdout="None" stderr="None" timeout_happened=False>
# Create the CartPole game environment
env = gym.make("CartPole-v1")
env.render('rgb_array')
# 2 - number of possible actions (output)
action_num = env.action_space.n
# 4 - number of state variables (input)
state_num = env.observation_space.shape[0]
state_num
4
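For reference, the four state variables in CartPole-v1 are cart position, cart velocity, pole angle, and pole angular velocity, and the two discrete actions push the cart left or right. A quick way to confirm this from the environment itself:

# Inspect the spaces directly
print(env.observation_space)  # Box(4,) - position, velocity, angle, angular velocity
print(env.action_space)       # Discrete(2) - 0 pushes left, 1 pushes right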
# Build the DQN model (a network that approximates/predicts the Q function)
dqn_model = tf.keras.models.Sequential()
dqn_model.add(tf.keras.layers.Dense(128, input_shape=(state_num,), activation='relu'))
dqn_model.add(tf.keras.layers.Dense(action_num))
dqn_model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(0.001))

# Define a separate target model so the training target does not move
target_model = tf.keras.models.Sequential()
target_model.add(tf.keras.layers.Dense(128, input_shape=(state_num,), activation='relu'))
target_model.add(tf.keras.layers.Dense(action_num))
target_model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(0.001))

# Sync the target model's weights with the DQN model
target_model.set_weights(dqn_model.get_weights())
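An optional sanity check (a minimal sketch using only the calls above) that the two networks really start from identical weights; tf.keras.models.clone_model(dqn_model) would be another way to build the copy without repeating the layer definitions:

# Optional: confirm both models hold identical weights after the sync
for w_dqn, w_target in zip(dqn_model.get_weights(), target_model.get_weights()):
    assert np.array_equal(w_dqn, w_target)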
# Number of training episodes
episode_count = 1000

# Replay memory for storing gameplay transitions
# Remembers up to the 10,000 most recent transitions - older ones are dropped automatically
memory = deque(maxlen=10000)

# List for recording scores
scores = []

# Epsilon for exploration in epsilon-greedy - shrinks by epsilon_decay each training step down to the minimum
epsilon = 0.9
epsilon_min = 0.1
epsilon_decay = epsilon_min / epsilon
epsilon_decay = epsilon_decay ** (1. / float(300))

# Batch size
batch_size = 64

# Reward discount factor
reward_discount_rate = 0.999

# Target model update frequency
train_count = 0
target_update_count = 30
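The decay schedule works because multiplying epsilon by (epsilon_min / epsilon) ** (1/300) once per training step reaches exactly epsilon_min after 300 steps. A minimal check of that arithmetic:

# Sanity check: 300 decay steps take epsilon from 0.9 down to ~0.1
eps = 0.9
decay = (0.1 / 0.9) ** (1. / 300)
for _ in range(300):
    eps *= decay
print(eps)  # ~0.1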

for episode in range(episode_count):
    state = env.reset()
    # Reshape to match the model's input dimensions
    state = np.reshape(state, [1, state_num])
    done = False
    total_reward = 0
    while not done:
        # If a random draw is below epsilon, explore randomly; otherwise ask the DQN model and take the highest-scoring action
        if np.random.rand() < epsilon:
            action = env.action_space.sample()
        else:
            q_val = dqn_model.predict(state)
            action = np.argmax(q_val[0])

        next_state, reward, done, _ = env.step(action)
        next_state = np.reshape(next_state, [1, state_num])
        # Scale the reward down and package the transition
        transition = (state, action, reward/100., next_state, done)

        # Record the transition in replay memory
        memory.append(transition)

        # Make the next state the current state and keep going
        state = next_state
        total_reward += reward

    # Train once the replay memory has enough transitions (it must at least exceed the batch size)
    if len(memory) >= 1000:
        sample = random.sample(memory, batch_size)
        # Lists that will hold the training batch
        state_batch = []
        q_val_batch = []
        # Learn from the sampled transitions
        for _state, _action, _reward, _next_state, _done in sample:
            q_val = dqn_model.predict(_state)

            # DQN target: q = r + gamma * max(q')
            target_q_val = _reward + reward_discount_rate * np.max(target_model.predict(_next_state)[0])

            # Double DQN variant: pick the action with the online model, evaluate it with the target model
            # target_q_val = np.argmax(dqn_model.predict(_next_state)[0])
            # target_q_val = target_model.predict(_next_state)[0][target_q_val]
            # target_q_val = _reward + reward_discount_rate * target_q_val

            if _done:
                q_val[0][_action] = _reward
            else:
                q_val[0][_action] = target_q_val

            state_batch.append(_state[0])
            q_val_batch.append(q_val[0])

        # Train on the batch, decay epsilon, and periodically sync the target model with the DQN model
        dqn_model.train_on_batch(np.array(state_batch), np.array(q_val_batch))
        if epsilon > epsilon_min:
            epsilon *= epsilon_decay
        train_count = train_count + 1

        if train_count % target_update_count == 0:
            target_model.set_weights(dqn_model.get_weights())
            print('Target model updated')
    scores.append(total_reward)
    # Save the model whenever an episode survives past 450 steps
    if total_reward > 450:
        dqn_model.save('/gdrive/My Drive/hjk_dqn_r1_model.h5')
    mean_score = np.mean(scores)

    print(episode+1, total_reward, epsilon)

    # Every 20 episodes, report the mean survival time and stop once it reaches 400
    if (episode+1) % 20 == 0:
        print("Episode %d: Mean survival = %0.2lf in %d episodes" % (episode+1, mean_score, 20))
        if mean_score >= 400:
            break
        scores = []

env.close() 
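One practical note on the loop above: it calls predict() once per sampled transition, so each training step issues 2 * batch_size forward passes. A batched version needs only two (a sketch; build_training_batch is a hypothetical helper name, the math is unchanged):

# Sketch: build the training batch with one forward pass per model
def build_training_batch(sample, dqn_model, target_model, gamma):
    states = np.vstack([s[0] for s, a, r, ns, d in sample])
    next_states = np.vstack([ns[0] for s, a, r, ns, d in sample])
    q_vals = dqn_model.predict(states)          # shape (batch, action_num)
    next_q = target_model.predict(next_states)  # shape (batch, action_num)
    for i, (_s, a, r, _ns, d) in enumerate(sample):
        q_vals[i][a] = r if d else r + gamma * np.max(next_q[i])
    return states, q_vals

# Usage in place of the per-transition loop:
# states, targets = build_training_batch(sample, dqn_model, target_model, reward_discount_rate)
# dqn_model.train_on_batch(states, targets)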
# Load the saved model and record one full episode to an AVI file
dqn_model = tf.keras.models.load_model('/gdrive/My Drive/hjk_dqn_r1_model.h5')
env = gym.make('CartPole-v1')
state = env.reset()
state = np.reshape(state, [1, state_num])
done = False
# img = plt.imshow(env.render('rgb_array')) # only call this once
total_reward = 0
img_avi = np.zeros((400, 600, 3))
fcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('/gdrive/My Drive/hjk_dqn_r1.avi', fcc, 10.0, (600, 400))
while not done:
    # img.set_data(env.render('rgb_array')) # just update the data
    # display.display(plt.gcf())
    # display.clear_output(wait=True)
    img_avi = env.render('rgb_array')
    action = np.argmax(dqn_model.predict(state)[0])
    # action = env.action_space.sample()
    next_state, reward, done, _ = env.step(action)
    next_state = np.reshape(next_state, [1, state_num])
    state = next_state
    total_reward += reward
    # OpenCV expects BGR channel order, so convert the RGB frame before writing
    out.write(cv2.cvtColor(np.uint8(img_avi), cv2.COLOR_RGB2BGR))
print(total_reward)
out.release()
cv2.destroyAllWindows()
500.0
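To watch the recording inside the notebook, one option (a sketch; it assumes ffmpeg is present on the Colab VM, which it normally is, and is needed because browsers cannot play a DIVX AVI directly) is to convert the file to MP4 and embed it:

# Convert the AVI to MP4 and embed it in the notebook
!ffmpeg -y -i '/gdrive/My Drive/hjk_dqn_r1.avi' /content/hjk_dqn_r1.mp4

import base64
from IPython.display import HTML
mp4 = open('/content/hjk_dqn_r1.mp4', 'rb').read()
data_url = 'data:video/mp4;base64,' + base64.b64encode(mp4).decode()
HTML('<video width=600 controls><source src="%s" type="video/mp4"></video>' % data_url)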