
from lib.UpbitTrade import UpbitTrade
from lib.ColoredLogger import COLORS_LOG as cl
from lib.Upsert import aio_data_update
# from lib.upbitSellBuy import *
from lib.dl_upbitSellBuy import predict
from lib.Common import send_slack
from lib.Common import make_real_rate
from lib.Common import dfTrend
from lib.Common import dfInclease
import pyupbit
import time
import sys
import os
import aiomysql
import pymysql
import asyncio

import logging
import logging.config
import json

import common.constrant as Const
import config.localconf as conf
import pandas as pd
import numpy as np

config = json.load(open('config/logging.json'))
logging.config.dictConfig(config)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

table_name="xxx.TB_ETH_TRADE"
table_report="xxx.TB_ETH_DAILY_REPORT"
table_config="xxx.TB_ETH_CASH_CONFIG"
table_crashed="xxx.TB_CRASHED_REPORT"
table_realrate="xxx.TB_REAL_REPORT"

pd.set_option('display.max_columns', 13)
pd.set_option('display.width', 200)

def get_avg_buy_price():
    try : 
        amount = upbit.get_amount('ALL')
        unit = trade.get_balance()[0]
        if unit == 0:
            return 0
        else:
            return amount / unit
    except Exception as ex:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        logger.error("get_avg_buy_price exception! %s ` : %s %d" % (str(ex) , fname, exc_tb.tb_lineno))
        return 0

def get_avg_sell_price():
    try:
        balances, req = upbit.get_balances(contain_req=True)
        if len(balances) == 0:
            return 0

        avg_sell_price = 0
        for x in balances:
            if x['currency'] == conf.TRADE_TICKER:
                avg_sell_price = float(x['avg_sell_price'])
                break
        return avg_sell_price
    except Exception as ex:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        logger.error("get_avg_sell_price exception! %s ` : %s %d" % (str(ex) , fname, exc_tb.tb_lineno))
        return 0

async def read_all(sqlcon) -> pd.DataFrame:
    #read Backup DB 
    try:
        cursor = await sqlcon.cursor(aiomysql.cursors.DictCursor)
        str_query = "select * from %s" % table_name
        await cursor.execute(str_query)
        data = await cursor.fetchall()
        datadf = pd.DataFrame(data)
        return datadf
    except Exception as ex:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        logger.error("read from db exception! %s ` : %s %d" % (str(ex) , fname, exc_tb.tb_lineno))
    finally:
        await cursor.close()

async def read_from(sqlcon, ntime) -> pd.DataFrame:
    try:
        cursor = await sqlcon.cursor(aiomysql.cursors.DictCursor)
        str_query = "select * from %s where `time` > %d" % (table_name, ntime)
        # logger.info("str_query:%s", str_query)
        await cursor.execute(str_query)
        data = await cursor.fetchall()
        datadf = pd.DataFrame(data)
        # logger.info("return df")
        # logger.info(datadf)
        return datadf
    except Exception as ex:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        logger.error("read from db exception! %s ` : %s %d" % (str(ex) , fname, exc_tb.tb_lineno))
    finally:
        await cursor.close()

async def trade_proc():
    last_time = ""
    krw = 0
    first_time = True
    avg_buy_price = 0.0
    avg_sell_price = 0.0
    slack_flag = 0
    old_rate = 0
    rate_trend = ""

    try:
        pool = await aiomysql.create_pool(host="193.xxx.xxx.xx", user="xxxxxx", password='xxxxxxx', db='xxxxx')
        async with pool.acquire() as sqlcon:
            
            df = await read_all(sqlcon)
            idx = df.index.max()
            idx_time = df.loc[idx, 'time']
            sqlcon.close()
        while True:
            async with pool.acquire() as sqlcon:
                try:
                    await get_trade_yn(sqlcon)
                    start_time = time.time()

                    add_df = await read_from(sqlcon, idx_time)
                    df = pd.concat([df, add_df])
                    df = df.drop_duplicates(['time'], keep='first', inplace=False, ignore_index=True)
                    try:
                        avg_buy_price = get_avg_buy_price()
                        logger.info(cl["BOLD"] + cl["RED"] + "평균매수단가:%.2f" + cl["RESET"], avg_buy_price)
                    except Exception as ex:
                        exc_type, exc_obj, exc_tb = sys.exc_info()
                        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                        logger.error("get_avg_buy_price -> exception! %s : %s %d" % (str(ex) , fname, exc_tb.tb_lineno))

                    ### check balance and valuation
                    current_unit = 0.0
                    try:
                        balance = trade.get_balance()
                        price = pyupbit.get_current_price("KRW-" + conf.TRADE_TICKER)
                        logger.info("current price: %.4f" % price)
                        limit_cash = (balance[0] * price + balance[1]) * conf.REINVESTMENT_RATE if balance[0] != 0 or balance[1] != 0 else base_balance
                        logger.info("LIMIT_CASH: %.2f", limit_cash)
                        trade.set_limitcash(Const.LIMIT_CASH)
                        rate = ((balance[0]*price + balance[1] - base_balance)*100)/base_balance if base_balance is not None and base_balance != 0 else 0
                        trend_rate = old_rate - rate
                        old_rate = rate
                        if trend_rate == 0:
                            rate_trend += "="
                        elif trend_rate > 0:
                            rate_trend += "V"
                        else:
                            rate_trend += "^"

                        if len(rate_trend) > 5:
                            rate_trend = rate_trend[-5:]

                        logger.info(cl["ITELIC"] + cl["RED"] + "평가금액:%.2f 수익률: %.2f%% %s" + cl["RESET"], balance[0]*price + balance[1], rate, rate_trend)
                        #현재의 원화 잔고 얻기
                        krw = balance[1]
                        current_unit = balance[0]
                        logger.info(cl["BOLD"] + cl["RED"] + "잔고조회: %.5f %.2f" + cl["RESET"], balance[0], balance[1])
                    except Exception as ex:
                        exc_type, exc_obj, exc_tb = sys.exc_info()
                        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                        logger.error("get_current_price -> exception! %s : %s %d" % (str(ex) , fname, exc_tb.tb_lineno))

                    logger.info(cl["BOLD"] + cl["GREEN"] + "clsmin %.2f(%s), clsmax:%.2f(%s), close:%.2f(%s)" + cl["RESET"], 
                        df.loc[idx, 'clsmin'], dfInclease(df.index.max(), df, "clsmin"), df.loc[idx, 'clsmax'], 
                        dfInclease(df.index.max(), df, "clsmax"), df.loc[idx, 'close'], dfInclease(df.index.max(), df, "close"))
                    logger.info(cl["BOLD"] + cl["GREEN"] + "min_rate:%.2f%%(%s) %s max_rate:%.2f%%(%s) %s" + cl["RESET"], 
                        df.loc[idx, 'min_rate'], dfInclease(df.index.max(), df, "min_rate"), 
                        dfTrend(df.index.max(), df, "minratedeg"), df.loc[idx, 'max_rate'], 
                        dfInclease(df.index.max(), df, "max_rate"), 
                        dfTrend(df.index.max(), df, "maxratedeg"))
                    logger.info("time date:" + time.strftime("%Y%m%d%H%M%S", time.localtime(idx_time)))
                    logger.info(cl["BOLD"] + cl["GREEN"] + "trade_ok:%s" + cl["RESET"], str(trade_ok))
                    dx = df
                    pred = predict(logger, dx, idx)
                    logger.info(cl["BOLD"] + cl["GREEN"] + "predict action:%s" + cl["RESET"], str(pred.get_dl_action()))
                    if pred.fsell() and current_unit > 0 :
                        logger.info("매도 타이밍: %s ", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(idx_time)))
                        if trade_ok == True:
                            ret = trade.sell_crypto_currency()
                            if ret is not None:
                                cursor = await sqlcon.cursor(aiomysql.cursors.DictCursor)
                                try:
                                    ret = upbit.get_order("KRW-" + conf.TRADE_TICKER)
                                    while len(ret) != 0 : 
                                        ret = upbit.get_order("KRW-" + conf.TRADE_TICKER)
                                        await asyncio.sleep(0.01)
                                    str_query = """SELECT * 
                                        FROM main.TB_TRADE_HIST 
                                        WHERE user_id='%s' 
                                        AND qty > 0 
                                        AND end_dt = 0""" % user
                                    await cursor.execute(str_query)
                                    data = await cursor.fetchall()
                                    if len(data) > 0:
                                        fetch_result = pd.DataFrame(data)
                                        start_dt = fetch_result['start_dt'][0]
                                        qty = fetch_result['qty'][0]
                                        # logger.info("start_dt:%d", start_dt)
                                        str_query = """
                                        UPDATE main.TB_TRADE_HIST 
                                        SET 
                                            sell_qty = qty
                                            , qty=0
                                            , close=%f 
                                            , sell_sum =%f
                                            , end_dt=%d
                                        WHERE user_id = '%s' AND start_dt = %d""" % (
                                            df.loc[idx, 'close'], df.loc[idx, 'close']*qty, time.time(),  user, start_dt
                                        )
                                        await cursor.execute(str_query)
                                        await sqlcon.commit()
                                except Exception as ex:
                                    exc_type, exc_obj, exc_tb = sys.exc_info()
                                    fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                                    logger.error("read db error -> exception! " + str(ex) + "` %s: %d", fname, exc_tb.tb_lineno)
                                finally:
                                    await cursor.close()
                    elif pred.fbuy():
                        logger.info("매수 타이밍 %s", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(idx_time)))
                        if trade_ok == True:
                            ret = trade.buy_crypto_currency()
                            if ret is not None:
                                try:
                                    ret = upbit.get_order("KRW-" + conf.TRADE_TICKER)
                                    while len(ret) != 0 :
                                        ret = upbit.get_order("KRW-" + conf.TRADE_TICKER)
                                        await asyncio.sleep(0.01)
                                    avg_buy_price = get_avg_buy_price()
                                    balance = trade.get_balance()
                                    trdhist_clmns = ['user_id', 'start_dt', 'ticker', 'qty', 'cost', 'cost_sum', 'sell_qty', 'close', 'sell_sum', 'end_dt']
                                    trdhist_data  = [[user, df.loc[idx, 'time'], conf.TRADE_TICKER, balance[0], avg_buy_price, balance[0] * avg_buy_price,
                                                    0, 0, 0, 0]]
                                    trdhist_df = pd.DataFrame.from_records(data=trdhist_data, columns=trdhist_clmns)
                                    await aio_data_update(trdhist_df, sqlcon, "main.TB_TRADE_HIST", "replace")
                                except Exception as ex:
                                    exc_type, exc_obj, exc_tb = sys.exc_info()
                                    fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                                    logger.error("read db error -> exception! " + str(ex) + "` %s: %d", fname, exc_tb.tb_lineno)

                    logger.info(df.tail(5))

                    idx = df.index.max()
                    idx_time = df.loc[idx, 'time']
                    sqlcon.close()
                except Exception as ex:
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                    logger.error("`main -> exception! %s ` : %s %d" % (str(ex) , fname, exc_tb.tb_lineno))
                    time.sleep(60)
                finally:
                    end_time = time.time()
                    if (end_time - start_time) > 0 and (end_time - start_time) <= Const.SLEEP_TIME:
                        await asyncio.sleep(Const.SLEEP_TIME - (end_time - start_time))
    except Exception as ex:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        logger.error("`main -> exception! %s ` : %s %d" % (str(ex) , fname, exc_tb.tb_lineno))
        time.sleep(60)
    finally:
        pool.close()
        await pool.wait_closed()         

async def get_trade_yn(sqlcon):
    global trade_ok
    global base_balance
    user = os.getenv('user')
    
    str_sql = """
        SELECT 
            a.TRADE_YN
            ,a.BASE_BALANCE
        FROM main.TN_PAYMENT_ACNT a
        WHERE a.USER_ID = '%s'
    """ % user
    cursor = await sqlcon.cursor(aiomysql.cursors.DictCursor)
    await cursor.execute(str_sql)
    user_info = pd.DataFrame(await cursor.fetchall())
    await cursor.close()

    trade_yn = np.asarray(user_info)[0][0]
    base_balance = float(np.asarray(user_info)[0][1])
    trade_ok = (trade_yn == 'Y')

def get_env():
    global upbit
    global trade
    global user
    user = os.getenv('user')
    logger.info("user[%s]", user)
    
    sqlcon = pymysql.connect(host='193.xxx.xxx.xx', user='xxxxxx', password='xxxxxxx', db='xxxx')
    str_sql = """
        SELECT 
            a.PUB_KEY
            ,a.APP_KEY
            ,a.SECU_KEY
            ,a.BASE_BALANCE
        FROM TN_PAYMENT_ACNT a
        WHERE a.USER_ID = '%s'
    """ % user
    cursor = sqlcon.cursor(pymysql.cursors.DictCursor)
    cursor.execute(str_sql)
    user_info = pd.DataFrame(cursor.fetchall())
    cursor.close()

    pub_key = np.asarray(user_info)[0][0]
    app_key = np.asarray(user_info)[0][1]
    sec_key = np.asarray(user_info)[0][2]
    base_balance = float(np.asarray(user_info)[0][3])

    # aes = AESCryptoCBC(pub_key)

    enc_app_key = app_key
    enc_sec_key = sec_key

    upbit = pyupbit.Upbit(enc_app_key, enc_sec_key)
    trade  = UpbitTrade(upbit, logger, conf.TRADE_TICKER, base_balance, base_balance * conf.REINVESTMENT_RATE)

if __name__ == '__main__':
    get_env()
    asyncio.run(trade_proc())

 

import torch
import torch.nn as nn
import torchvision
import torch.nn.functional as F

import plotly.graph_objects as go
import plotly.subplots as ms
import plotly as plt
from PIL import Image
import io
import os.path as path
import talib
import numpy as np

data_size = 100
action_kind = 4
screen_height = 50
screen_width  = 70

class DQN(nn.Module):
    def __init__(self, device, h, w, outputs, qsize):
        super(DQN, self).__init__()
        self.conv1 = nn.Conv2d(4, h*w, kernel_size=5, stride=2)
        self.bn1 = nn.BatchNorm2d(h*w)
        self.conv2 = nn.Conv2d(h*w, qsize, kernel_size=5, stride=2)
        self.bn2 = nn.BatchNorm2d(qsize)
        self.device = device
        self.conv3 = nn.Conv2d(qsize, qsize, kernel_size=5, stride=2)
        self.bn3 = nn.BatchNorm2d(qsize)

        linear_input_size = 3 * qsize
        self.head = nn.Linear(linear_input_size, outputs)

    # Called with either one element to determine next action, or a batch
    # during optimization. Returns tensor([[left0exp,right0exp]...]).
    def forward(self, x):
        x = x.to(self.device)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        return self.head(x.view(x.size(0), -1))

def get_chart(df, idx, max_data:int=300, i_w:int=140, i_h:int=100):
    ndf = df.head(idx).tail(max_data)
    ndf.reset_index(drop=True, inplace=True)
    
    candle = go.Candlestick(x=ndf.index,open=ndf['open'],high=ndf['high'],low=ndf['low'],close=ndf['close'], increasing_line_color = 'red',decreasing_line_color = 'blue', showlegend=False)
    upper = go.Scatter(x=ndf.index, y=ndf['upper'], line=dict(color='red', width=2), name='upper', showlegend=False)
    ma20 = go.Scatter(x=ndf.index, y=ndf['ma20'], line=dict(color='black', width=2), name='ma20', showlegend=False)
    lower = go.Scatter(x=ndf.index, y=ndf['lower'], line=dict(color='blue', width=2), name='lower', showlegend=False)

    volume = go.Bar(x=ndf.index, y=ndf['volume'], marker_color='red', name='volume', showlegend=False)

    MACD = go.Scatter(x=ndf.index, y=ndf['macd'], line=dict(color='blue', width=2), name='MACD', legendgroup='group2', legendgrouptitle_text='MACD')
    MACD_Signal = go.Scatter(x=ndf.index, y=ndf['signal'], line=dict(dash='dashdot', color='green', width=2), name='MACD_Signal')
    MACD_Oscil = go.Bar(x=ndf.index, y=ndf['flag'], marker_color='purple', name='MACD_Oscil')

    fast_k = go.Scatter(x=ndf.index, y=ndf['fast_k'], line=dict(color='skyblue', width=2), name='fast_k', legendgroup='group3', legendgrouptitle_text='%K %D')
    slow_d = go.Scatter(x=ndf.index, y=ndf['slow_d'], line=dict(dash='dashdot', color='black', width=2), name='slow_d')

    PB = go.Scatter(x=ndf.index, y=ndf['PB']*100, line=dict(color='blue', width=2), name='PB', legendgroup='group4', legendgrouptitle_text='PB, MFI')
    MFI10 = go.Scatter(x=ndf.index, y=ndf['MFI10'], line=dict(dash='dashdot', color='green', width=2), name='MFI10')

    RSI = go.Scatter(x=ndf.index, y=ndf['rsi14'], line=dict(color='red', width=2), name='RSI', legendgroup='group5', legendgrouptitle_text='RSI')
    
    # layout
    fig = ms.make_subplots(rows=5, cols=2, specs=[[{'rowspan':4},{}],[None,{}],[None,{}],[None,{}],[{},{}]], shared_xaxes=True, horizontal_spacing=0.03, vertical_spacing=0.01)

    fig.add_trace(candle,row=1,col=1)
    fig.add_trace(upper,row=1,col=1)
    fig.add_trace(ma20,row=1,col=1)
    fig.add_trace(lower,row=1,col=1)

    fig.add_trace(volume,row=5,col=1)

    fig.add_trace(candle,row=1,col=2)
    fig.add_trace(upper,row=1,col=2)
    fig.add_trace(ma20,row=1,col=2)
    fig.add_trace(lower,row=1,col=2)

    fig.add_trace(MACD,row=2,col=2)
    fig.add_trace(MACD_Signal,row=2,col=2)
    fig.add_trace(MACD_Oscil,row=2,col=2)

    fig.add_trace(fast_k,row=3,col=2)
    fig.add_trace(slow_d,row=3,col=2)

    fig.add_trace(PB,row=4,col=2)
    fig.add_trace(MFI10,row=4,col=2)

    fig.add_trace(RSI,row=5,col=2)

    # Trend following (commented out)
    # trend_fol = 0
    # trend_refol = 0
    # for i in ndf.index:
    #     if ndf['PB'][i] > 0.8 and ndf['MFI10'][i] > 80:
    #         trend_fol = go.Scatter(x=[ndf.index[i]], y=[ndf['close'][i]], marker_color='orange', marker_size=20, marker_symbol='triangle-up', opacity=0.7, showlegend=False)
    #         fig.add_trace(trend_fol,row=1,col=1)
    #     elif ndf['PB'][i] < 0.2 and ndf['MFI10'][i] < 20:
    #         trend_fol = go.Scatter(x=[ndf.index[i]], y=[ndf['close'][i]], marker_color='darkblue', marker_size=20, marker_symbol='triangle-down', opacity=0.7, showlegend=False)
    #         fig.add_trace(trend_fol,row=1,col=1)

    # Counter-trend (commented out)
    # for i in ndf.index:
    #     if ndf['PB'][i] < 0.05 and ndf['IIP21'][i] > 0:
    #         trend_refol = go.Scatter(x=[ndf.index[i]], y=[ndf['close'][i]], marker_color='purple', marker_size=20, marker_symbol='triangle-up', opacity=0.7, showlegend=False)  # purple
    #         fig.add_trace(trend_refol,row=1,col=1)
    #     elif ndf['PB'][i] > 0.95 and ndf['IIP21'][i] < 0:
    #         trend_refol = go.Scatter(x=[ndf.index[i]], y=[ndf['close'][i]], marker_color='skyblue', marker_size=20, marker_symbol='triangle-down', opacity=0.7, showlegend=False)  # sky blue
    #         fig.add_trace(trend_refol,row=1,col=1)

    # fig.add_trace(trend_fol,row=1,col=1)    # would mark trend-following signals on the candle chart
    # fig.add_trace(trend_refol,row=1,col=1)  # would mark counter-trend signals on the candle chart
    
    # fig.update_layout(autosize=True, xaxis1_rangeslider_visible=False, xaxis2_rangeslider_visible=False, margin=dict(l=50,r=50,t=50,b=50), template='seaborn', title=f'({ticker})의 날짜: ETH [추세추종전략:오↑파↓] [역추세전략:보↑하↓]')
    # fig.update_xaxes(tickformat='%y년%m월%d일', zeroline=True, zerolinewidth=1, zerolinecolor='black', showgrid=True, gridwidth=2, gridcolor='lightgray', showline=True,linewidth=2, linecolor='black', mirror=True)
    # fig.update_yaxes(tickformat=',d', zeroline=True, zerolinewidth=1, zerolinecolor='black', showgrid=True, gridwidth=2, gridcolor='lightgray',showline=True,linewidth=2, linecolor='black', mirror=True)
    # fig.update_traces(xhoverformat='%y년%m월%d일')
    # Several earlier attempts (Image.frombytes / Image.fromarray at various modes and
    # sizes, omitted here) didn't pan out; opening the PNG bytes with Image.open did.
    img = Image.open(io.BytesIO(plt.io.to_image(fig, format='png')))
    img = img.convert("RGB")  # convert() returns a new image, so the result must be reassigned
    img.thumbnail((i_h, i_w), Image.ANTIALIAS)  # Image.LANCZOS on Pillow >= 10
    return img

from lib.addAuxiliaryData import Auxiliary

class predict():
    def __init__(self, logger, df, idx) -> None:
        df['open'] = df['start']
        df['sma3'] = talib.SMA(np.asarray(df['close']), 3)
        aux = Auxiliary(logger)
        aux.add_macd(df,12,26,9)
        aux.add_bbands(df)
        aux.add_relative_strength(df)
        aux.add_stock_cast(df)
        aux.add_iip(df)
        aux.add_mfi(df)
        # df.dropna(inplace=True)
        df.reset_index(drop=True,inplace=True)
        # Use the GPU if available, otherwise fall back to the CPU.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        converter = torchvision.transforms.ToTensor()
        # Move the model onto the chosen device.
        self.model = DQN(device, screen_height, screen_width, action_kind, 4).to(device)
        # model = CNNRNN(device, screen_height, screen_width, action_kind, 4).to(device)
        # model = CNN().to(device)
        self.model = nn.DataParallel(self.model, device_ids=[0,1]).to(device)

        if path.exists("/home/yubank/pt/train_dqn_{0:02d}_{1}.pt".format(action_kind, device)):
            self.model.load_state_dict(torch.load("/home/yubank/pt/train_dqn_{0:02d}_{1}.pt".format(action_kind, device)))

        self.model.eval()

        self.x = get_chart(df, idx, data_size, i_w = screen_width, i_h = screen_height)
        self.x = converter(self.x).unsqueeze(0).to(device).squeeze(1)

    def get_dl_action(self):
        output = self.model(self.x)
        _, action = torch.max(output, 1)
        return action.cpu().numpy()[0]

    def fbuy(self):
        return self.get_dl_action() == 0

    def fsell(self):
        return self.get_dl_action() == (action_kind - 1)

 

 

There were a few twists and turns along the way.

 

After saving the trained model to a .pt file, I wrote the predict class.

 

Python doesn't force a strict separation between static and instance methods, so a class like this can also be used simply as a bundle of functions.


After deploying it, real trades went through on Upbit, and so far it has traded without a single loss.

 

I've been running it at home for a week while looking into colocation, which would cost at least an extra 300,000 KRW (a rack-mount case and power supply; roughly 150,000 KRW for a 4U colocation slot plus a 50,000 KRW installation fee covering in-IDC cabling and power work, with installing my own server charged separately).

 

If the returns don't materialize, I won't be able to go ahead with colocation.

 

Closing remarks

If anything is unclear or seems off, please leave a comment.

I'll reply with a detailed, friendly answer.

Attachment: data.dat (2.86 MB)

https://colab.research.google.com/drive/108pVqpUtkD6Tz-cKtVfH-0_DXHZPFq_p?usp=share_link (cnn_img.ipynb)

https://colab.research.google.com/?hl=ko (Google Colaboratory)

Opening the second link brings up a new Colab notebook. You can cancel it and upload a notebook you wrote locally, or keep going and write new code.


The first link is the CNNRNN model I wrote. Colab's session time is not very long, so use it only to confirm the code runs on the training data; beyond that you may need a local machine, Azure, or a paid Colab tier. Colab is free, yet it still provides decent hardware and runtime sessions of 4-5 hours.



 

In the previous installment's CNN+RNN model, the tunable parameter is mainly the hidden_dim value. That raises a question: is there any performance difference among CNN+RNN, CNN+LSTM, and CNN+GRU?

import torch
import torch.nn as nn
import torch.nn.functional as F

class CNNRNN2(nn.Module):
    def __init__(self, device, h, w, embedding_size, outputs, hdnsize, num_layer):
        super(CNNRNN2, self).__init__()
        self.device = device
        self.hidden_size = hdnsize
        self.conv1 = nn.Conv2d(4, h*w, kernel_size=5, stride=2)
        self.bn1 = nn.BatchNorm2d(h*w)
        self.conv2 = nn.Conv2d(h*w, hdnsize, kernel_size=5, stride=2)
        self.bn2 = nn.BatchNorm2d(hdnsize)
        self.conv3 = nn.Conv2d(hdnsize, hdnsize, kernel_size=5, stride=2)
        self.bn3 = nn.BatchNorm2d(hdnsize)
        self.embedding_size = embedding_size
        self.num_layer = num_layer

        self.encoder = nn.Embedding(54 * hdnsize, embedding_size)
        self.rnn = nn.RNN(embedding_size, hdnsize, num_layer)
        self.decoder = nn.Linear(hdnsize, outputs)
        
    def init_hidden(self):
        # nn.RNN expects a hidden state of shape (num_layer, batch, hidden_size); 8 = batch_size
        return torch.zeros(self.num_layer, 8, self.hidden_size)
    
    def forward(self, x):
        x = x.to(self.device)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        hidden = self.init_hidden().to(self.device)
        x, hidden = self.rnn(x, hidden)
        x = self.decoder(x.view(8, -1))
        
        return x

cnnrnn2.py, a CNN+RNN model including an embedding layer.

import torch
import torch.nn as nn
import torch.nn.functional as F

class CNNRNN2(nn.Module):
    def __init__(self, device, h, w, embedding_size, outputs, hdnsize, num_layer):
        super(CNNRNN2, self).__init__()
        self.device = device
        self.hidden_size = hdnsize
        self.conv1 = nn.Conv2d(4, h*w, kernel_size=5, stride=2)
        self.bn1 = nn.BatchNorm2d(h*w)
        self.conv2 = nn.Conv2d(h*w, hdnsize, kernel_size=5, stride=2)
        self.bn2 = nn.BatchNorm2d(hdnsize)
        self.conv3 = nn.Conv2d(hdnsize, hdnsize, kernel_size=5, stride=2)
        self.bn3 = nn.BatchNorm2d(hdnsize)
        self.embedding_size = embedding_size
        self.num_layer = num_layer

        self.encoder = nn.Embedding(126 * hdnsize, embedding_size)
        self.rnn = nn.LSTM(embedding_size, hdnsize, num_layer)
        self.decoder = nn.Linear(hdnsize, outputs)
        
    def init_hidden(self):
        hidden = torch.zeros(self.num_layer, 8, self.hidden_size)  # 8 = batch_size
        cell = torch.zeros(self.num_layer, 8, self.hidden_size)    # 8 = batch_size
        return hidden, cell
    
    def forward(self, x):
        x = x.to(self.device)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        hidden, cell = self.init_hidden()
        hidden, cell = hidden.to(self.device), cell.to(self.device)
        x, (hidden, cell) = self.rnn(x, (hidden, cell))
        x = self.decoder(x.view(8, -1))
        
        return x

cnnlstm.py, a CNN+LSTM model including an embedding layer.

import torch
import torch.nn as nn
import torch.nn.functional as F

class CNNRNN2(nn.Module):
    def __init__(self, device, h, w, embedding_size, outputs, hdnsize, num_layer):
        super(CNNRNN2, self).__init__()
        self.device = device
        self.hidden_size = hdnsize
        self.conv1 = nn.Conv2d(4, h*w, kernel_size=5, stride=2)
        self.bn1 = nn.BatchNorm2d(h*w)
        self.conv2 = nn.Conv2d(h*w, hdnsize, kernel_size=5, stride=2)
        self.bn2 = nn.BatchNorm2d(hdnsize)
        self.conv3 = nn.Conv2d(hdnsize, hdnsize, kernel_size=5, stride=2)
        self.bn3 = nn.BatchNorm2d(hdnsize)
        self.embedding_size = embedding_size
        self.num_layer = num_layer

        self.encoder = nn.Embedding(126 * hdnsize, embedding_size)
        self.rnn = nn.GRU(embedding_size, hdnsize, num_layer)
        self.decoder = nn.Linear(hdnsize, outputs)
        
    def init_hidden(self):
        # nn.GRU expects a hidden state of shape (num_layer, batch, hidden_size); 8 = batch_size
        return torch.zeros(self.num_layer, 8, self.hidden_size)
    
    def forward(self, x):
        x = x.to(self.device)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        hidden = self.init_hidden().to(self.device)
        x, hidden = self.rnn(x, hidden)
        x = self.decoder(x.view(8, -1))
        
        return x

cnngru.py, a CNN+GRU model including an embedding layer.

import random
from collections import deque, namedtuple
from IPython import display  # the display module: display.display / display.clear_output
from Account import Account
import math
from itertools import count
import os

import sys
import os.path as path
import matplotlib.pyplot as plt  # plot_durations uses the matplotlib API
import torch
import torch.nn as nn
import torch.optim as optim
from Market import Market
import torchvision
import time
from cnnrnn2 import CNNRNN2
from memory import ReplayMemory, Experience
# from transformers import get_cosine_schedule_with_warmup
import transformers

action_kind = 3
max_episode = 5000
screen_height = 100
screen_width  = 140
data_size = 250
visit_cnt = [0] * action_kind
# replay_buffer = deque()
epsilon = 0.3
dis = 0.9

BATCH_SIZE = 8
# GAMMA = 0.999
GAMMA = 0.99
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 200
TARGET_UPDATE = 10
steps_done = 0
loss = None
WINDOW_START = 0
WINDOW_SIZE  = 1500

# for i in range(torch.cuda.device_count()):
#     print(torch.cuda.get_device_name(i))
device_str = "cuda"
device = torch.device(device_str)
    
memory = ReplayMemory(BATCH_SIZE)
converter = torchvision.transforms.ToTensor()
market = Market()
train_net = CNNRNN2(device, screen_height, screen_width, 10, action_kind, 32, 2).to(device)
train_net = nn.DataParallel(train_net, device_ids=[0,1]).to(device)

episode_durations = []
optimizer = optim.RMSprop(train_net.parameters(), lr=0.0001, eps=0.00000001)
# scheduler = get_cosine_schedule_with_warmup(optimizer, 5, base_lr=0.3, final_lr=0.01)
scheduler = transformers.get_cosine_schedule_with_warmup(optimizer, 
                                                         num_warmup_steps=5, 
                                                         num_training_steps=25)
def optimize_action(memory):
    if len(memory) < BATCH_SIZE:
        return None
    optimizer.zero_grad()
    episode = memory.pop(BATCH_SIZE)
    batch = Experience(*zip(*episode))
    state_batch = torch.cat(batch.state)
    action_batch = torch.cat(batch.action)
    state_action_values = train_net(state_batch).gather(1, action_batch)

    criterion = nn.SmoothL1Loss()
    loss = criterion(state_action_values, action_batch.float())  # SmoothL1Loss expects a float target

    # Optimize the model
    # optimizer.zero_grad()
    loss.backward()
    for param in train_net.parameters():
        param.grad.data.clamp_(-1, 1)
    optimizer.step()
    return loss

# def select_action(df, idx):
#     try:
#         global steps_done
#         sample = random.random()
#         eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * steps_done / EPS_DECAY)
#         steps_done += 1
#         if sample > eps_threshold:
#             if df.loc[idx, "closemax"] == df.loc[idx, "close"]:
#                 action = 2
#                 return  action
#             elif df.loc[idx, "closemin"] == df.loc[idx, "close"]:
#                 action = 1
#                 return  action
#             else:
#                 return 0
#         else:
#             action = random.randrange(action_kind)
#             return  action
#     except Exception as ex:
#         exc_type, exc_obj, exc_tb = sys.exc_info()
#         fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
#         print("`select_action -> exception! %s : %s %d" % (str(ex) , fname, exc_tb.tb_lineno))
#         return 0
def select_action(df, idx):
    try:
        if df.loc[idx, "closemax"] == df.loc[idx, "close"]:
            action = 2
            return  action
        elif df.loc[idx, "closemin"] == df.loc[idx, "close"]:
            action = 1
            return  action
        else:
            return 0
    except Exception as ex:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print("`select_action -> exception! %s : %s %d" % (str(ex) , fname, exc_tb.tb_lineno))
        return 0
        
def get_chart(market, idx, max_data):
    img = market.get_chart(idx, max_data=max_data)
    if img is None:
        return None
    # img = Image.fromarray(np.uint8(cm.gist_earth(plt.io.to_image(fig, format='png')*255)))
    # im = Image.fromarray(img, bytes=True)
    # im = Image.fromarray(np.uint8(cm.gist_earth(img))/255)
    # im = Image.fromarray(np.uint8(img)/255)
    # img = img.resize((700, 500), resample=Image.BICUBIC)
    # img = Image.fromarray(cm.gist_earth(plt.io.to_image(fig, format='png'), bytes=True))
    # display(img)
    # chart = converter(img).unsqueeze(0).to(device)
    chart = converter(img).unsqueeze(0)
    return chart

def plot_durations(last_chart, curr_chart):
    plt.figure()
    # plt.subplot(1,2,1)
    img = plt.imshow(last_chart.cpu().squeeze(0).permute(1, 2, 0).numpy(), interpolation='none')
    plt.title('Example extracted screen')
    plt.figure(2)
    # plt.subplot(1,2,2)
    plt.clf()
    durations_t = torch.tensor(episode_durations, dtype=torch.float)
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    plt.plot(durations_t.numpy())
    # plt.show()
    # Take 100 episode averages and plot them too
    if len(durations_t) >= 100:
        means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
        means = torch.cat((torch.zeros(99), means))
        plt.plot(means.numpy())

    img.set_data(curr_chart.cpu().squeeze(0).permute(1, 2, 0).numpy())
    plt.pause(0.01)  # pause a bit so that plots are updated
    display.clear_output(wait=True)
    display.display(plt.gcf())
            
def main():
    if path.exists("pt/train_cnnrnn2_{}.pt".format(device_str)):
        train_net.load_state_dict(torch.load("pt/train_cnnrnn2_{}.pt".format(device_str)))
    train_net.train()
        
    for _ in range(10):    
        df = market.get_data()
        # df = df.head(WINDOW_SIZE)

        for epoch in range(max_episode):
            account = Account(df, 50000000)
            account.reset()
            for idx,_ in enumerate(df.index, start=data_size):
                try:
                    since = time.time()
                    curr_chart = get_chart(market, idx, data_size)
                    reward = 0
                    num_action = select_action(df, idx)
                    reward, real_action = account.exec_action(num_action, idx)
                    print("idx:%d==>action:%d, price:%.2f"%(idx, num_action, df.loc[idx, 'close']))
                    reward = torch.tensor([reward], device=device)
                    action = torch.tensor([[num_action]], device=device, dtype=torch.int64)
                                    
                    memory.push(curr_chart, action, reward)
                    while len(memory) >= BATCH_SIZE:
                        optimizer.zero_grad()
                        episode = memory.pop(BATCH_SIZE)
                        batch = Experience(*zip(*episode))
                        state_batch = torch.cat(batch.state)
                        action_batch = torch.cat(batch.action)
                        state_action_values = train_net(state_batch).gather(1, action_batch)

                        criterion = nn.SmoothL1Loss()
                        loss = criterion(state_action_values, action_batch.float())  # SmoothL1Loss expects a float target

                        # Optimize the model
                        # optimizer.zero_grad()
                        loss.backward()
                        for param in train_net.parameters():
                            param.grad.data.clamp_(-1, 1)
                        optimizer.step()
                        if loss is not None:
                            print("epoch[%d:%d] episode loss[%.10f]" % (epoch, idx, loss.item()))

                    if idx % TARGET_UPDATE == 0:
                        torch.save(train_net.state_dict(),"pt/train_cnnrnn2_{}.pt".format(device_str))
                    
                    spend = time.time() - since
                    print("idx:%d price [%.4f] unit[%.4f] used time[%.2f] agent rate:%.05f remind money:%.02f" 
                            % (idx, df.loc[idx, 'close'], account.unit, spend, account.rate, account.balance + account.unit * df.loc[idx, 'close']))
                    if account.is_bankrupt():
                        break
                    if idx == df.index.max():
                        break
                except Exception as ex:
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                    print("`recall_training -> exception! %s : %s %d" % (str(ex) , fname, exc_tb.tb_lineno))
            scheduler.step()

        print("end training DQN")
    
    print('Complete Training')

if __name__ == "__main__":
    main()

main_cnnrnn2.py, the training script for the three models above. Changing only the import line should be enough to train each variant, as sketched below.
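A sketch of the swap (all three files above deliberately define the same class name, CNNRNN2, so nothing else in the script changes):

from cnnrnn2 import CNNRNN2    # CNN + RNN
# from cnnlstm import CNNRNN2  # CNN + LSTM
# from cnngru import CNNRNN2   # CNN + GRU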

Cover image source: https://dgkim5360.tistory.com/entry/understanding-long-short-term-memory-lstm-kr ("Understanding Long Short-Term Memory (LSTM)", a Korean translation of Christopher Olah's August 2015 post, which explains recurrent neural networks and the LSTM model with good diagrams)

 


import random
from collections import deque, namedtuple
from typing import Dict, List, NamedTuple, Optional, Text, Tuple, Union
# from IPython.display import display, Math
from Account import Account
import math
from itertools import count
import os

import sys
import os.path as path
import numpy as np
import matplotlib.pyplot as plt  # plot_durations uses the matplotlib API
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as T
from Market import Market
from PIL import Image
from plotly import express as px
import torchvision
import time
import joblib
import pandas as pd
from dqn import DQN
from memory import ReplayMemory, Experience

action_kind = 21
max_episode = 5000
screen_height = 100
screen_width  = 140
data_size = 600
epsilon = 0.3
dis = 0.9

BATCH_SIZE = 8
# GAMMA = 0.999
GAMMA = 0.99
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 200
TARGET_UPDATE = 10
steps_done = 0
loss = None
WINDOW_START = 0
WINDOW_SIZE  = 1500

# for i in range(torch.cuda.device_count()):
#     print(torch.cuda.get_device_name(i))

# if gpu is to be used
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cuda")
# device_str = "cpu"
device_str = "cuda"
device = torch.device(device_str)
    
memory = ReplayMemory(BATCH_SIZE)
converter = torchvision.transforms.ToTensor()
market = Market()
train_net = DQN(device, screen_height, screen_width, action_kind, 32).to(device)
train_net = nn.DataParallel(train_net, device_ids=[0,1]).to(device)

episode_durations = []
    
optimizer = optim.RMSprop(train_net.parameters(), lr=0.000001)

# def select_action(df, idx):
#     try:
#         global steps_done
#         sample = random.random()
#         eps_threshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * steps_done / EPS_DECAY)
#         steps_done += 1
#         clslvl = ((df.loc[idx, "close"] - df.loc[idx, "closemin"]) * 40 // (df.loc[idx, "closemax"] - df.loc[idx, "closemin"]))
#         if sample > eps_threshold:
#             if df.loc[idx, "closemax"] == df.loc[idx, "close"]:
#                 action = clslvl
#                 return  action
#             elif df.loc[idx, "closemin"] == df.loc[idx, "close"]:
#                 action = clslvl
#                 return  action
#             else:
#                 return clslvl
#         else:
#             action = random.randrange(action_kind)
#             return  action
#     except Exception as ex:
#         exc_type, exc_obj, exc_tb = sys.exc_info()
#         fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
#         print("`select_action -> exception! %s : %s %d" % (str(ex) , fname, exc_tb.tb_lineno))
#         return 0

def select_action(df, idx):
    return ((df.loc[idx, "close"] - df.loc[idx, "closemin"]) * (action_kind-1) // (df.loc[idx, "closemax"] - df.loc[idx, "closemin"]))
        
def get_chart(market, idx, max_data):
    img = market.get_chart(idx, max_data=max_data)
    if img is None:
        return None
    # if  idx > (df.index.max() - data_size - 1):
    #     return None
    # img = Image.fromarray(np.uint8(cm.gist_earth(plt.io.to_image(fig, format='png')*255)))
    # im = Image.fromarray(img, bytes=True)
    # im = Image.fromarray(np.uint8(cm.gist_earth(img))/255)
    # im = Image.fromarray(np.uint8(img)/255)
    # img = img.resize((700, 500), resample=Image.BICUBIC)
    # display(img)
    # img = Image.fromarray(cm.gist_earth(plt.io.to_image(fig, format='png'), bytes=True))
    chart = converter(img).unsqueeze(0)
    return chart

def plot_durations(last_chart, curr_chart):
    plt.figure()
    # plt.subplot(1,2,1)
    img = plt.imshow(last_chart.cpu().squeeze(0).permute(1, 2, 0).numpy(), interpolation='none')
    plt.title('Example extracted screen')
    plt.figure(2)
    # plt.subplot(1,2,2)
    plt.clf()
    durations_t = torch.tensor(episode_durations, dtype=torch.float)
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    plt.plot(durations_t.numpy())
    # plt.show()
    # Take 100 episode averages and plot them too
    if len(durations_t) >= 100:
        means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
        means = torch.cat((torch.zeros(99), means))
        plt.plot(means.numpy())

    img.set_data(curr_chart.cpu().squeeze(0).permute(1, 2, 0).numpy())
    plt.pause(0.01)  # pause a bit so that plots are updated
    # display.clear_output(wait=True)
    # display.display(plt.gcf())
            
def main():
    if path.exists("pt/train_net_{}.pt".format(device_str)):
        train_net.load_state_dict(torch.load("pt/train_net_{}.pt".format(device_str)))
        
    for _ in range(10):    
        df = market.get_data()
        # df = df.head(WINDOW_SIZE)

        for epoch in range(max_episode):
            account = Account(df, 50000000)
            account.reset()

            last_chart = get_chart(market, data_size, data_size)

            for idx,_ in enumerate(df.index, start=(data_size + 1)):
                try:
                    since = time.time()
                    curr_chart = get_chart(market, idx, data_size)
                    if curr_chart is None or last_chart is None:
                        continue
                    else:
                        state = curr_chart - last_chart
                        
                    last_chart = curr_chart
                    reward = 0
                    num_action = select_action(df, idx)
                    reward, real_action = account.exec_action(2 if num_action == (action_kind - 1) else (1 if num_action == 0 else 0) , idx)
                    print("idx:%d==>action:%d,real_action==>%d price:%.2f"%(idx, num_action, real_action, df.loc[idx, 'close']))
                    # reward = torch.tensor([reward], device=device)
                    num_action = torch.tensor([[num_action]], device=device, dtype=torch.int64)
                                    
                    memory.push(curr_chart, num_action, reward)
                    if len(memory) >= BATCH_SIZE:
                        episode = memory.pop(BATCH_SIZE)
                        batch = Experience(*zip(*episode))
                        state_batch = torch.cat(batch.state)
                        action_batch = torch.cat(batch.action)
                        train_net.train()
                        state_action_values = train_net(state_batch).gather(1, action_batch)
                        # The canonical DQN target (reward + GAMMA * max_a' Q(s', a')) is left
                        # out here; the ideal action label itself is the regression target.
                        optimizer.zero_grad()
                        criterion = nn.SmoothL1Loss()
                        loss = criterion(state_action_values, action_batch.float())  # SmoothL1Loss expects a float target

                        # Optimize the model
                        loss.backward()
                        for param in train_net.parameters():
                            param.grad.data.clamp_(-1, 1)
                        optimizer.step()
                        print("epoch[%d:%d] epsode is next loss[%.10f]" % (epoch, idx, loss.item()))

                    if idx % TARGET_UPDATE == 0:
                        torch.save(train_net.state_dict(),"pt/train_net_{}.pt".format(device_str))
                    
                    spend = time.time() - since
                    print("idx:%d price [%.4f] unit[%.4f] used time[%.2f] agent rate:%.05f remind money:%.02f" 
                            % (idx, df.loc[idx, 'close'], account.unit, spend, account.rate, account.balance + account.unit * df.loc[idx, 'close']))
                    if account.is_bankrupt():
                        break
                    if idx == df.index.max():
                        break
                        # train_net.load_state_dict(train_net.state_dict())
                    # save_file_model(degreeQ)                
                except Exception as ex:
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                    print("`recall_training -> exception! %s : %s %d" % (str(ex) , fname, exc_tb.tb_lineno))

        print("end training DQN")
    
    print('Complete Training')

if __name__ == "__main__":
    main()

The main_buroto.py file.

import random
from collections import deque, namedtuple

Experience = namedtuple(
    'Experience',
    ('state', 'action', 'reward')
)

class ReplayMemory(object):
    def __init__(self, capacity):
        self.memory = deque([],maxlen=capacity)

    def push(self, *args):
        """Save a transition"""
        self.memory.append(Experience(*args))

    def pop(self, batch_size):
        arr = []
        for _ in range(batch_size):
            arr.append(self.memory.popleft())
        return arr
    
    def sample(self, batch_size):
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)

memory.py, split out into its own module.

import torch.nn as nn
import torch.nn.functional as F

class DQN(nn.Module):
    def __init__(self, device, h, w, outputs, qsize):
        super(DQN, self).__init__()
        self.conv1 = nn.Conv2d(4, h*w, kernel_size=5, stride=2)
        self.bn1 = nn.BatchNorm2d(h*w)
        self.conv2 = nn.Conv2d(h*w, qsize, kernel_size=5, stride=2)
        self.bn2 = nn.BatchNorm2d(qsize)
        self.device = device
        self.conv3 = nn.Conv2d(qsize, qsize, kernel_size=5, stride=2)
        self.bn3 = nn.BatchNorm2d(qsize)

        # Number of Linear input connections depends on output of conv2d layers
        # and therefore the input image size, so compute it.
        def conv2d_size_out(size, kernel_size = 5, stride = 2):
            return (size - (kernel_size - 1) - 1) // stride  + 1
        convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
        convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
        # print("convw[%d]  convh[%d]" % (convw, convh))
        linear_input_size = 54 * qsize
        self.head = nn.Linear(linear_input_size, outputs)

    # Called with either one element to determine next action, or a batch
    # during optimization. Returns tensor([[left0exp,right0exp]...]).
    def forward(self, x):
        x = x.to(self.device)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        return self.head(x.view(x.size(0), -1))

dqn.py, split out as well.

Starting with the main function: the ideal trading process, buying at local lows and selling at local highs, is computed with pandas' rolling window functions.
This labeling is then applied across the whole dataset and stored in the history.
Depending on the length of the run, that ideal sequence would yield returns of several thousand percent.
It is, literally, an ideal trading process.
It doesn't match reality one hundred percent, but just as an idealized grid world is treated as a Markov decision process (MDP), we assume this is an MDP too.
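For reference, a minimal sketch of this rolling-window labeling (the closemin/closemax column names match select_action above, but the window length is my assumption, not necessarily what Market uses):

import pandas as pd

WINDOW = 120  # assumed look-around length

def label_ideal(df: pd.DataFrame) -> pd.DataFrame:
    # Rolling extrema centered on each row: a close equal to the rolling max is an
    # ideal sell point, a close equal to the rolling min an ideal buy point.
    df['closemax'] = df['close'].rolling(WINDOW, center=True).max()
    df['closemin'] = df['close'].rolling(WINDOW, center=True).min()
    return df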
The recorded results are then used to train the DQN (CNN).
The optimize function plays that role.
The loss uses nn.SmoothL1Loss: for each state S it takes the value of the taken action a and the expected ideal action a', and minimizes the difference between the two.
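Spelled out, the update amounts to this (a sketch of the lines already in main; the .float() cast is needed because nn.SmoothL1Loss expects a floating-point target):

criterion = nn.SmoothL1Loss()                             # 0.5*x^2 for |x| < 1, |x| - 0.5 otherwise
q_taken = train_net(state_batch).gather(1, action_batch)  # value of the action actually taken in S
loss = criterion(q_taken, action_batch.float())           # pull Q(S, a) toward the ideal label a'
loss.backward()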
Unfortunately, NCCL is not supported on Windows, so the corresponding PyTorch calls raise errors there.
NCCL is NVIDIA's library for linking multiple GPUs together for best performance.
It is currently supported only on Ubuntu and Red Hat.
So I am trying to migrate to Ubuntu 20.04.5 LTS under WSL2, but running Windows 10 and WSL2 side by side demands enormous CPU resources.
The fallback is to reinstall the server with Ubuntu and work on it from my laptop.
Unfortunately my laptop is wrecked at the moment; I found an LCD panel on AliExpress and ordered it, but the earliest delivery date is December 15.
Per NVIDIA's site, since NCCL doesn't run on Windows, multiple GPU cards currently give only the effect of a single one there.
In the source above you can see nn.DataParallel wrapping the DQN.
Watching the performance tab: before nn.DataParallel only one GPU was used, whereas now the load is shared roughly half and half.
The important point is this:

The data stored by the recall_history() function is used by the recall_training function to train the DQN, and the trained weights are saved to pt/training_net.pt. In my case there is so much data that the model shows signs of overfitting, though the loss gap is not that large. A reasonable data size is 5,000-10,000 samples: train on them repeatedly until the loss is minimized, then resample, apply, and repeat; what matters is whether the DQN actually ends up operating with a small loss.
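A rough sketch of that resampling cycle (SAMPLE_SIZE and full_df are illustrative names, not identifiers from the code above):

import random

SAMPLE_SIZE = 8000  # within the suggested 5,000-10,000 range

full_df = market.get_data()
for _ in range(10):
    start = random.randrange(0, len(full_df) - SAMPLE_SIZE)
    df = full_df.iloc[start:start + SAMPLE_SIZE].reset_index(drop=True)
    # train on df until the loss stops improving, then resample and repeat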

Once this stage is done, the next step is to write main_agent.py, have it fire detections and trade under real conditions, and analyze what kind of returns come out.
I too initially thought that feeding reinforcement learning more data would stabilize detection, but in practice, the more data it sees, the less the AI wants to trade.
I'd suggest modifying the program slightly to save the pt file in multiple versions.
As running the agent in the next installment will show, the DQN tries to choose the most stable policy.
That is, it tries to reach a state with no trades at all, and therefore no profit and no loss.
Yet in some cases training goes well and it earns returns of over 100%.
Making money without a single if statement is remarkable, but anyone who has tried retro.gym will recognize it.
In fact, human reasoning and judgment don't matter much when training retro.gym either.
Training is a chain of coincidences.
Among those coincidences, the AI simply keeps moving toward whichever side scores highest.
How that plays out depends on how you, the human, set the epsilon value.
With epsilon around 0.5, half of the steps are filled with exploratory moves.
And if you score outcomes by return or remaining cash, keep only the best-scoring episodes in the history, and train on those, the engine will trade aggressively and chase high returns.
Look forward to the next installment: the live agent simulation.
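A minimal sketch of both ideas (EPSILON and the versioned filename are illustrative; select_action and action_kind come from the code above):

import random

EPSILON = 0.5  # half of the steps become exploratory moves

def select_action_eps(df, idx):
    if random.random() < EPSILON:
        return random.randrange(action_kind)  # explore
    return select_action(df, idx)             # exploit the ideal label

# keep one checkpoint per epoch instead of overwriting a single pt file:
# torch.save(train_net.state_dict(), "pt/train_net_%s_ep%04d.pt" % (device_str, epoch))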
 

 
