High freq execution #227

Merged 10 commits on Jan 28, 2021
16 changes: 16 additions & 0 deletions examples/trade/README.md
@@ -0,0 +1,16 @@
# Universal Trading for Order Execution with Oracle Policy Distillation
This is the experiment code for our AAAI 2021 paper "[Universal Trading for Order Execution with Oracle Policy Distillation](https://seqml.github.io/opd/opd_aaai21.pdf)", including the implementations of all the compared methods in the paper and a general reinforcement learning framework for order execution in quantitative finance.

## Abstract
As a fundamental problem in algorithmic trading, order execution aims to fulfill a specific trading order, either liquidation or acquirement, for a given instrument. Towards an effective execution strategy, recent years have witnessed a shift from the analytical view with model-based market assumptions to a model-free perspective, i.e., reinforcement learning, owing to its nature of sequential decision optimization. However, the noisy yet imperfect market information that can be leveraged by the policy makes it quite challenging to build sample-efficient reinforcement learning methods for effective order execution. In this paper, we propose a novel universal trading policy optimization framework to bridge the gap between the noisy yet imperfect market states and the optimal action sequences for order execution. In particular, the framework leverages a policy distillation method in which an oracle teacher with perfect information, approximating the optimal trading strategy, guides the learning of the common policy towards practically optimal execution. Extensive experiments show significant improvements of our method over various strong baselines, with reasonable trading actions.

### Citation
You are more than welcome to cite our paper:
```
@inproceedings{fang2021universal,
title={Universal Trading for Order Execution with Oracle Policy Distillation},
author={Fang, Yuchen and Ren, Kan and Liu, Weiqing and Zhou, Dong and Zhang, Weinan and Bian, Jiang and Yu, Yong and Liu, Tie-Yan},
booktitle={Proceedings of the AAAI Conference on Artificial Intelligence},
year={2021}
}
```
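
The core idea in the abstract, distilling an oracle teacher's decisions into a student policy that only sees imperfect market states, can be illustrated with a minimal, schematic loss term. The snippet below is an illustration of that idea only; the function name, the cross-entropy form, and the weighting coefficient are assumptions for exposition and are not taken from the paper or from this PR.

```python
import torch
import torch.nn.functional as F


def distillation_term(student_logits: torch.Tensor,
                      teacher_actions: torch.Tensor,
                      weight: float = 0.1) -> torch.Tensor:
    """Schematic oracle-policy-distillation term.

    Pushes the student's action distribution toward the actions chosen by an
    oracle teacher that had perfect information. `weight` is an illustrative
    coefficient, not a value from the paper.
    """
    return weight * F.cross_entropy(student_logits, teacher_actions)


# During training, this term would be added to the usual RL objective, e.g.:
# total_loss = ppo_loss + distillation_term(student_logits, teacher_actions)
```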
2 changes: 0 additions & 2 deletions examples/trade/agent/basic.py
@@ -1,5 +1,3 @@
from joblib import Parallel, delayed
from numba import njit, prange
from tianshou.policy import BasePolicy
from tianshou.data import Batch
import numpy as np
40 changes: 27 additions & 13 deletions examples/trade/executor.py
@@ -5,7 +5,7 @@
import json
import os
import agent
import model
import network
import policy
import random
import tianshou as ts
@@ -48,7 +48,15 @@ def setup_seed(seed):

class BaseExecutor(object):
def __init__(
self, log_dir, resources, env_conf, optim=None, policy_conf=None, network=None, policy_path=None, seed=None,
self,
log_dir,
resources,
env_conf,
optim=None,
policy_conf=None,
network_conf=None,
policy_path=None,
seed=None,
):
"""A base class for executor

@@ -62,8 +70,8 @@ def __init__(
:type optim: dict, optional
:param policy_conf: Configurations for the RL algorithm, defaults to None
:type policy_conf: dict, optional
:param network: Configurations for policy network, defaults to None
:type network: dict, optional
:param network_conf: Configurations for the policy network, defaults to None
:type network_conf: dict, optional
:param policy_path: If not None, the policy will be loaded from this path, defaults to None
:type policy_path: string, optional
:param seed: Random seed, defaults to None
@@ -90,17 +98,23 @@ def __init__(
self.policy = getattr(agent, policy_conf["name"])(policy_conf["config"])
# print(self.policy)
else:
assert not network is None
if "extractor" in network.keys():
net = getattr(model, network["extractor"]["name"] + "_Extractor")(
device=self.device, **network["config"]
assert network_conf is not None
if "extractor" in network_conf.keys():
net = getattr(network, network_conf["extractor"]["name"] + "_Extractor")(
device=self.device, **network_conf["config"]
)
else:
net = getattr(model, network["name"] + "_Extractor")(device=self.device, **network["config"])
net = getattr(network, network_conf["name"] + "_Extractor")(
device=self.device, **network_conf["config"]
)
net.to(self.device)
actor = getattr(model, network["name"] + "_Actor")(extractor=net, device=self.device, **network["config"])
actor = getattr(network, network_conf["name"] + "_Actor")(
extractor=net, device=self.device, **network_conf["config"]
)
actor.to(self.device)
critic = getattr(model, network["name"] + "_Critic")(extractor=net, device=self.device, **network["config"])
critic = getattr(network, network_conf["name"] + "_Critic")(
extractor=net, device=self.device, **network_conf["config"]
)
critic.to(self.device)
self.optim = torch.optim.Adam(
list(actor.parameters()) + list(critic.parameters()),
@@ -180,7 +194,7 @@ def __init__(
io_conf,
optim=None,
policy_conf=None,
network=None,
network_conf=None,
policy_path=None,
seed=None,
share_memory=False,
:param buffer_size: The size of replay buffer, defaults to 200000
:type buffer_size: int, optional
"""
super().__init__(log_dir, resources, env_conf, optim, policy_conf, network, policy_path, seed)
super().__init__(log_dir, resources, env_conf, optim, policy_conf, network_conf, policy_path, seed)
single_env = getattr(env, env_conf["name"])
env_conf = merge_dicts(env_conf, train_paths)
env_conf["log"] = True
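
To make the renamed `network_conf` argument above concrete: based on the code paths in `BaseExecutor` and the `OPD_*` classes added below, a configuration of the following shape would be accepted. The concrete values are illustrative assumptions and are not taken from this PR's config files.

```python
# Hypothetical network_conf for BaseExecutor (values are placeholders).
# "name" selects OPD_Extractor / OPD_Actor / OPD_Critic from the new
# examples/trade/network package; "config" is splatted into each constructor.
network_conf = {
    "name": "OPD",
    "config": {
        "hidden_size": 64,     # GRU hidden size used by the extractor
        "fc_size": 64,         # read by the extractor but not used by any layer
        "cnn_shape": [30, 6],  # (per-window length, feature channels) for the Conv1d branch
        "out_shape": 10,       # size of the actor's action head
    },
}
```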
5 changes: 5 additions & 0 deletions examples/trade/network/__init__.py
@@ -0,0 +1,5 @@
from .ppo import *
from .qmodel import *
from .teacher import *
from .util import *
from .opd import *
74 changes: 74 additions & 0 deletions examples/trade/network/opd.py
@@ -0,0 +1,74 @@
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from copy import deepcopy
import sys

from tianshou.data import to_torch


class OPD_Extractor(nn.Module):
def __init__(self, device="cpu", **kargs):
super().__init__()
self.device = device
hidden_size = kargs["hidden_size"]
fc_size = kargs["fc_size"]
self.cnn_shape = kargs["cnn_shape"]

self.rnn = nn.GRU(64, hidden_size, batch_first=True)
self.rnn2 = nn.GRU(64, hidden_size, batch_first=True)
self.dnn = nn.Sequential(nn.Linear(2, 64), nn.ReLU(),)
self.cnn = nn.Sequential(nn.Conv1d(self.cnn_shape[1], 3, 3), nn.ReLU(),)
self.raw_fc = nn.Sequential(nn.Linear((self.cnn_shape[0] - 2) * 3, 64), nn.ReLU(),)

self.fc = nn.Sequential(
nn.Linear(hidden_size * 2, hidden_size), nn.ReLU(), nn.Linear(hidden_size, 32), nn.ReLU(),
)

def forward(self, inp):
inp = to_torch(inp, dtype=torch.float32, device=self.device)
teacher_action = inp[:, 0]
inp = inp[:, 1:]
seq_len = inp[:, -1].to(torch.long)
batch_size = inp.shape[0]
raw_in = inp[:, : 6 * 240]
raw_in = torch.cat((torch.zeros_like(inp[:, : 6 * 30]), raw_in), dim=-1)
raw_in = raw_in.reshape(-1, 30, 6).transpose(1, 2)
dnn_in = inp[:, 6 * 240 : -1].reshape(batch_size, -1, 2)
cnn_out = self.cnn(raw_in).view(batch_size, 9, -1)
rnn_in = self.raw_fc(cnn_out)
rnn2_in = self.dnn(dnn_in)
rnn2_out = self.rnn2(rnn2_in)[0]
rnn_out = self.rnn(rnn_in)[0]
rnn_out = rnn_out[torch.arange(rnn_out.size(0)), seq_len]
rnn2_out = rnn2_out[torch.arange(rnn2_out.size(0)), seq_len]
# dnn_out = self.dnn(dnn_in)
fc_in = torch.cat((rnn_out, rnn2_out), dim=-1)
feature = self.fc(fc_in)
return feature, teacher_action / 2


class OPD_Actor(nn.Module):
def __init__(self, extractor, out_shape, device=torch.device("cpu"), **kargs):
super().__init__()
self.extractor = extractor
self.layer_out = nn.Sequential(nn.Linear(32, out_shape), nn.Softmax(dim=-1))
self.device = device

def forward(self, obs, state=None, info={}):
feature, self.teacher_action = self.extractor(obs)
out = self.layer_out(feature)
return out, state


class OPD_Critic(nn.Module):
def __init__(self, extractor, out_shape, device=torch.device("cpu"), **kargs):
super().__init__()
self.extractor = extractor
self.value_out = nn.Linear(32, 1)
self.device = device

def forward(self, obs, state=None, info={}):
feature, self.teacher_action = self.extractor(obs)
return self.value_out(feature).squeeze(dim=-1)
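
As a reading aid, here is a minimal, hypothetical sketch of how an extractor/actor/critic trio like `OPD_Extractor`, `OPD_Actor`, and `OPD_Critic` above is typically assembled with tianshou, mirroring what `BaseExecutor` does in `executor.py`; the hyperparameters, the `Categorical` distribution choice, and the plain `PPOPolicy` call are assumptions for illustration and are not taken from this PR.

```python
import torch
from tianshou.policy import PPOPolicy

# Assumes examples/trade is on the import path, so the new package resolves.
from network.opd import OPD_Extractor, OPD_Actor, OPD_Critic

# Illustrative hyperparameters only; real values come from the experiment configs.
config = {"hidden_size": 64, "fc_size": 64, "cnn_shape": [30, 6], "out_shape": 10}

extractor = OPD_Extractor(device="cpu", **config)
actor = OPD_Actor(extractor=extractor, device="cpu", **config)
critic = OPD_Critic(extractor=extractor, device="cpu", **config)

# One optimizer over both heads, as BaseExecutor does.
optim = torch.optim.Adam(
    list(actor.parameters()) + list(critic.parameters()), lr=1e-4
)

# Discrete action space: a Categorical distribution over the actor's softmax output.
policy = PPOPolicy(actor, critic, optim, dist_fn=torch.distributions.Categorical)
```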
79 changes: 79 additions & 0 deletions examples/trade/network/ppo.py
@@ -0,0 +1,79 @@
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from copy import deepcopy
import sys

from tianshou.data import to_torch


class PPO_Extractor(nn.Module):
def __init__(self, device="cpu", **kargs):
super().__init__()
self.device = device
hidden_size = kargs["hidden_size"]
fc_size = kargs["fc_size"]
self.cnn_shape = kargs["cnn_shape"]

self.rnn = nn.GRU(64, hidden_size, batch_first=True)
self.rnn2 = nn.GRU(64, hidden_size, batch_first=True)
self.dnn = nn.Sequential(nn.Linear(2, 64), nn.ReLU(),)
self.cnn = nn.Sequential(nn.Conv1d(self.cnn_shape[1], 3, 3), nn.ReLU(),)
self.raw_fc = nn.Sequential(nn.Linear((self.cnn_shape[0] - 2) * 3, 64), nn.ReLU(),)

self.fc = nn.Sequential(
nn.Linear(hidden_size * 2, hidden_size), nn.ReLU(), nn.Linear(hidden_size, 32), nn.ReLU(),
)

def forward(self, inp):
inp = to_torch(inp, dtype=torch.float32, device=self.device)
# inp = torch.from_numpy(inp).to(torch.device('cpu'))
seq_len = inp[:, -1].to(torch.long)
batch_size = inp.shape[0]
raw_in = inp[:, : 6 * 240]
raw_in = torch.cat((torch.zeros_like(inp[:, : 6 * 30]), raw_in), dim=-1)
raw_in = raw_in.reshape(-1, 30, 6).transpose(1, 2)
dnn_in = inp[:, -19:-1].reshape(batch_size, -1, 2)
cnn_out = self.cnn(raw_in).view(batch_size, 9, -1)
assert not torch.isnan(cnn_out).any()
rnn_in = self.raw_fc(cnn_out)
assert not torch.isnan(rnn_in).any()
rnn2_in = self.dnn(dnn_in)
assert not torch.isnan(rnn2_in).any()
rnn2_out = self.rnn2(rnn2_in)[0]
assert not torch.isnan(rnn2_out).any()
rnn_out = self.rnn(rnn_in)[0]
assert not torch.isnan(rnn_out).any()
rnn_out = rnn_out[torch.arange(rnn_out.size(0)), seq_len]
rnn2_out = rnn2_out[torch.arange(rnn2_out.size(0)), seq_len]
# dnn_out = self.dnn(dnn_in)
fc_in = torch.cat((rnn_out, rnn2_out), dim=-1)
self.feature = self.fc(fc_in)
return self.feature


class PPO_Actor(nn.Module):
def __init__(self, extractor, out_shape, device=torch.device("cpu"), **kargs):
super().__init__()
self.extractor = extractor
self.layer_out = nn.Sequential(nn.Linear(32, out_shape), nn.Softmax(dim=-1))
self.device = device

def forward(self, obs, state=None, info={}):
self.feature = self.extractor(obs)
assert not (torch.isnan(self.feature).any() | torch.isinf(self.feature).any()), f"{self.feature}"
out = self.layer_out(self.feature)
return out, state


class PPO_Critic(nn.Module):
def __init__(self, extractor, out_shape, device=torch.device("cpu"), **kargs):
super().__init__()
self.extractor = extractor
self.value_out = nn.Linear(32, 1)
self.device = device

def forward(self, obs, state=None, info={}):
self.feature = self.extractor(obs)
return self.value_out(self.feature).squeeze(dim=-1)
52 changes: 52 additions & 0 deletions examples/trade/network/qmodel.py
@@ -0,0 +1,52 @@
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from copy import deepcopy
import sys

from tianshou.data import to_torch


class RNNQModel(nn.Module):
def __init__(self, device="cpu", out_shape=10, **kargs):
super().__init__()
self.device = device
hidden_size = kargs["hidden_size"]
fc_size = kargs["fc_size"]
self.cnn_shape = kargs["cnn_shape"]

self.rnn = nn.GRU(64, hidden_size, batch_first=True)
self.rnn2 = nn.GRU(64, hidden_size, batch_first=True)
self.dnn = nn.Sequential(nn.Linear(2, 64), nn.ReLU(),)
self.cnn = nn.Sequential(nn.Conv1d(self.cnn_shape[1], 3, 3), nn.ReLU(),)
self.raw_fc = nn.Sequential(nn.Linear((self.cnn_shape[0] - 2) * 3, 64), nn.ReLU(),)

self.fc = nn.Sequential(
nn.Linear(hidden_size * 2, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, 32),
nn.ReLU(),
nn.Linear(32, out_shape),
)

def forward(self, obs, state=None, info={}):
inp = to_torch(obs, dtype=torch.float32, device=self.device)
inp = inp[:, 182:]
seq_len = inp[:, -1].to(torch.long)
batch_size = inp.shape[0]
raw_in = inp[:, : 6 * 240]
raw_in = torch.cat((torch.zeros_like(inp[:, : 6 * 30]), raw_in), dim=-1)
raw_in = raw_in.reshape(-1, 30, 6).transpose(1, 2)
dnn_in = inp[:, 6 * 240 : -1].reshape(batch_size, -1, 2)
cnn_out = self.cnn(raw_in).view(batch_size, 9, -1)
rnn_in = self.raw_fc(cnn_out)
rnn2_in = self.dnn(dnn_in)
rnn2_out = self.rnn2(rnn2_in)[0]
rnn_out = self.rnn(rnn_in)[0]
rnn_out = rnn_out[torch.arange(rnn_out.size(0)), seq_len]
rnn2_out = rnn2_out[torch.arange(rnn2_out.size(0)), seq_len]
# dnn_out = self.dnn(dnn_in)
fc_in = torch.cat((rnn_out, rnn2_out), dim=-1)
out = self.fc(fc_in)
return out, state
70 changes: 70 additions & 0 deletions examples/trade/network/teacher.py
@@ -0,0 +1,70 @@
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from copy import deepcopy
import sys

from tianshou.data import to_torch


class Teacher_Extractor(nn.Module):
def __init__(self, device="cpu", feature_size=180, **kargs):
super().__init__()
self.device = device
hidden_size = kargs["hidden_size"]
fc_size = kargs["fc_size"]
self.cnn_shape = kargs["cnn_shape"]

self.rnn = nn.GRU(64, hidden_size, batch_first=True)
self.rnn2 = nn.GRU(64, hidden_size, batch_first=True)
self.dnn = nn.Sequential(nn.Linear(2, 64), nn.ReLU(),)
self.cnn = nn.Sequential(nn.Conv1d(self.cnn_shape[1], 3, 3), nn.ReLU(),)
self.raw_fc = nn.Sequential(nn.Linear((self.cnn_shape[0] - 2) * 3, 64), nn.ReLU(),)

self.fc = nn.Sequential(
nn.Linear(hidden_size * 2, hidden_size), nn.ReLU(), nn.Linear(hidden_size, 32), nn.ReLU(),
)

def forward(self, inp):
inp = to_torch(inp, dtype=torch.float32, device=self.device)
inp = inp[:, 182:]
seq_len = inp[:, -1].to(torch.long)
batch_size = inp.shape[0]
raw_in = inp[:, : 6 * 240].reshape(-1, 30, 6).transpose(1, 2)
dnn_in = inp[:, 6 * 240 : -1].reshape(batch_size, -1, 2)
cnn_out = self.cnn(raw_in).view(batch_size, 8, -1)
rnn_in = self.raw_fc(cnn_out)
rnn2_in = self.dnn(dnn_in)
rnn2_out = self.rnn2(rnn2_in)[0]
rnn_out = self.rnn(rnn_in)[0][:, -1, :]
rnn2_out = rnn2_out[torch.arange(rnn2_out.size(0)), seq_len]
# dnn_out = self.dnn(dnn_in)
fc_in = torch.cat((rnn_out, rnn2_out), dim=-1)
self.feature = self.fc(fc_in)
return self.feature


class Teacher_Actor(nn.Module):
def __init__(self, extractor, out_shape, device=torch.device("cpu"), **kargs):
super().__init__()
self.extractor = extractor
self.layer_out = nn.Sequential(nn.Linear(32, out_shape), nn.Softmax(dim=-1))
self.device = device

def forward(self, obs, state=None, info={}):
self.feature = self.extractor(obs)
out = self.layer_out(self.feature)
return out, state


class Teacher_Critic(nn.Module):
def __init__(self, extractor, out_shape, device=torch.device("cpu"), **kargs):
super().__init__()
self.extractor = extractor
self.value_out = nn.Linear(32, 1)
self.device = device

def forward(self, obs, state=None, info={}):
self.feature = self.extractor(obs)
return self.value_out(self.feature).squeeze(-1)