diff --git a/Dockerfile b/Dockerfile index 4bc5188..4f15b05 100644 --- a/Dockerfile +++ b/Dockerfile @@ -61,7 +61,7 @@ RUN pip install --quiet --no-cache-dir \ 'quantstats>=0.0.37' \ 'PyPortfolioOpt>=1.5.1' \ 'Riskfolio-Lib>=3.3.0' \ - 'python-telegram-bot>=13.4' \ + 'python-telegram-bot==13.5' \ 'dill' \ 'lz4' \ 'blosc' diff --git a/docs/docker_readme.md b/docs/docker_readme.md index 2a9d6bd..d9e3dc0 100644 --- a/docs/docker_readme.md +++ b/docs/docker_readme.md @@ -43,8 +43,11 @@ If you want to read the logs of the bot: kubectl logs Optional: Go in the admin panel and delete the user admin and testdb, that are completely unnecessary. +Note: The -#Troubleshooting +#Troubleshooting +If kubectl command is not found replace in the script the "kubectl" by "minikube kubectl -- ". + If for any reason the sequence_start_first_time.sh lead to an error, you need to clean properly Kubernetes before repeating the step. It means obviously removing the deployments with kubectl delete deployment @@ -64,6 +67,7 @@ In addition, in kubernetes/postgres.yml, the hostPath under PersistentVolume path: /data/py-trading-bot-postgres-pv Needs to be changed, otherwise the same data will be loaded again when the permanent volumes are recreated. + ## Error auth_user_username_key In the logs of py-trading-bot pod (kubectl logs ) you find: diff --git a/docs/toubleshooting.md b/docs/toubleshooting.md index f4af45c..f66e016 100644 --- a/docs/toubleshooting.md +++ b/docs/toubleshooting.md @@ -9,4 +9,5 @@ We try to make the code as robust as possible, but some errors cannot be avoided |Nothing happens in Telegram when starting the bot | A chat id is added upon receiving the "/start" command. So try the command /start in Telegram, it should be displayed in the shell of the bot| |Telegram does not find the bot| Check that you inserted the token in a file trading_bot/etc/TELEGRAM_TOKEN ? | |Several chats are found and no message goes out | Go in reporting/telegram_sub.py and modify the function send_to_all. Replace the for loop by a chat_id=self.chat_ids[index_of_your_choice]. 
| +|You don't understand why an order was not performed | Have a look in the trade log, it is there for this purpose | diff --git a/py-trading-bot/core/constants.py b/py-trading-bot/core/constants.py index 1e73789..45009df 100644 --- a/py-trading-bot/core/constants.py +++ b/py-trading-bot/core/constants.py @@ -363,7 +363,8 @@ Delisting date of the stocks if relevant ''' NASDAQ_DELIST={ - "ATVI":"2023-10-20" + "ATVI":"2023-10-20", + "SGEN":"2023-12-15" } ''' diff --git a/py-trading-bot/core/data_manager.py b/py-trading-bot/core/data_manager.py index 102eff6..af60e19 100644 --- a/py-trading-bot/core/data_manager.py +++ b/py-trading-bot/core/data_manager.py @@ -60,7 +60,7 @@ def save_data( data=vbt.YFData.fetch(symbols,start=start_date,end=end_date,\ timeframe='1d',missing_index="drop") BASE_DIR = Path(__file__).resolve().parent.parent - data.to_hdf(file_path=os.path.join(BASE_DIR,'saved_cours/'+selector.upper()+'_period.h5')) + data.to_hdf(path_or_buf=os.path.join(BASE_DIR,'saved_cours/'+selector.upper()+'_period.h5')) def retrieve_data_offline( o, @@ -168,9 +168,9 @@ def retrieve_debug( ''' import constants - selector="industry" + selector="NASDAQ" start_date='2007-01-01' - end_date='2023-08-01' + end_date='2024-01-03' if selector=="CAC40": all_symbols=constants.CAC40 diff --git a/py-trading-bot/core/presel.py b/py-trading-bot/core/presel.py index 98fb67d..225da50 100644 --- a/py-trading-bot/core/presel.py +++ b/py-trading-bot/core/presel.py @@ -171,8 +171,6 @@ def symbols_simple_to_complex(self,symbol_simple,ent_or_ex): ''' if "entries" not in self.ust.__dir__(): self.ust.run() - - return self.ust.symbols_simple_to_complex(symbol_simple,ent_or_ex) #UnderlyingStrat @@ -233,7 +231,7 @@ def calculate( if ((not self.no_ust and not short and self.ust.exits.loc[i, symbol_complex]) or #not short and (not self.no_ust and short and self.ust.exits_short.loc[i, symbol_complex]) or (self.no_ust and symbol_simple not in self.candidates[short_to_str[short]][i])): - + self.pf[short_to_str[short]].remove(symbol_simple) self.capital+=self.order_size @@ -347,6 +345,8 @@ def sub_sub(self, ((not self.blocked and not self.blocked_im) or ((self.blocked or self.blocked_im) and not short))): self.candidates[short_to_str[short]][i].append(symbol_simple) + return True + return False def sub( self, @@ -361,6 +361,7 @@ def sub( i: index short: order direction ''' + self.sorting(i, short=short) if self.sorted is not None: for e in self.sorted: @@ -374,7 +375,11 @@ def sub( if len(t)>0: symbol=t.index[0] v=self.sorting_criterium.loc[i,symbol] - self.sub_sub(i, symbol,v, short) + pursue=self.sub_sub(i, symbol,v, short) + if not pursue: + break + else: + break else: raise ValueError("both sorted and sorted_df are None") @@ -651,7 +656,7 @@ def __init__(self,period: str,**kwargs): self.dur=ic.VBTKAMATREND.run(self.close).duration #self.dur=ic.VBTSUPERTREND.run(self.high,self.low,self.close).duration self.no_ust=True - self.calc_all=True + self.calc_all=True #otherwise it is not possible to determine what is excluded self.last_short=False def sorting( @@ -686,7 +691,8 @@ def perform(self, r, keep:bool=False, **kwargs): r.concat(self.st.name.capitalize()+", " + "direction " + direction + ", stockex: " + self.ust.exchange +\ ", action duration: " +str(self.out)) - + r.concat("Present "+ self.st.name + " candidates: "+str(candidates) + " hold since: "+ str(self.hold_dur) + " days") + r.ss_m.clean_excluded(self.st.name, self.excluded) r.ss_m.order_nosubstrat(candidates_to_YF(self.ust.symbols_to_YF,candidates), 
self.ust.exchange, self.st.name, self.last_short,keep=keep) @@ -811,11 +817,7 @@ def underlying(self): def sorting_g(self): self.sorted_rank=self.divergence.rank(axis=1, ascending=True) #small divergence better self.sorting_criterium=self.divergence - -class PreselDivergenceSecond(PreselDivergence): - def underlying(self): - self.underlying_creator("StratDiv") - + class PreselDivergenceBlocked(PreselDivergence): ''' Like preselect_divergence, but the mechanism is blocked when macro_trend is bear diff --git a/py-trading-bot/core/presel_classic.py b/py-trading-bot/core/presel_classic.py index c31fd9c..b2dcf82 100644 --- a/py-trading-bot/core/presel_classic.py +++ b/py-trading-bot/core/presel_classic.py @@ -120,20 +120,24 @@ def fill_allocations_underlying(self): #transform the entries and exits in 1 and 0 t=SIGNALTOSIZE.run( - self.ust.entries, - self.ust.exits, - self.ust.entries_short, - self.ust.exits_short, + self.ust_classic.entries, + self.ust_classic.exits, + self.ust_classic.entries_short, + self.ust_classic.exits_short, idx_arr=[idx_arr] ) self.expanded_allocations= self.expand_alloc(idx_arr, self.pf_opt._allocations) #add the weight self.used_allocations=remove_multi(t.bought-t.sold)*remove_multi(self.expanded_allocations) self.max_alloc() - self.size=self.new_alloc #remove_multi(self.new_alloc)* remove_multi(size_underlying) + self.size=self.new_alloc def apply_underlying_strat(self, strat_name): - self.ust=name_to_ust_or_presel(strat_name,self.period, symbol_index=self.symbol_index) + if "ust" in self.__dir__(): #for handle "live" strategy + self.ust_classic=name_to_ust_or_presel(strat_name,self.period, input_ust=self.ust) + else: + self.ust_classic=name_to_ust_or_presel(strat_name,self.period, symbol_index=self.symbol_index) + self.fill_allocations_underlying() #as function from_optimizer diff --git a/py-trading-bot/core/strat.py b/py-trading-bot/core/strat.py index 4109b52..ba9959d 100644 --- a/py-trading-bot/core/strat.py +++ b/py-trading-bot/core/strat.py @@ -498,19 +498,21 @@ def get_return(self): #benchmark_return makes sense only for bull delta=pf.total_return().values[0] return delta - - def perform_StratCandidates(self, r, st_name): + + def perform(self, r): ''' Perform during the report an underlying strategy on the candidates belonging to StratCandidates Arguments ---------- r: report - st_name: name of the strategy ''' - from orders.models import Strategy, StratCandidates #needs to be loaded here, as it will work only if Django is loaded - st, _=Strategy.objects.get_or_create(name=st_name) - st_actions, _=StratCandidates.objects.get_or_create(strategy=st) #.id + from orders.models import StratCandidates #needs to be loaded here, as it will work only if Django is loaded + + if self.st is None: + raise ValueError("st should be defined to use perform on underlying strategy") + + st_actions, _=StratCandidates.objects.get_or_create(strategy=self.st) #.id st_symbols=st_actions.retrieve() for symbol in intersection(self.symbols,st_symbols): @@ -520,17 +522,9 @@ def perform_StratCandidates(self, r, st_name): symbol_complex_ent_normal=self.symbols_simple_to_complex(symbol,"ent") symbol_complex_ex_normal=self.symbols_simple_to_complex(symbol,"ex") target_order=self.get_last_decision(symbol_complex_ent_normal,symbol_complex_ex_normal) - r.display_last_decision(symbol,target_order, st_name) + r.display_last_decision(symbol,target_order, self.st.name) - r.ss_m.add_target_quantity(symbol, st_name, target_order) - - def perform(self, r, st_name:str=None): #default - ''' - See 
perform_StratCandidates - ''' - if st_name is None: - raise ValueError("perform for underlying strategy should have an argument st_name") - self.perform_StratCandidates(r, st_name) + r.ss_m.add_target_quantity(symbol, self.st.name, target_order) ###production functions def get_last_decision(self, symbol_complex_ent: str, symbol_complex_ex: str) -> int: @@ -640,8 +634,8 @@ def __init__(self, } super().__init__(period,strat_arr=a,**kwargs ) - -class StratDiv(UnderlyingStrat): + +class StratDiv2(UnderlyingStrat): ''' Underlying strategy for divergence preselection ''' @@ -654,6 +648,29 @@ def __init__(self, "CDL3BLACKCROWS"] }} + super().__init__(period,strat_arr=a,**kwargs ) + +class StratDiv(UnderlyingStrat): + ''' + Underlying strategy for divergence preselection + ''' + def __init__(self, + period: numbers.Number, + **kwargs): + a={'bull': { + 'ent': ['BBANDS', 'CDL3BLACKCROWS'], + 'ex': ['ULTOSC20', 'CDLHIKKAKE','CDLABANDONEDBABY', 'CDL3BLACKCROWS','CDLHIKKAKEMOD'] + }, + 'bear': { + 'ent': ['CDLHANGINGMAN', 'CDLSTICKSANDWICH', 'CDL3LINESTRIKE'], + 'ex': ['STOCH', 'BBANDS', 'CDLBELTHOLD', 'CDLXSIDEGAP3METHODS'] + }, + 'uncertain': { + 'ent': ['KAMA'], + 'ex': ['WILLR','ULTOSC20','ULTOSC25','CDL3LINESTRIKE','CDLDARKCLOUDCOVER', 'CDL3INSIDE'] + } + } + super().__init__(period,strat_arr=a,**kwargs ) class StratTestSimple(UnderlyingStrat): diff --git a/py-trading-bot/ml/ml.py b/py-trading-bot/ml/ml.py index afa2e16..01827f0 100644 --- a/py-trading-bot/ml/ml.py +++ b/py-trading-bot/ml/ml.py @@ -5,6 +5,10 @@ @author: maxime """ +if __name__=="__main__": + import sys + sys.path.append("..") + from core.data_manager import retrieve_data_offline from core.constants import BEAR_PATTERNS, BULL_PATTERNS from core.common import candidates_to_YF, remove_multi @@ -25,7 +29,7 @@ from sklearn import metrics from sklearn.ensemble import RandomForestRegressor -from keras import Sequential +from keras import Sequential, callbacks from keras.layers import LSTM, Dense #Object to train the models offline @@ -212,7 +216,7 @@ def __init__( period: period of time for which we shall retrieve the data indexes: main indexes used to download local data ''' - for k in ["indexes"]: + for k in ["period","indexes"]: setattr(self,k,locals()[k]) for key in ["close","open","low","high","data","volume"]: @@ -220,15 +224,14 @@ def __init__( setattr(self,key+"_ind_dic",{}) #len_min save the minimum length for all data - len_min=None + self.len_min=None for ind in self.indexes: retrieve_data_offline(self,ind,period) self.data_dic[ind]=self.data for d in ["Close","Open","Low","High","Volume"]: getattr(self,d.lower()+"_dic")[ind]=self.data_dic[ind].get(d) getattr(self,d.lower()+"_ind_dic")[ind]=self.data_ind.get(d) - self.len_min=len(self.close_dic[ind]) if len_min is None else min(self.close_dic[ind], self.len_min) - + self.len_min=len(self.close_dic[ind]) if self.len_min is None else min(len(self.close_dic[ind]), self.len_min) self.features_name=[] self.prod=False self.steps=None @@ -242,23 +245,15 @@ def load_model_docu(self, with open(os.path.dirname(__file__)+"/models/"+model_name+".json") as f: d = json.load(f) for k in d: - setattr(self,k,d[k]) - - if "clf" in self.__dir__(): - if self.clf.__class__ == Sequential: - self.model_type="LSTM" - elif self.clf.__class__ == MLPRegressor: - self.model_type="MLP" - else: - self.model_type="Forest" - else: - print("model type not loaded by docu loading, import the model first using load_model") + if k not in ["indexes"]: + setattr(self,k,d[k]) def save_model_docu(self, 
model_name:str ): d={} - for k in ["features_name","model_type", "preprocessing", "next_day_price","distance","lag"]: + for k in ["features_name","model_type", "preprocessing", "next_day_price","distance","lag", + "indexes","period","n_neurons","activation","reduce_memory_usage"]: try: d[k]=getattr(self,k) except: @@ -280,7 +275,8 @@ def prepare(self, model_type:str="MLP", steps:int=200, features_name:list=None, - prod:bool=False + prod:bool=False, + reduce_memory_usage: bool=True ): ''' Prepare the data for the training, or load it @@ -297,11 +293,13 @@ def prepare(self, model_type: what kind of model should be trained? steps: similar to lag but for LSTM model. So number of timesteps to consider for the training of the model features_name: explicit features name list - prod: to be used for production + prod: to be used for production, + reduce_memory_usage: use a method to reduce how much memory we use, is against performance. Concretely create a 4th dimension, and we optimized for each symbol separately ''' - for k in ["test_size","preprocessing","next_day_price","distance", "model_type","features_name","lag", "prod"]: + for k in ["test_size","preprocessing","next_day_price","distance", "model_type","features_name","lag", "prod","reduce_memory_usage"]: setattr(self,k,locals()[k]) + if model_type=="LSTM": self.steps=steps else: @@ -309,9 +307,9 @@ def prepare(self, if data_name is None: self.defi_x() - self.x_df, self.x_train, self.x_test, self.x_scaling=self.flatten(self.all_x) + self.x_df, self.x_train, self.x_test=self.flatten(self.all_x) self.defi_y() - self.y_df, self.y_train, self.y_test, self.y_scaling=self.flatten(self.all_y, y_bool=True) + self.y_df, self.y_train, self.y_test=self.flatten(self.all_y, y_bool=True) else: self.x_df=pd.read_csv("x_"+data_name+".csv",index_col=[0,1,2],parse_dates=True) self.y_df=pd.read_csv("y_"+data_name+".csv",index_col=[0,1,2],parse_dates=True) @@ -393,7 +391,8 @@ def create_empty_x_df(self, ind:str, s:str): def create_timesteps(self, arr_total, df, y_bool:bool=False): ''' - Function for LSTM to create the timesteps, it means slide a window over the signal + Function for LSTM to create the timesteps, it means slide a window over the signal. + Return an array of dimension 4 for x [symbol, batch, timesteps, features] Arguments ---------- @@ -402,15 +401,20 @@ def create_timesteps(self, arr_total, df, y_bool:bool=False): y_bool: is it for the output? ''' arr_total_3d=None - if y_bool: - arr_temp_total=np.reshape(df.values[self.steps:],(1,df[self.steps:].shape[0], df[self.steps:].shape[1])) + if self.reduce_memory_usage: + arr_temp_total=np.reshape(df.values[self.steps:],(1,df[self.steps:].shape[0], df[self.steps:].shape[1])) + else: + arr_temp_total=df.values[self.steps:] else: for ii in range(df.shape[0]-self.steps): arr_temp_total=np.reshape(df.values[ii:ii+self.steps],(1, self.steps, df.shape[1])) arr_total_3d = np.vstack((arr_total_3d,arr_temp_total)) if arr_total_3d is not None else arr_temp_total - arr_temp_total=np.reshape(arr_total_3d,(1,arr_total_3d.shape[0],arr_total_3d.shape[1],arr_total_3d.shape[2])) - + if self.reduce_memory_usage: + arr_temp_total=np.reshape(arr_total_3d,(1,arr_total_3d.shape[0],arr_total_3d.shape[1],arr_total_3d.shape[2])) + else: + arr_temp_total=arr_total_3d + del arr_total_3d arr_total= np.vstack((arr_total, arr_temp_total)) if arr_total is not None else arr_temp_total @@ -426,7 +430,7 @@ def flatten( Function to put the input in the right shape for the training. 
For MLP, it means in the shape (batch, features) - For LSTM, it means in the shape (batch, time steps, features). In this case we also create a different arr_scaling + For LSTM, it means in the shape (batch, time steps, features). as the scaler needs a 2d array. Arguments @@ -437,7 +441,6 @@ def flatten( arr_total=None arr_train=None arr_test=None - arr_scaling=None ts={} if "window_start" not in self.__dir__(): @@ -456,8 +459,8 @@ def flatten( #somehow vbt is designed with the columns in the other orders so to say, which lead to this very computer intensive function for ind in self.indexes: #CAC, DAX, NASDAQ + print("flattening: "+ind) total_len=len(self.close_dic[ind].index) - if not self.prod: learn_len=int(math.floor((1-self.test_size)*total_len)) test_len=total_len-learn_len @@ -475,6 +478,7 @@ def flatten( learn_range=[i for i in range(0,test_window_start)]+[i for i in range(test_window_end,self.len_min)] for s in self.close_dic[ind].columns: + print("flattening: "+s) dfs=[] for col in input_arr[ind]: dfs.append(ts[ind][col][s].rename(col)) @@ -492,26 +496,21 @@ def flatten( if self.model_type=="LSTM": #the different symbols needs to be separated to avoid that the last samples of one symbol are used for the next symbol eval arr_total=self.create_timesteps(arr_total, df, y_bool=y_bool) - if not self.prod: arr_test=self.create_timesteps(arr_test, df_temp_test, y_bool=y_bool) arr_train=self.create_timesteps(arr_train, df_temp_train, y_bool=y_bool) - arr_scaling=np.vstack((arr_scaling,df.values)) if arr_scaling is not None else df.values else: #for MLP, there is dependency between the steps, so we can put all indexes together in one df arr_total=np.vstack((arr_total,df.values)) if arr_total is not None else df.values if not self.prod: arr_test=np.vstack((arr_test,df_temp_test.values)) if arr_test is not None else df_temp_test.values arr_train=np.vstack((arr_train,df_temp_train.values)) if arr_train is not None else df_temp_train.values - - if self.model_type!="LSTM": - arr_scaling=arr_total del df if not self.prod: df_temp_test, df_temp_train - return arr_total, arr_train, arr_test, arr_scaling + return arr_total, arr_train, arr_test def unflatten(self, df: pd.DataFrame, @@ -527,7 +526,7 @@ def unflatten(self, out={} out2={} - if self.clf.__class__ == Sequential: + if self.clf.__class__ == Sequential and self.model_type=="LSTM": for ind in self.indexes: out[ind]={} for i, s in enumerate(self.close_dic[ind].columns): @@ -554,64 +553,12 @@ def unflatten(self, out2[ind]=pd.DataFrame(data=out[ind],index=pd.unique(sub_df.index.get_level_values(0))) return out2 - def scale( - self, - input_arr, - y_bool:bool=False, - inverse: bool=False, - predict: bool=False, - model=None - ): - ''' - Scale or descaled arrays that are in dimension 3 or 4 - - Arguments - ---------- - input_arr: array to scale/unscale - y_bool: is this array for the output? - inverse: should we unscale instead of scale? - predict: should we make a prediction? 
- model: model to be used for the prediction - ''' - scaled=None - - if y_bool or predict: - scaler=self.scaler_y - - for j in range(input_arr.shape[0]): - if inverse or predict: - if predict: - arr=model.predict(input_arr[j,:,:,:],batch_size=input_arr.shape[1]) - t=scaler.inverse_transform(arr) - else: - t=scaler.inverse_transform(input_arr[j,:,:]) - else: - t=scaler.transform(input_arr[j,:,:]) - t=np.reshape(t, (1, t.shape[0], t.shape[1])) - scaled=np.vstack((scaled,t)) if scaled is not None else t - del t - else: - scaler=self.scaler_x - - for i in range(input_arr.shape[0]): - scaled_3d=None - for j in range(input_arr.shape[1]): - if inverse: - t=scaler.inverse_transform(input_arr[i,j,:,:]) - else: - t=scaler.transform(input_arr[i,j,:,:]) - t=np.reshape(t, (1, t.shape[0], t.shape[1])) - scaled_3d=np.vstack((scaled_3d,t)) if scaled_3d is not None else t - scaled_3d=np.reshape(scaled_3d,(1,scaled_3d.shape[0], scaled_3d.shape[1], scaled_3d.shape[2])) - scaled=np.vstack((scaled,scaled_3d)) if scaled is not None else scaled_3d - del scaled_3d, t - - return scaled - def train( self, model_name:str, - n_epochs: int=100 + n_epochs: int=100, + n_neurons: int=32, + activation: str=None ): ''' Train and save the machine learning model @@ -619,60 +566,86 @@ def train( Arguments ---------- model_name: how do we want to name the model? - n_epochs: number of epochs for the training + n_epochs: number of epochs for the training + activation: name of the activation function ''' - self.model_name=model_name - + for k in ["model_name","n_neurons","activation"]: + setattr(self,k,locals()[k]) + if self.model_type in ["MLP","LSTM"]: self.scaler_x = StandardScaler() - self.scaler_x.fit(self.x_scaling) - - if self.model_type=="LSTM": - scaled_x_train=self.scale(self.x_train) - else: - scaled_x_train=self.scaler_x.transform(self.x_train) + scaled_x_train=self.scaler_x.fit_transform(self.x_train.reshape(-1,self.x_train.shape[-1])).reshape(self.x_train.shape) #transform above dim 2 self.scaler_y = StandardScaler() - self.scaler_y.fit(self.y_scaling) - - if self.model_type=="LSTM": - scaled_y_train=self.scale(self.y_train, y_bool=True) - else: - scaled_y_train=self.scaler_y.transform(self.y_train) - - #free memory - self.x_scaling=None - self.y_scaling=None + scaled_y_train=self.scaler_y.fit_transform(self.y_train.reshape(-1,self.y_train.shape[-1])).reshape(self.y_train.shape) #transform above dim 2 if self.model_type=="MLP": + #Kera + self.clf = Sequential() + self.clf.add(Dense(self.n_neurons, input_shape=(scaled_x_train.shape[1],), activation=activation)) + self.clf.add(Dense(self.n_neurons, activation=activation)) + self.clf.add(Dense(self.n_neurons, activation=activation)) + self.clf.add(Dense(self.n_neurons, activation=activation)) + self.clf.add(Dense(1, activation=activation)) + self.clf.compile(loss='mean_squared_error', optimizer='adam') + print(self.clf.summary()) + ''' + #Same with sklearn, use whatever you want self.clf = MLPRegressor( #activation="tanh", # solver='lbfgs', alpha=1e-5, - hidden_layer_sizes=(40, 4), + hidden_layer_sizes=(self.n_neurons, 4), #activation{‘identity’, ‘logistic’, ‘tanh’, ‘relu’}, default=’relu’ #random_state=1, - max_iter=10000) + max_iter=n_epochs + ) + ''' + elif self.model_type=="LSTM": self.clf = Sequential() - self.clf.add(LSTM(4, batch_input_shape=(scaled_x_train.shape[1], scaled_x_train.shape[2], scaled_x_train.shape[3]), stateful=True)) - self.clf.add(Dense(1)) + + offset=0 + if self.reduce_memory_usage: + offset=1 + + self.clf.add(LSTM( + self.n_neurons, + 
batch_input_shape=(scaled_x_train.shape[0+offset], scaled_x_train.shape[1+offset], scaled_x_train.shape[2+offset]), + stateful=True, + return_sequences=False, + )) + self.clf.add(Dense(1, activation=activation)) self.clf.compile(loss='mean_squared_error', optimizer='adam') + print(self.clf.summary()) else: self.model_type="Forest" scaled_x_train=self.x_train scaled_y_train=self.y_train self.clf= RandomForestRegressor(max_depth=10) - + print("starting the fitting") + if self.model_type=="LSTM": - for i in range(n_epochs): - if i%100==0: - print("n_epochs: "+str(i)) - for k in range(self.x_train.shape[0]): #for each symbol - self.clf.fit(scaled_x_train[k,:,:,:], scaled_y_train[k,:,:], batch_size=self.x_train.shape[1],epochs=1, shuffle=False, verbose=0) # - self.clf.reset_states() - else: + print(scaled_x_train.shape) + print(scaled_y_train.shape) + if self.reduce_memory_usage: + for i in range(n_epochs): + if i%100==0: + print("n_epochs: "+str(i)) + for k in range(self.x_train.shape[0]): #for each symbol + self.clf.fit(scaled_x_train[k,:,:,:], scaled_y_train[k,:,:], batch_size=self.x_train.shape[1],epochs=1, shuffle=False, + verbose=0) # + self.clf.reset_states() + else: + callback = callbacks.EarlyStopping(monitor='loss',patience=3) + self.clf.fit(scaled_x_train, scaled_y_train, batch_size=self.x_train.shape[0],epochs=n_epochs, shuffle=False, + verbose=1,callbacks=callback) + + elif self.model_type=="MLP" and self.clf.__class__ == Sequential: #keras + callback = callbacks.EarlyStopping(monitor='loss',patience=3) + self.clf.fit(scaled_x_train, scaled_y_train, epochs=n_epochs,callbacks=callback) + else: #sklearn self.clf.fit(scaled_x_train, scaled_y_train) with open(os.path.dirname(__file__)+"/models/"+self.model_name+".pickle", "wb") as f: @@ -706,24 +679,30 @@ def test( self.yhat_test = self.use(self.model_name, selector="test") self.yhat_train=self.use(self.model_name, selector="train") self.yhat_total=self.use(self.model_name, selector="total") - if self.clf.__class__ == Sequential: + if self.model_type=="LSTM": r2_score_test=[] r2_score_train=[] r2_score_total=[] - - for k in range(self.x_test.shape[0]): - r2_score_test.append(metrics.r2_score(self.yhat_test[k,:,:],self.y_test[k,:,:])) - for k in range(self.x_train.shape[0]): - r2_score_train.append(metrics.r2_score(self.yhat_train[k,:,:],self.y_train[k,:,:])) - for k in range(self.x_df.shape[0]): - r2_score_total.append(metrics.r2_score(self.yhat_total[k,:,:],self.y_df[k,:,:])) + + if self.reduce_memory_usage: + for k in range(self.x_test.shape[0]): + r2_score_test.append(metrics.r2_score(self.y_test[k,:,:],self.yhat_test[k,:,:])) + for k in range(self.x_train.shape[0]): + r2_score_train.append(metrics.r2_score(self.y_train[k,:,:],self.yhat_train[k,:,:])) + for k in range(self.x_df.shape[0]): + r2_score_total.append(metrics.r2_score(self.y_df[k,:,:],self.yhat_total[k,:,:])) + else: + r2_score_test.append(metrics.r2_score(self.y_test,self.yhat_test)) + r2_score_train.append(metrics.r2_score(self.y_train,self.yhat_train)) + r2_score_total.append(metrics.r2_score(self.y_df,self.yhat_total)) + print("mean r2 score test: "+str(np.mean(r2_score_test))) print("mean r2 score train: "+str(np.mean(r2_score_train))) print("mean r2 score total: "+str(np.mean(r2_score_total))) else: - r2_score_test = metrics.r2_score(self.yhat_test, self.y_test) - r2_score_train = metrics.r2_score(self.yhat_train, self.y_train) - r2_score_total = metrics.r2_score(self.yhat_total, self.y_df) + r2_score_test = metrics.r2_score(self.y_test, self.yhat_test) + 
r2_score_train = metrics.r2_score(self.y_train, self.yhat_train) + r2_score_total = metrics.r2_score( self.y_df, self.yhat_total) print("r2 score test: "+str(r2_score_test)) print("r2 score train: "+str(r2_score_train)) print("r2 score total: "+str(r2_score_total)) @@ -743,15 +722,13 @@ def load_model( ''' self.model_name=model_name - import os - print(os.getcwd()) - if "clf" not in self.__dir__() or force: with open(os.path.dirname(__file__)+"/models/"+self.model_name+".pickle", 'rb') as pickle_file: self.clf = pickle.load(pickle_file) #must be after loading clf self.load_model_docu(model_name) - if self.clf.__class__ in [MLPRegressor,Sequential] and "scaler_x" not in self.__dir__(): + + if self.clf.__class__ in [MLPRegressor,Sequential] and ("scaler_x" not in self.__dir__() or force): self.scaler_x = joblib.load(os.path.dirname(__file__)+"/models/scaler_x_"+self.model_name+".save") self.scaler_y = joblib.load(os.path.dirname(__file__)+"/models/scaler_y_"+self.model_name+".save") @@ -800,7 +777,12 @@ def use( self.prod=False if selector=="total" and "x_df" not in self.__dir__(): prepare=True - + + if "steps" in self.__dir__(): + steps=self.steps + else: + steps=None + if prepare: #prepare depending on the model parameters read in load_model self.prepare( @@ -809,37 +791,59 @@ def use( distance=self.distance, lag=self.lag, model_type=self.model_type, - steps=self.steps, + steps=steps, features_name=self.features_name, - prod=self.prod + prod=self.prod, + reduce_memory_usage=self.reduce_memory_usage ) - + if selector=="test": x_df=self.x_test elif selector=="train": x_df=self.x_train else: x_df=self.x_df - - if self.clf.__class__ == MLPRegressor: - scaled_x_df=self.scaler_x.transform(x_df) - elif self.clf.__class__ == Sequential: + + #scale + if self.model_type in ["MLP","LSTM"]: + scaled_x_df=self.scaler_x.fit_transform(x_df.reshape(-1,x_df.shape[-1])).reshape(x_df.shape) #transform above dim 2 + else: + #no scaling for forest + scaled_x_df=x_df + + #create new model + if self.model_type=="LSTM": #for LSTM we need to create a new model because the batch size must be equal to x_df.shape[1], otherwise it will crash model = Sequential() - model.add(LSTM(4, batch_input_shape=(x_df.shape[1], x_df.shape[2], x_df.shape[3]), stateful=True)) - model.add(Dense(1)) + offset=0 + if self.reduce_memory_usage: + offset=1 + + model.add(LSTM(self.n_neurons, + batch_input_shape=(x_df.shape[0+offset], x_df.shape[1+offset], x_df.shape[2+offset]), + stateful=True, + )) + model.add(Dense(1, activation=self.activation)) old_weights = self.clf.get_weights() model.set_weights(old_weights) - #scaling in self.predict here + + if self.model_type=="LSTM": + if self.reduce_memory_usage: + scaled_yhat=None + for j in range(scaled_x_df.shape[0]): + t=model.predict(scaled_x_df[j,:,:,:],batch_size=scaled_x_df.shape[1]) + t=np.reshape(t, (1, t.shape[0], t.shape[1])) + scaled_yhat=np.vstack((scaled_yhat,t)) if scaled_yhat is not None else t + else: + scaled_yhat=model.predict(scaled_x_df,batch_size=scaled_x_df.shape[0]) else: - #no scaling for forest - scaled_x_df=x_df + scaled_yhat=self.clf.predict(scaled_x_df) - if self.clf.__class__ == Sequential: - y=self.predict_lstm(x_df, model) + #inverse scale + if self.model_type in ["MLP","LSTM"]: + y=self.scaler_y.inverse_transform(scaled_yhat.reshape(-1,scaled_yhat.shape[-1])).reshape(scaled_yhat.shape) else: - y=self.clf.predict(scaled_x_df) - y=self.scaler_y.inverse_transform(np.reshape(y,(y.shape[0],1))) + y=scaled_yhat return y class MLLive(ML): @@ -903,6 +907,7 @@ class 
PreselML(Presel): def __init__( self, period:str, + reduce_trades_number:bool=True, **kwargs): super().__init__(period,**kwargs) @@ -918,9 +923,9 @@ def __init__( self.max_candidates_nb=1 self.no_ust=True - self.strategy="ml" - self.reduce_trades_number=True + self.reduce_trades_number=reduce_trades_number self.threshold_cand=1 #percent + self.calc_all=True def end_init(self): if "model_name" not in self.__dir__(): @@ -931,10 +936,36 @@ def end_init(self): selector="total", prod=True ) - self.yhat=pd.DataFrame( - data=np.transpose(yhat[:,:,0]), - columns=self.close.columns, - index=self.close.index[self.m.steps:]) + + #unflatten + if self.m.model_type=="LSTM" and self.m.reduce_memory_usage: + self.yhat=pd.DataFrame( + data=np.transpose(yhat[:,:,0]), + columns=self.close.columns, + index=self.close.index[self.m.steps:]) + elif self.m.model_type=="LSTM": + out={} + l=len(self.close.index)-self.m.steps + for k, s in enumerate(self.close.columns): + out[s]=yhat[k*l:(k+1)*l][:,0] + + self.yhat=pd.DataFrame( + data=out, + index=self.close.index[self.m.steps:]) + else: + out={} + l=len(self.close.index) + + #The results are all listed one after the other, we need to bring it back to a vbt structure + for k, s in enumerate(self.close.columns): + if len(yhat.shape)==1: #forest + out[s]=yhat[k*l:(k+1)*l][:] + else: #MLP + out[s]=yhat[k*l:(k+1)*l][:,0] + + self.yhat=pd.DataFrame( + data=out, + index=self.close.index) def sorting( self, @@ -943,29 +974,42 @@ def sorting( **kwargs ): - if "yhat" not in self.__dir__(): - raise ValueError("PreselML is an abstract class, it cannot be used directly") - - if self.reduce_trades_number: #otherwise use sorting_g - present_index_nb=self.close.index.get_loc(i) - - if present_index_nb=self.threshold_cand: #change candidate only if the difference is significative + p=self.yhat.loc[i].idxmax() #potential candidate + v=self.yhat.loc[i].max() + if (self.m.steps is None and present_index_nb==0) or \ + (self.m.steps is not None and present_index_nb==self.m.steps): self.sorted=[(p, v)] else: - self.sorted=[(pres_cand, pres_cand_v)] + previous_index=self.close.index[present_index_nb-1] + if len(self.candidates['long'][previous_index])==0: + self.sorted=[(p, v)] + print(previous_index) + print("no candidate") + else: + pres_cand=self.candidates['long'][previous_index][0] + pres_cand_v=self.yhat[pres_cand].loc[i] + if v-pres_cand_v>=self.threshold_cand: #change candidate only if the difference is significative + self.sorted=[(p, v)] + else: + self.sorted=[(pres_cand, pres_cand_v)] + except: + import traceback + print(traceback.format_exc()) + print(previous_index) + print(self.candidates['long'][previous_index]) + print(len(self.candidates['long'][previous_index])) def sorting_g(self): if not self.reduce_trades_number: #otherwise use sorting @@ -974,7 +1018,9 @@ def sorting_g(self): def perform(self, r, **kwargs): candidates, _=self.get_candidates() - r.ss_m.order_nosubstrat(candidates_to_YF(self.ust.symbols_to_YF,candidates), self.ust.exchange, self.strategy, False,keep=False) + print(candidates) + + r.ss_m.order_nosubstrat(candidates_to_YF(self.ust.symbols_to_YF,candidates), self.ust.exchange, self.st.name, False,keep=False) class PreselMLCustom(PreselML): def __init__( @@ -986,15 +1032,45 @@ def __init__( self.model_name=model_name self.end_init() -class PreselLSTM_A(PreselML): +class PreselML_MLP_A(PreselML): def __init__( self, period:str, **kwargs): super().__init__(period,**kwargs) - - self.model_name="lstm_new" - self.strategy="lstm_A" + 
self.model_name="240107_mlp_future10_neuron40_keras_CAC_DAX_NASDAQ_FIN_HEALTH" + self.end_init() + +class PreselML_LSTM_A(PreselML): + def __init__( + self, + period:str, + **kwargs): + super().__init__(period,**kwargs) + self.model_name="240123_lstm_epoch1000_future10_steps25_neuron8_CAC" self.end_init() - +if __name__=="__main__": + period="2007_2023_08" + m=ML(period,indexes=['CAC40',"DAX", "NASDAQ"]) #,"DAX", "NASDAQ","FIN","HEALTHCARE" + features_name=['STOCH', 'RSI',"WILLR","MFI",'BBANDS_BANDWIDTH','ULTOSC',"OBV","AD", + "GROW_30","GROW_30_RANK","GROW_30_MA","GROW_30_MA_RANK","GROW_30_DEMA","GROW_30_DEMA_RANK", + "GROW_50","GROW_50_RANK","GROW_50_MA","GROW_50_MA_RANK","GROW_50_DEMA","GROW_50_DEMA_RANK", + "GROW_200","GROW_200_RANK","GROW_200_MA","GROW_200_MA_RANK","GROW_200_DEMA","GROW_200_DEMA_RANK", + "KAMA_DURATION","KAMA_DURATION_RANK","NATR","HIST","MACD","DIVERGENCE","STD","MACRO_TREND","HT_TRENDMODE", + "PU_RESISTANCE","PU_SUPPORT"] + + + m.prepare(preprocessing=True, + next_day_price=False, + distance=10, + #model_type="Forest", + model_type="LSTM", + #model_type="MLP", + #steps=20, + reduce_memory_usage=True, + features_name=features_name) + + m.train("240218_lstm_test_reduced_memory",n_epochs=1000,n_neurons=40, activation="tanh") + + #m.test("240115_lstm_no_memory_steps25_future10_tanh_CAC") diff --git a/py-trading-bot/ml/models/240218_lstm_test_no_reduced_memory.json b/py-trading-bot/ml/models/240218_lstm_test_no_reduced_memory.json new file mode 100644 index 0000000..79e5c6e --- /dev/null +++ b/py-trading-bot/ml/models/240218_lstm_test_no_reduced_memory.json @@ -0,0 +1,55 @@ +{ + "features_name": [ + "STOCH", + "RSI", + "WILLR", + "MFI", + "BBANDS_BANDWIDTH", + "ULTOSC", + "OBV", + "AD", + "GROW_30", + "GROW_30_RANK", + "GROW_30_MA", + "GROW_30_MA_RANK", + "GROW_30_DEMA", + "GROW_30_DEMA_RANK", + "GROW_50", + "GROW_50_RANK", + "GROW_50_MA", + "GROW_50_MA_RANK", + "GROW_50_DEMA", + "GROW_50_DEMA_RANK", + "GROW_200", + "GROW_200_RANK", + "GROW_200_MA", + "GROW_200_MA_RANK", + "GROW_200_DEMA", + "GROW_200_DEMA_RANK", + "KAMA_DURATION", + "KAMA_DURATION_RANK", + "NATR", + "HIST", + "MACD", + "DIVERGENCE", + "STD", + "MACRO_TREND", + "HT_TRENDMODE", + "PU_RESISTANCE", + "PU_SUPPORT" + ], + "model_type": "LSTM", + "preprocessing": true, + "next_day_price": false, + "distance": 10, + "lag": 0, + "indexes": [ + "CAC40" + ], + "period": "2007_2023_08", + "n_neurons": 8, + "activation": "tanh", + "reduce_memory_usage": false, + "steps": 10, + "training_date": "21.02.24" +} \ No newline at end of file diff --git a/py-trading-bot/ml/models/240218_lstm_test_no_reduced_memory.pickle b/py-trading-bot/ml/models/240218_lstm_test_no_reduced_memory.pickle new file mode 100644 index 0000000..9beac00 Binary files /dev/null and b/py-trading-bot/ml/models/240218_lstm_test_no_reduced_memory.pickle differ diff --git a/py-trading-bot/ml/models/240218_lstm_test_reduced_memory.json b/py-trading-bot/ml/models/240218_lstm_test_reduced_memory.json new file mode 100644 index 0000000..9aa9c71 --- /dev/null +++ b/py-trading-bot/ml/models/240218_lstm_test_reduced_memory.json @@ -0,0 +1,55 @@ +{ + "features_name": [ + "STOCH", + "RSI", + "WILLR", + "MFI", + "BBANDS_BANDWIDTH", + "ULTOSC", + "OBV", + "AD", + "GROW_30", + "GROW_30_RANK", + "GROW_30_MA", + "GROW_30_MA_RANK", + "GROW_30_DEMA", + "GROW_30_DEMA_RANK", + "GROW_50", + "GROW_50_RANK", + "GROW_50_MA", + "GROW_50_MA_RANK", + "GROW_50_DEMA", + "GROW_50_DEMA_RANK", + "GROW_200", + "GROW_200_RANK", + "GROW_200_MA", + "GROW_200_MA_RANK", + 
"GROW_200_DEMA", + "GROW_200_DEMA_RANK", + "KAMA_DURATION", + "KAMA_DURATION_RANK", + "NATR", + "HIST", + "MACD", + "DIVERGENCE", + "STD", + "MACRO_TREND", + "HT_TRENDMODE", + "PU_RESISTANCE", + "PU_SUPPORT" + ], + "model_type": "LSTM", + "preprocessing": true, + "next_day_price": false, + "distance": 10, + "lag": 0, + "indexes": [ + "CAC40" + ], + "period": "2007_2023_08", + "n_neurons": 8, + "activation": "tanh", + "reduce_memory_usage": true, + "steps": 10, + "training_date": "21.02.24" +} \ No newline at end of file diff --git a/py-trading-bot/ml/models/240218_lstm_test_reduced_memory.pickle b/py-trading-bot/ml/models/240218_lstm_test_reduced_memory.pickle new file mode 100644 index 0000000..443bac1 Binary files /dev/null and b/py-trading-bot/ml/models/240218_lstm_test_reduced_memory.pickle differ diff --git a/py-trading-bot/ml/models/mlp_test.json b/py-trading-bot/ml/models/mlp_test.json new file mode 100644 index 0000000..3f97845 --- /dev/null +++ b/py-trading-bot/ml/models/mlp_test.json @@ -0,0 +1,48 @@ +{ + "features_name": [ + "STOCH", + "RSI", + "WILLR", + "MFI", + "BBANDS_BANDWIDTH", + "ULTOSC", + "OBV", + "AD", + "GROW_30", + "GROW_30_RANK", + "GROW_30_MA", + "GROW_30_MA_RANK", + "GROW_30_DEMA", + "GROW_30_DEMA_RANK", + "GROW_50", + "GROW_50_RANK", + "GROW_50_MA", + "GROW_50_MA_RANK", + "GROW_50_DEMA", + "GROW_50_DEMA_RANK", + "GROW_200", + "GROW_200_RANK", + "GROW_200_MA", + "GROW_200_MA_RANK", + "GROW_200_DEMA", + "GROW_200_DEMA_RANK", + "KAMA_DURATION", + "KAMA_DURATION_RANK", + "NATR", + "HIST", + "MACD", + "DIVERGENCE", + "STD", + "MACRO_TREND", + "HT_TRENDMODE", + "PU_RESISTANCE", + "PU_SUPPORT" + ], + "model_type": "MLP", + "preprocessing": true, + "next_day_price": false, + "distance": 10, + "lag": 0, + "training_date": "23.11.23", + "reduce_memory_usage": false +} diff --git a/py-trading-bot/ml/models/mlp_test.pickle b/py-trading-bot/ml/models/mlp_test.pickle new file mode 100644 index 0000000..9fa10e3 Binary files /dev/null and b/py-trading-bot/ml/models/mlp_test.pickle differ diff --git a/py-trading-bot/ml/models/scaler_x_240218_lstm_test_no_reduced_memory.save b/py-trading-bot/ml/models/scaler_x_240218_lstm_test_no_reduced_memory.save new file mode 100644 index 0000000..65969be Binary files /dev/null and b/py-trading-bot/ml/models/scaler_x_240218_lstm_test_no_reduced_memory.save differ diff --git a/py-trading-bot/ml/models/scaler_x_240218_lstm_test_reduced_memory.save b/py-trading-bot/ml/models/scaler_x_240218_lstm_test_reduced_memory.save new file mode 100644 index 0000000..65969be Binary files /dev/null and b/py-trading-bot/ml/models/scaler_x_240218_lstm_test_reduced_memory.save differ diff --git a/py-trading-bot/ml/models/scaler_x_mlp_test.save b/py-trading-bot/ml/models/scaler_x_mlp_test.save new file mode 100644 index 0000000..3754ceb Binary files /dev/null and b/py-trading-bot/ml/models/scaler_x_mlp_test.save differ diff --git a/py-trading-bot/ml/models/scaler_y_240218_lstm_test_no_reduced_memory.save b/py-trading-bot/ml/models/scaler_y_240218_lstm_test_no_reduced_memory.save new file mode 100644 index 0000000..bc43217 Binary files /dev/null and b/py-trading-bot/ml/models/scaler_y_240218_lstm_test_no_reduced_memory.save differ diff --git a/py-trading-bot/ml/models/scaler_y_240218_lstm_test_reduced_memory.save b/py-trading-bot/ml/models/scaler_y_240218_lstm_test_reduced_memory.save new file mode 100644 index 0000000..bc43217 Binary files /dev/null and b/py-trading-bot/ml/models/scaler_y_240218_lstm_test_reduced_memory.save differ diff --git 
a/py-trading-bot/ml/models/scaler_y_mlp_test.save b/py-trading-bot/ml/models/scaler_y_mlp_test.save new file mode 100644 index 0000000..55a7f01 Binary files /dev/null and b/py-trading-bot/ml/models/scaler_y_mlp_test.save differ diff --git a/py-trading-bot/opt/opt_main.py b/py-trading-bot/opt/opt_main.py index 7397cd2..5731336 100644 --- a/py-trading-bot/opt/opt_main.py +++ b/py-trading-bot/opt/opt_main.py @@ -58,6 +58,8 @@ def __init__( filename: str="main", testing: bool=False, opt_only_exit: bool=False, + proba_one: float=0.05, + minimum_trades: int=50 ): ''' Optimisation main class @@ -85,9 +87,12 @@ def __init__( testing: set to True to perform unittest on the function filename: name of the file where to solve the result opt_only_exit: optimize only the exits as the entries are fixed by another mechanism + proba_one: in the random array probability of having a 1 (other values are 0) in pu + minimum_trades: minimum number of trades for a strategy to be eligible. "Hold" strategies are not of interest ''' for k in ["ratio_learn_train","split_learn_train", "indexes", "it_is_index","nb_macro_modes", - "strat_arr","fees", "sl", "tsl", "filename","testing","opt_only_exit"]: + "strat_arr","fees", "sl", "tsl", "filename","testing","opt_only_exit","proba_one", + "minimum_trades"]: setattr(self,k,locals()[k]) #init for key in ["close","open","low","high","data","volume"]: @@ -568,7 +573,7 @@ def random(self, l:int)-> list: #choose randomly 0 and 1. All zeros is not accepted. 90% chance 0, 10% chance 1 s=0 while s==0: - arr=np.random.choice(2,l, p=[0.9, 0.1]) + arr=np.random.choice(2,l, p=[1-self.proba_one, self.proba_one]) s=np.sum(arr) return arr @@ -646,7 +651,7 @@ def variate( else: sub_df=self.test_arrs.loc[self.variate_first_ind:] - if sub_df["opt_return"].max() > self.best_loop_ret and self.trades>50: + if sub_df["opt_return"].max() > self.best_loop_ret and self.trades>self.minimum_trades: self.progression=True self.best_loop_ret=sub_df["opt_return"].max() self.log("Overall perf, "+dic+": " + str(round(self.best_loop_ret,3)),pr=True) diff --git a/py-trading-bot/opt/opt_strat.py b/py-trading-bot/opt/opt_strat.py index 6312ff3..b7c9167 100644 --- a/py-trading-bot/opt/opt_strat.py +++ b/py-trading-bot/opt/opt_strat.py @@ -8,7 +8,7 @@ Script to optimize the combination of patterns/signals used for a given strategy The optimization takes place on the actions from CAC40, DAX and Nasdaq -Parameters very good on some actions but very bad for others should not be selected +Parameters very good on some stocks but very bad for others should not be selected The optimization algorithm calculates one point, look for the points around it and select the best one As it can obviously lead to local maximum, the starting point is selected in a random manner @@ -88,7 +88,7 @@ def calculate_pf( ret_arr.append(t) else: ret+=self.calculate_eq_ret(pf_dic[ind],ind) - self.row["trades_"+ind+"_"+dic]=len(pf_dic[ind].get_trades().records_arr) + self.row["trades_"+ind+"_"+dic]=len(pf_dic[ind].get_trades().records_arr)/len(pf_dic[ind].wrapper.columns) if self.it_is_index: self.row["mean_surperf_factor_w_"+ind+"_"+dic+"_raw"]=np.mean(ret_arr) diff --git a/py-trading-bot/orders/admin.py b/py-trading-bot/orders/admin.py index 541ff5e..0d7c180 100644 --- a/py-trading-bot/orders/admin.py +++ b/py-trading-bot/orders/admin.py @@ -41,9 +41,10 @@ def get_form(self, request, obj=None, **kwargs): class StratCandidatesAdmin(admin.ModelAdmin): def get_form(self, request, obj=None, **kwargs): form = super(StratCandidatesAdmin, 
self).get_form(request, obj, **kwargs) + if "index" in self.instance.name: - ind=ActionCategory.objects.get(short="IND") - form.base_fields['actions'].queryset = Action.objects.filter(category=ind) - -admin.site.register(StratCandidates, StratCandidatesAdmin) + ind=ActionCategory.objects.get(short="IND") + form.base_fields['actions'].queryset = Action.objects.filter(category=ind) +#, StratCandidatesAdmin +admin.site.register(StratCandidates ) # Register your models here. diff --git a/py-trading-bot/orders/ib.py b/py-trading-bot/orders/ib.py index 4242da5..173db7f 100644 --- a/py-trading-bot/orders/ib.py +++ b/py-trading-bot/orders/ib.py @@ -107,9 +107,7 @@ def check_enough_cash( excess_money_engaged=False out_order_size=0 base_out_order_size=0 - - print("base_cash:"+str(base_cash)) - print("money_engaged:"+str(money_engaged)) + if base_cash is not None: if base_cash>=base_order_size: enough_cash=True @@ -121,7 +119,7 @@ def check_enough_cash( base_out_order_size=base_cash if st.maximum_money_engaged is not None and (money_engaged+base_out_order_size>st.maximum_money_engaged): - print("excess_money_engaged for strategy: "+st.name+" candidate: "+action.name) + logger.info("excess_money_engaged for strategy: "+st.name+" candidate: "+action.name) excess_money_engaged=True return enough_cash, out_order_size, excess_money_engaged @@ -154,7 +152,7 @@ def get_money_engaged( price_base=convert_to_base(action.currency.symbol,price) total_money_engaged+=ss.quantity*price_base else: - print("price for "+symbol+" is nan") + logger.info("price for "+symbol+" is nan") return total_money_engaged @@ -278,7 +276,7 @@ def place( if last_price!=0: quantity=math.floor(order_size/last_price) else: - print("last price is zero for "+action.symbol) + logger.info("last price is zero for "+action.symbol) return 1.0, 0.0 if not testing: @@ -430,8 +428,8 @@ def get_last_price_sub(cls,contract): cls.resolve_client(client=None) m_data = cls.client.reqMktData(contract) while (m_data.last != m_data.last) and (m_data.bid != m_data.bid) and t1: for a in actions: - if a.ib_ticker()==contract.localSymbol: + if a.ib_ticker()==contract.localSymbol and a.stock_ex.ib_ticker==contract.exchange: action=a - + elif a.ib_ticker()==contract.localSymbol: + action_back_up=a + actions_back_up.append(action_back_up) + + if action is None: + logger.info("Combination, ticker: "+contract.localSymbol+" exchange: "+contract.exchange + " not found in database") + if action_back_up is not None: + logger.info("But combination, ticker: "+contract.localSymbol+" exchange: "+action_back_up.stock_ex.ib_ticker + " found in database") + if action is not None: if action in actions_in_pf: actions_in_pf.remove(action) @@ -639,21 +644,22 @@ def actualize_ss(self,**kwargs): present_ss=StockStatus.objects.get(action=action) if present_ss.quantity!=pos.position: if pos.position==0: - logger_trade.info(action.symbol+" quantity actualized from "+ str(present_ss.quantity) +" to " + str(pos.position) + ", strategy set to none") + logger.info(action.symbol+" quantity actualized from "+ str(present_ss.quantity) +" to " + str(pos.position) + ", strategy set to none") else: - logger_trade.info(action.symbol+" quantity actualized from "+ str(present_ss.quantity) +" to " + str(pos.position) + ", update manually the strategy") + logger.info(action.symbol+" quantity actualized from "+ str(present_ss.quantity) +" to " + str(pos.position) + ", update manually the strategy") present_ss.quantity=pos.position present_ss.strategy=Strategy.objects.get(name="none") 
present_ss.order_in_ib=True present_ss.save() #in pf but not anymore in IB, so sold manually for action in actions_in_pf: #only those remaining - logger_trade.info(action.symbol+" quantity actualized from "+ str(present_ss.quantity) +" to 0") - present_ss=StockStatus.objects.get(action=action) - present_ss.quantity=0 - present_ss.strategy=Strategy.objects.get(name="none") - present_ss.order_in_ib=False - present_ss.save() + if action not in actions_back_up: #to avoid the case of ISLAND exchange used outside of business hours + logger_trade.info(action.symbol+" quantity actualized from "+ str(present_ss.quantity) +" to 0") + present_ss=StockStatus.objects.get(action=action) + present_ss.quantity=0 + present_ss.strategy=Strategy.objects.get(name="none") + present_ss.order_in_ib=False + present_ss.save() def retrieve_quantity( self, @@ -670,6 +676,10 @@ def retrieve_quantity( for pos in self.client.positions(): contract=pos.contract if action.ib_ticker()==contract.localSymbol: + logger.info("position") + logger.info(pos.position) + logger.info(abs(pos.position)) + return abs(pos.position), np.sign(pos.position), pos.position<0 return 0, 0, False @@ -768,6 +778,8 @@ def place( return 1.0, 0.0 else: quantity=abs(quantity) + logger.info("quantity got from quantity") + logger.info(quantity) if not testing: if buy: @@ -865,7 +877,6 @@ def get_order(self,buy: bool): orders=Order.objects.filter(c1 & c2) if len(orders)>1: - print("several active orders have been found for: "+self.action.symbol+", check the database") logger.error("several active orders have been found for: "+self.action.symbol+", check the database") if len(orders)==0: @@ -997,12 +1008,15 @@ def get_delta_size(self): self.order.quantity=present_quantity self.order.save() self.reverse=False + if present_quantity!=0: self.present_size= present_sign*present_quantity*get_last_price(self.action) - if present_sign!= np.sign(self.target_size): + + if present_sign !=0 and np.sign(self.target_size)!=0 and present_sign!= np.sign(self.target_size): self.reverse=True else: self.present_size=0 + self.delta_size=self.target_size-self.present_size def close_order(self): @@ -1015,6 +1029,7 @@ def close_order(self): def close_quantity(self): self.ss.quantity=0 + self.ss.strategy=Strategy.objects.get(name="none") self.ss.save() def sell_order_sub(self): @@ -1094,9 +1109,9 @@ def buy_order_sub(self): excess_money_engaged=False if not enough_cash: - logger.info(str(self.symbol) + " order not executed, not enough cash available") + logger_trade.info(str(self.symbol) + " order not executed, not enough cash available") elif excess_money_engaged: - logger.info(str(self.symbol) + " order not executed, maximum money engaged for one strategy exceeded") + logger_trade.info(str(self.symbol) + " order not executed, maximum money engaged for one strategy exceeded") else: if self.new_order_bool: #we open a new long order self.reverse=False @@ -1109,8 +1124,6 @@ def buy_order_sub(self): if order_size>0 and self.present_size<=0: #if reverse but excluded then close without further conditions if self.reverse and self.symbol not in self.excluded.retrieve(): - print("entry place") - self.entry_place(True, order_size=order_size) self.order.exiting_price=self.new_order.entering_price elif _settings["USED_API"]["orders"]=="IB" : @@ -1125,7 +1138,7 @@ def buy_order_sub(self): self.order.exiting_price=self.order.exiting_price self.close_quantity() else: - logger_trade.info("Manual exit order symbol: "+self.symbol+" , strategy: " + self.st.name + "which is in short position") 
+ logger_trade.info("Manual exit order symbol: "+self.symbol+" , strategy: " + self.st.name + " which is in short position") self.close_quantity() self.calc_profit() self.close_order() diff --git a/py-trading-bot/orders/models.py b/py-trading-bot/orders/models.py index b19a702..720d4b6 100644 --- a/py-trading-bot/orders/models.py +++ b/py-trading-bot/orders/models.py @@ -199,12 +199,8 @@ class Meta: ordering = ["name"] def save(self, *args, **kwargs): - is_new=False - if "id" not in self.__dir__(): - is_new = True super().save(*args, **kwargs) - if is_new: - StockStatus.objects.create(action=self) + StockStatus.objects.get_or_create(action=self) def ib_ticker(self): if self.ib_ticker_explicit!="AAA" and self.ib_ticker_explicit is not None: @@ -230,15 +226,20 @@ def filter_intro_sub( ''' td=datetime.datetime.today() if y_period is None: - limit_date=td + limit_date_intro=td else: - limit_date=datetime.datetime(td.year-y_period,td.month,td.day,tzinfo=tz_Paris) #time zone not important here but otherwise bug + if td.month==2 and td.day==29: #29th feb + tdday=28 + else: + tdday=td.day + limit_date_intro=datetime.datetime(td.year-y_period,td.month,tdday,tzinfo=tz_Paris) #time zone not important here but otherwise bug + limit_date_delisted=datetime.datetime(td.year,td.month,td.day,tzinfo=tz_Paris) #today tz aware if a.intro_date is not None: #should come from database - if a.intro_date>limit_date : + if a.intro_date>limit_date_intro : return False if a.delisting_date is not None: - if a.delisting_datePython Trading bot -
[reporting HTML template diff: page header "Python Trading bot"; the author line "Author: Psemdel, Version: 0.20, Github" becomes "Author: Psemdel, Version: 0.21, Github". Only this version bump is recoverable; the HTML markup of this hunk was lost.]
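The orders/models.py hunk above reworks filter_intro_sub: the cutoff for recently introduced stocks is shifted back by y_period years with a guard for 29 February, and the delisting date is compared against a timezone-aware "today". A minimal standalone sketch of that logic, assuming timezone-aware datetimes as stored by Django; the function name, the plain-argument signature and the ZoneInfo object standing in for the project's tz_Paris are hypothetical, and the delisting comparison is presumed to be "delisted before today":

import datetime
from zoneinfo import ZoneInfo

tz_Paris = ZoneInfo("Europe/Paris")  # assumption: stands in for the project's tz_Paris object

def filter_intro_sketch(intro_date, delisting_date, y_period):
    """Return False when the stock must be dropped, mirroring the patched filter_intro_sub."""
    td = datetime.datetime.today()
    # 29 February has no counterpart y_period years earlier, so fall back to the 28th
    day = 28 if (td.month == 2 and td.day == 29) else td.day
    limit_date_intro = datetime.datetime(td.year - y_period, td.month, day, tzinfo=tz_Paris)
    limit_date_delisted = datetime.datetime(td.year, td.month, td.day, tzinfo=tz_Paris)
    if intro_date is not None and intro_date > limit_date_intro:
        return False  # introduced too recently for the requested period
    if delisting_date is not None and delisting_date < limit_date_delisted:
        return False  # presumably: already delisted before today
    return True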
diff --git a/py-trading-bot/reporting/views.py b/py-trading-bot/reporting/views.py index fff18d3..740383d 100644 --- a/py-trading-bot/reporting/views.py +++ b/py-trading-bot/reporting/views.py @@ -77,7 +77,7 @@ def daily_report( for sec in ActionSector.objects.all(): strats=getattr(sec,a).all() if len(strats)!=0: #some strategy is activated for this sector - print("starting report " + sec) + print("starting report " + sec.name) daily_report_sub(s_ex.name,sec=sec) else: strats=getattr(s_ex,a).all() diff --git a/py-trading-bot/start_bot_venv.sh b/py-trading-bot/start_bot_venv.sh new file mode 100755 index 0000000..c2b7bdf --- /dev/null +++ b/py-trading-bot/start_bot_venv.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +source /mnt/Gros/Progra/Anaconda/bin/activate PyTradingBot39 + redis-server & + python3 manage.py runserver & + sleep 50 + xdg-open http://localhost:8000/start_bot + celery -A trading_bot worker -l info + && fg + + + diff --git a/py-trading-bot/tests/test_defi.py b/py-trading-bot/tests/test_defi.py index 253cffb..4c9b487 100644 --- a/py-trading-bot/tests/test_defi.py +++ b/py-trading-bot/tests/test_defi.py @@ -9,7 +9,7 @@ from opt import opt_main from opt import opt_strat import numpy as np -from core import strat +from core import strat, common import numbers import vectorbtpro as vbt @@ -92,8 +92,14 @@ def test_defi_i1(self): self.assertEqual(np.shape(self.o.ents["CAC40"]),np.shape(ust.entries)) self.assertEqual(np.shape(self.o.exs["CAC40"]),np.shape(ust.exits)) - self.assertTrue(np.equal(self.o.ents["CAC40"], ust.entries).all().all()) - self.assertTrue(np.equal(self.o.exs["CAC40"], ust.exits).all().all()) + self.assertTrue(np.equal( + common.remove_multi(self.o.ents["CAC40"]), + common.remove_multi(ust.entries) + ).all().all()) + self.assertTrue(np.equal( + common.remove_multi(self.o.exs["CAC40"]), + common.remove_multi(ust.exits) + ).all().all()) self.assertEqual(rr1,rr2) self.assertEqual(rb1,rb2) @@ -136,8 +142,14 @@ def test_defi_i2(self): self.assertEqual(np.shape(self.o.ents["CAC40"]),np.shape(ust.entries)) self.assertEqual(np.shape(self.o.exs["CAC40"]),np.shape(ust.exits)) - self.assertTrue(np.equal(self.o.ents["CAC40"], ust.entries).all().all()) - self.assertTrue(np.equal(self.o.exs["CAC40"], ust.exits).all().all()) + self.assertTrue(np.equal( + common.remove_multi(self.o.ents["CAC40"]), + common.remove_multi(ust.entries) + ).all().all()) + self.assertTrue(np.equal( + common.remove_multi(self.o.exs["CAC40"]), + common.remove_multi(ust.exits) + ).all().all()) self.assertEqual(rr1,rr2) self.assertEqual(rb1,rb2) @@ -180,13 +192,26 @@ def test_defi_i3(self): rr2=round(np.mean(pf2.get_total_return().values),3) rb2=round(np.mean(pf2.total_market_return.values),3) + self.assertEqual(np.shape(self.o.ents["CAC40"]),np.shape(ust.entries)) self.assertEqual(np.shape(self.o.exs["CAC40"]),np.shape(ust.exits)) - self.assertTrue(np.equal(self.o.ents["CAC40"], ust.entries).all().all()) - self.assertTrue(np.equal(self.o.exs["CAC40"], ust.exits).all().all()) - self.assertTrue(np.equal(self.o.ents_short["CAC40"], ust.entries_short).all().all()) - self.assertTrue(np.equal(self.o.exs_short["CAC40"], ust.exits_short).all().all()) + self.assertTrue(np.equal( + common.remove_multi(self.o.ents["CAC40"]), + common.remove_multi(ust.entries) + ).all().all()) + self.assertTrue(np.equal( + common.remove_multi(self.o.exs["CAC40"]), + common.remove_multi(ust.exits) + ).all().all()) + self.assertTrue(np.equal( + common.remove_multi(self.o.ents_short["CAC40"]), + common.remove_multi(ust.entries_short) + 
).all().all()) + self.assertTrue(np.equal( + common.remove_multi(self.o.exs_short["CAC40"]), + common.remove_multi(ust.exits_short) + ).all().all()) self.assertEqual(rr1,rr2) self.assertEqual(rb1,rb2) @@ -236,12 +261,28 @@ def test_defi_i4(self): self.assertEqual(np.shape(self.o.ents["CAC40"]),np.shape(ust.entries)) self.assertEqual(np.shape(self.o.exs["CAC40"]),np.shape(ust.exits)) - self.assertTrue(np.equal(self.o.ents["CAC40"], ust.entries).all().all()) - self.assertTrue(np.equal(self.o.exs["CAC40"], ust.exits).all().all()) - self.assertTrue(np.equal(self.o.macro_trend["CAC40"]["total"], ust.macro_trend).all().all()) + self.assertTrue(np.equal( + common.remove_multi(self.o.ents["CAC40"]), + common.remove_multi(ust.entries) + ).all().all()) + self.assertTrue(np.equal( + common.remove_multi(self.o.exs["CAC40"]), + common.remove_multi(ust.exits) + ).all().all()) + + self.assertTrue(np.equal( + common.remove_multi(self.o.macro_trend["CAC40"]["total"]), + common.remove_multi(ust.macro_trend) + ).all().all()) - self.assertTrue(np.equal(self.o.ents_short["CAC40"], ust.entries_short).all().all()) - self.assertTrue(np.equal(self.o.exs_short["CAC40"], ust.exits_short).all().all()) + self.assertTrue(np.equal( + common.remove_multi(self.o.ents_short["CAC40"]), + common.remove_multi(ust.entries_short) + ).all().all()) + self.assertTrue(np.equal( + common.remove_multi(self.o.exs_short["CAC40"]), + common.remove_multi(ust.exits_short) + ).all().all()) self.assertEqual(rr1,rr2) self.assertEqual(rb1,rb2) diff --git a/py-trading-bot/tests/test_ib.py b/py-trading-bot/tests/test_ib.py index ab13d7b..1a5fb82 100644 --- a/py-trading-bot/tests/test_ib.py +++ b/py-trading-bot/tests/test_ib.py @@ -299,6 +299,17 @@ def test_get_delta_size(self): op.get_delta_size() self.assertTrue(op.reverse) self.assertTrue(op.delta_size>0) + + op.target_size=0 + op.get_delta_size() + self.assertFalse(op.reverse) + + op.ss.quantity=0 + op.ss.save() + op.target_size=10000 + op.get_delta_size() + self.assertFalse(op.reverse) + self.assertTrue(op.delta_size>0) def test_entry_place(self): diff --git a/py-trading-bot/tests/test_indicators.py b/py-trading-bot/tests/test_indicators.py index 925654d..2b613fb 100644 --- a/py-trading-bot/tests/test_indicators.py +++ b/py-trading-bot/tests/test_indicators.py @@ -15,7 +15,7 @@ class TestIndicator(TestCase): @classmethod def setUpClass(self): super().setUpClass() - self.ust=strat.StratDiv("2007_2023_08", symbol_index="CAC40") + self.ust=strat.StratDiv2("2007_2023_08", symbol_index="CAC40") self.ust.run() def test_rel_dif(self): @@ -129,9 +129,7 @@ def test_VBTSTOCHKAMA(self): def test_VBTKAMA(self): t=ic.VBTKAMA.run(self.ust.close) - - print(t.bot_ext['AIR'].values[-10:]) - + self.assertFalse(t.bot_ext['AIR'].values[-2]) self.assertFalse(t.bot_ext['AIR'].values[-1]) @@ -217,11 +215,11 @@ def test_VBTBBANDSTREND(self): self.assertEqual(t.trend['AIR'].values[-1],0) self.assertEqual(t.trend['ATO'].values[-1],10) self.assertEqual(t.trend['BN'].values[-1],0) - - self.assertEqual(round(t.bb_bw['AC'].values[-1],2),0.03) - self.assertEqual(round(t.bb_bw['AI'].values[-1],2),0.06) - self.assertEqual(round(t.bb_bw['AIR'].values[-1],2),0.05) - self.assertEqual(round(t.bb_bw['SLB'].values[-1],2),0.19) + + self.assertEqual(round(t.bb_bw['AC'].values[-1],2),0.02) + self.assertEqual(round(t.bb_bw['AI'].values[-1],2),0.04) + self.assertEqual(round(t.bb_bw['AIR'].values[-1],2),0.03) + self.assertEqual(round(t.bb_bw['SLB'].values[-1],2),0.07) def test_VBTMACDBBTREND(self): 
t=ic.VBTMACDBBTREND.run(self.ust.close) @@ -234,10 +232,10 @@ def test_VBTMACDBBTREND(self): self.assertEqual(round(t.kama['MC'].values[-2],2),847.39) self.assertEqual(round(t.kama['MC'].values[-3],2),847.46) - self.assertEqual(round(t.bb_bw['AC'].values[-1],2),0.03) - self.assertEqual(round(t.bb_bw['AI'].values[-1],2),0.06) - self.assertEqual(round(t.bb_bw['AIR'].values[-1],2),0.05) - self.assertEqual(round(t.bb_bw['SLB'].values[-1],2),0.19) + self.assertEqual(round(t.bb_bw['AC'].values[-1],2),0.02) + self.assertEqual(round(t.bb_bw['AI'].values[-1],2),0.04) + self.assertEqual(round(t.bb_bw['AIR'].values[-1],2),0.03) + self.assertEqual(round(t.bb_bw['SLB'].values[-1],2),0.07) self.assertEqual(round(t.bb_bw['MC'].values[-1],2),0.09) self.assertEqual(round(t.bb_bw['MC'].values[-2],2),0.09) self.assertEqual(round(t.bb_bw['MC'].values[-3],2),0.09) diff --git a/py-trading-bot/tests/test_macro.py b/py-trading-bot/tests/test_macro.py index 9f3e20c..bdb9952 100644 --- a/py-trading-bot/tests/test_macro.py +++ b/py-trading-bot/tests/test_macro.py @@ -68,26 +68,28 @@ def test_VBTMACROFILTER(self): self.ust.run() t=macro.VBTMACROTREND.run(self.ust.close) - - self.assertEqual(self.ust.entries['AC'].values[0],False) + + self.assertEqual(self.ust.entries[self.ust.entries.columns[0]].values[0],False) self.assertEqual(t.macro_trend['AC'].values[0],0) t2=macro.VBTMACROFILTER.run(self.ust.entries,t.macro_trend, mode=0 ) self.assertEqual(t.macro_trend['AC'].values[0],t2.out[t2.out.columns[0]].values[0]) - self.assertEqual(self.ust.entries['AC'].values[-1],True) - self.assertEqual(t.macro_trend['AC'].values[-1],1) - self.assertFalse(t2.out[t2.out.columns[0]].values[-1]) - - self.assertEqual(self.ust.entries['ATO'].values[-1],True) - self.assertEqual(t.macro_trend['ATO'].values[-1],1) - self.assertFalse(t2.out[t2.out.columns[4]].values[-1]) + self.assertEqual(self.ust.entries[self.ust.entries.columns[0]].values[-12],True) + self.assertEqual(t.macro_trend['AC'].values[-12],1) + self.assertFalse(t2.out[t2.out.columns[0]].values[-12]) - self.assertEqual(self.ust.entries['BNP'].values[-34],True) - self.assertEqual(t.macro_trend['BNP'].values[-34],0) - self.assertTrue(t2.out[t2.out.columns[6]].values[-34]) + self.assertEqual(self.ust.entries[self.ust.entries.columns[11]].values[-6],True) + self.assertEqual(t.macro_trend['DSY'].values[-6],-1) + self.assertFalse(t2.out[t2.out.columns[11]].values[-6]) + self.assertEqual(self.ust.entries[self.ust.entries.columns[5]].values[-11],True) + self.assertEqual(t.macro_trend['BN'].values[-11],0) + self.assertTrue(t2.out[t2.out.columns[5]].values[-11]) + self.assertEqual(self.ust.entries[self.ust.entries.columns[8]].values[-5],True) + self.assertEqual(t.macro_trend['CAP'].values[-5],0) + self.assertTrue(t2.out[t2.out.columns[8]].values[-5]) diff --git a/py-trading-bot/tests/test_ml.py b/py-trading-bot/tests/test_ml.py index 5c7f984..146ced3 100644 --- a/py-trading-bot/tests/test_ml.py +++ b/py-trading-bot/tests/test_ml.py @@ -162,40 +162,57 @@ def test_load_model(self): self.m.load_model("lstm_test",force=True) self.assertEqual(self.m.model_type,"LSTM") - + def test_use_MLP(self): y=self.m.use("mlp_test","total") - self.assertEqual(y.shape,(156156,1)) - self.assertEqual(round(y[0,0],2),-2.09) - self.assertEqual(round(y[-1,0],2),-2.09) + self.assertEqual(y.shape,(156156,)) + self.assertEqual(round(y[0],2),-2.09) + self.assertEqual(round(y[-1],2),-2.09) y=self.m.use("mlp_test","test") - self.assertEqual(y.shape,(31239,1)) - self.assertEqual(round(y[0,0],2),-2.09) - 
self.assertEqual(round(y[-1,0],2),-2.09) + self.assertEqual(y.shape,(31239,)) + self.assertEqual(round(y[0],2),-2.09) + self.assertEqual(round(y[-1],2),-2.09) y=self.m.use("mlp_test","train") - self.assertEqual(y.shape,(124917,1)) - self.assertEqual(round(y[0,0],2),-2.09) - self.assertEqual(round(y[-1,0],2),-2.09) - - def test_use_LSTM(self): - y=self.m.use("lstm_test","total") + self.assertEqual(y.shape,(124917,)) + self.assertEqual(round(y[0],2),-2.09) + self.assertEqual(round(y[-1],2),-2.09) + + def test_use_LSTM_reduced_memory(self): + #no reduce memory usage + y=self.m.use("240218_lstm_test_reduced_memory","total") self.assertEqual(y.shape,(39,3994,1 )) - self.assertEqual(round(y[0,0,0].item(),2),-0.92) - self.assertEqual(round(y[0,-1,0].item(),2),-4.32) + self.assertEqual(round(y[0,0,0].item(),2),-1.41) + self.assertEqual(round(y[0,-1,0].item(),2),-2.24) - y=self.m.use("lstm_test","test") + y=self.m.use("240218_lstm_test_reduced_memory","test") self.assertEqual(y.shape,(39,791,1 )) - self.assertEqual(round(y[0,0,0].item(),2),-0.92) - self.assertEqual(round(y[0,-1,0].item(),2),-2.88) + self.assertEqual(round(y[0,0,0].item(),2),-0.83) + self.assertEqual(round(y[0,-1,0].item(),2),-2.12) - y=self.m.use("lstm_test","train") + y=self.m.use("240218_lstm_test_reduced_memory","train") self.assertEqual(y.shape,(39,3193,1 )) - self.assertEqual(round(y[0,0,0].item(),2),-2.17) - self.assertEqual(round(y[0,-1,0].item(),2),-4.32) - - - - + self.assertEqual(round(y[0,0,0].item(),2),-1.93) + self.assertEqual(round(y[0,-1,0].item(),2),-2.31) + + def test_use_LSTM_no_reduced_memory(self): + #no reduce memory usage + y=self.m.use("240218_lstm_test_no_reduced_memory","total") + self.assertEqual(y.shape,(155766,1 )) + self.assertEqual(round(y[0,0].item(),2),-0.86) + self.assertEqual(round(y[3993,0].item(),2),-3.14) + self.assertEqual(round(y[-1,0].item(),2),-2.87) + + y=self.m.use("240218_lstm_test_no_reduced_memory","test") + self.assertEqual(y.shape,(30849,1 )) + self.assertEqual(round(y[0,0].item(),2),-1.9) #the scaling cause total != test for same index + self.assertEqual(round(y[790,0].item(),2),-2.21) + self.assertEqual(round(y[-1,0].item(),2),-1.58) + + y=self.m.use("240218_lstm_test_no_reduced_memory","train") + self.assertEqual(y.shape,(124527,1 )) + self.assertEqual(round(y[0,0].item(),2),-3.48) + self.assertEqual(round(y[-1,0].item(),2),-2.81) + self.assertEqual(round(y[3193,0].item(),2),-2.13) \ No newline at end of file diff --git a/py-trading-bot/tests/test_opt_main.py b/py-trading-bot/tests/test_opt_main.py index ce47d49..1681063 100644 --- a/py-trading-bot/tests/test_opt_main.py +++ b/py-trading-bot/tests/test_opt_main.py @@ -124,7 +124,7 @@ def test_defi(self): self.assertEqual(np.shape(self.o.ents["CAC40"])[1],39) self.assertFalse(self.o.ents["CAC40"][self.o.ents["CAC40"].columns[0]].values[-1]) - self.assertTrue(self.o.ents["CAC40"][self.o.ents["CAC40"].columns[1]].values[-1]) + self.assertFalse(self.o.ents["CAC40"][self.o.ents["CAC40"].columns[1]].values[-1]) self.assertTrue(self.o.ents["CAC40"][self.o.ents["CAC40"].columns[1]].values[-2]) exs_total=copy.deepcopy(self.o.exs) @@ -211,7 +211,7 @@ def test_get_ret(self): short_exits =self.ust.exits_short) t=self.o.get_ret(pf,"CAC40","learn") self.assertEqual(len(t),39) - self.assertEqual(round(t['VIV'],2),-0.97) + self.assertEqual(round(t['VIV'],2),-1.29) def test_append_row(self): self.o.test_arrs=None diff --git a/py-trading-bot/tests/test_opt_presel.py b/py-trading-bot/tests/test_opt_presel.py index ea93b4c..0fac200 100644 --- 
a/py-trading-bot/tests/test_opt_presel.py +++ b/py-trading-bot/tests/test_opt_presel.py @@ -8,17 +8,25 @@ from django.test import TestCase from opt import opt_presel, opt_keep -from core import presel +from core import presel, common import vectorbtpro as vbt import numpy as np class TestOptPresel(TestCase): def test_div(self): - a={'simple': - {'ent': ['RSI20'], - 'ex': ['KAMA','SUPERTREND','BBANDS',"CDLBELTHOLD","CDLHIKKAKE","CDLRISEFALL3METHODS","CDLBREAKAWAY", - "CDL3BLACKCROWS"] - }} + a={'bull': { + 'ent': ['BBANDS', 'CDL3BLACKCROWS'], + 'ex': ['ULTOSC20', 'CDLHIKKAKE','CDLABANDONEDBABY', 'CDL3BLACKCROWS','CDLHIKKAKEMOD'] + }, + 'bear': { + 'ent': ['CDLHANGINGMAN', 'CDLSTICKSANDWICH', 'CDL3LINESTRIKE'], + 'ex': ['STOCH', 'BBANDS', 'CDLBELTHOLD', 'CDLXSIDEGAP3METHODS'] + }, + 'uncertain': { + 'ent': ['KAMA'], + 'ex': ['WILLR','ULTOSC20','ULTOSC25','CDL3LINESTRIKE','CDLDARKCLOUDCOVER', 'CDL3INSIDE'] + } + } self.o=opt_presel.Opt("PreselDivergence", "2007_2022_08", @@ -127,7 +135,10 @@ def test_keep(self): ) self.assertTrue(np.equal(self.bti.entries, self.o.ents["CAC40"]).all().all()) - self.assertTrue(np.equal(self.bti.exits, self.o.exs["CAC40"]).all().all()) + self.assertTrue(np.equal(common.remove_multi(self.bti.exits) + , common.remove_multi(self.o.exs["CAC40"]) + ).all().all() + ) self.bti2=presel.PreselRetardMacro(self.period,symbol_index=self.symbol_index) self.bti2.run() @@ -143,7 +154,10 @@ def test_keep(self): #calculate_pf_sub should also have calculated DAX correctly the first time self.assertTrue(np.equal(self.bti3.entries, self.o.ents["DAX"]).all().all()) - self.assertTrue(np.equal(self.bti3.exits, self.o.exs["DAX"]).all().all()) + self.assertTrue(np.equal( + common.remove_multi(self.bti3.exits), + common.remove_multi(self.o.exs["DAX"]) + ).all().all()) pf3=vbt.Portfolio.from_signals(self.bti3.close, self.bti3.entries, @@ -171,7 +185,10 @@ def test_keep(self): ) self.assertTrue(np.equal(self.bti.entries.loc[i], self.o.ents["CAC40"]).all().all()) - self.assertTrue(np.equal(self.bti.exits.loc[i], self.o.exs["CAC40"]).all().all()) + self.assertTrue(np.equal( + common.remove_multi(self.bti.exits.loc[i]), + common.remove_multi(self.o.exs["CAC40"]) + ).all().all()) self.assertTrue(np.equal(pf.get_total_return(), pf_dic["CAC40"].get_total_return()).all()) pf_dic=self.o.calculate_pf_sub(dic="test") @@ -186,4 +203,4 @@ def test_keep(self): call_seq='auto', cash_sharing=True, ) - self.assertTrue(np.equal(pf.get_total_return(), pf_dic["CAC40"].get_total_return()).all()) \ No newline at end of file + self.assertTrue(np.equal(pf.get_total_return(), pf_dic["CAC40"].get_total_return()).all()) \ No newline at end of file diff --git a/py-trading-bot/tests/test_orders.py b/py-trading-bot/tests/test_orders.py index e67c794..bcdc49e 100644 --- a/py-trading-bot/tests/test_orders.py +++ b/py-trading-bot/tests/test_orders.py @@ -205,7 +205,11 @@ def test_check_ib_permission(self): "alerting": "IB", "reporting": "YF", } - + _settings["USED_API"]={ + "orders": "", + "alerting": "", + "reporting": "", + } m.check_ib_permission(None) self.assertEqual(_settings["USED_API"]["orders"],"IB") self.assertEqual(_settings["USED_API"]["alerting"],"IB") @@ -217,6 +221,11 @@ def test_check_ib_permission2(self): "alerting": "MT5", "reporting": "TS", } + _settings["USED_API"]={ + "orders": "", + "alerting": "", + "reporting": "", + } m.check_ib_permission(["AI","AC"]) self.assertEqual(_settings["USED_API"]["orders"],"CCXT") diff --git a/py-trading-bot/tests/test_presel.py b/py-trading-bot/tests/test_presel.py 
index 0a5b0ac..b86b437 100644 --- a/py-trading-bot/tests/test_presel.py +++ b/py-trading-bot/tests/test_presel.py @@ -68,7 +68,7 @@ def test_get_order(self): self.assertEqual( self.bti.get_order("AC.PA","none"),o) def test_get_last_exit(self): - self.ust=strat.StratDiv(self.period, symbol_index=self.symbol_index) + self.ust=strat.StratDiv2(self.period, symbol_index=self.symbol_index) self.ust.run() self.bti=presel.Presel(self.period,symbol_index=self.symbol_index,input_ust=self.ust) @@ -101,7 +101,7 @@ def test_preselect_vol(self): cash_sharing=True, ) - self.assertEqual(round(pf.get_total_return(),2),-0.21) + self.assertEqual(round(pf.get_total_return(),2),-0.52) def test_preselect_retard(self): self.bti=presel.PreselRetard(self.period,symbol_index=self.symbol_index) @@ -133,7 +133,7 @@ def test_preselect_macd_vol(self): cash_sharing=True, ) - self.assertEqual(round(pf.get_total_return(),2),1.78) + self.assertEqual(round(pf.get_total_return(),2),-0.68) def test_preselect_hist_vol(self): self.bti=presel.PreselHistVol(self.period,symbol_index=self.symbol_index) @@ -149,7 +149,7 @@ def test_preselect_hist_vol(self): cash_sharing=True, ) - self.assertEqual(round(pf.get_total_return(),2),2.74) + self.assertEqual(round(pf.get_total_return(),2),1.16) def test_preselect_divergence(self): self.bti=presel.PreselDivergence(self.period,symbol_index=self.symbol_index) @@ -165,7 +165,7 @@ def test_preselect_divergence(self): cash_sharing=True, ) - self.assertEqual(round(pf.get_total_return(),2),22.29) + self.assertEqual(round(pf.get_total_return(),2),13.37) def test_preselect_macd_vol_macro(self): self.bti=presel.PreselMacdVolMacro(self.period,symbol_index=self.symbol_index) @@ -181,7 +181,7 @@ def test_preselect_macd_vol_macro(self): cash_sharing=True, ) - self.assertEqual(round(pf.get_total_return(),2),2.74 ) + self.assertEqual(round(pf.get_total_return(),2),-0.41 ) def test_preselect_retard_macro(self): self.bti=presel.PreselRetardMacro(self.period,symbol_index=self.symbol_index) @@ -213,7 +213,7 @@ def test_preselect_divergence_blocked(self): cash_sharing=True, ) - self.assertEqual(round(pf.get_total_return(),2),7.37) + self.assertEqual(round(pf.get_total_return(),2),3.94) def test_preselect_divergence_blocked_im(self): self.bti=presel.PreselDivergenceBlockedIm(self.period,symbol_index=self.symbol_index) @@ -229,7 +229,7 @@ def test_preselect_divergence_blocked_im(self): cash_sharing=True, ) - self.assertEqual(round(pf.get_total_return(),2),10.81) + self.assertEqual(round(pf.get_total_return(),2),4.7) def test_preselect_vol_slow(self): self.bti=presel.PreselVolSlow(self.period,symbol_index=self.symbol_index) @@ -245,7 +245,7 @@ def test_preselect_vol_slow(self): cash_sharing=True, ) - self.assertEqual(round(pf.get_total_return(),2),11.24) + self.assertEqual(round(pf.get_total_return(),2),10.85) def test_preselect_realmadrid(self): self.bti=presel.PreselRealMadrid(self.period,symbol_index=self.symbol_index) @@ -294,7 +294,7 @@ def test_preselect_macd_vol_slow(self): cash_sharing=True, ) - self.assertEqual(round(pf.get_total_return(),2),-0.61) + self.assertEqual(round(pf.get_total_return(),2),-0.75) def test_preselect_hist_vol_slow(self): self.bti=presel.PreselHistVolSlow(self.period,symbol_index=self.symbol_index) diff --git a/py-trading-bot/tests/test_presel_classic.py b/py-trading-bot/tests/test_presel_classic.py index 5b46bf1..f2761b7 100644 --- a/py-trading-bot/tests/test_presel_classic.py +++ b/py-trading-bot/tests/test_presel_classic.py @@ -106,7 +106,7 @@ def test_two_action(self): 
pf_test3=self.bti.apply_underlying_strat("StratG") #is not equal as targetpercent takes money from one action for the other - self.assertTrue(abs(pf_test2.get_total_return()+pf_test3.get_total_return()-pf_test.get_total_return())<0.25) + self.assertTrue(abs(pf_test2.get_total_return()+pf_test3.get_total_return()-pf_test.get_total_return())<0.40) #needs to check for unique as balancing order can occurs for pf_test. self.assertEqual( diff --git a/py-trading-bot/tests/test_ss_manager.py b/py-trading-bot/tests/test_ss_manager.py index 963a1ae..5f5f385 100644 --- a/py-trading-bot/tests/test_ss_manager.py +++ b/py-trading-bot/tests/test_ss_manager.py @@ -307,13 +307,21 @@ def test_cand_to_quantity(self): cands=["AC.PA"] self.ss_m.cand_to_quantity(cands,"none",False) self.assertEqual(self.ss_m.target_ss_by_st.loc["AC.PA","none"],1) - self.assertEqual(self.ss_m.target_ss_by_st.loc["AI.PA","none"],0) + self.assertTrue(np.isnan(self.ss_m.target_ss_by_st.loc["AI.PA","none"])) self.assertTrue(np.isnan(self.ss_m.target_ss_by_st.loc["AIR.PA","none"])) + + self.ss_m.present_ss.loc["AC.PA","quantity"]=1 + cands=["AI.PA"] + self.ss_m.cand_to_quantity(cands,"none",False) self.assertEqual(self.ss_m.target_ss_by_st.loc["AC.PA","none"],0) self.assertEqual(self.ss_m.target_ss_by_st.loc["AI.PA","none"],1) self.assertTrue(np.isnan(self.ss_m.target_ss_by_st.loc["AIR.PA","none"])) + + self.ss_m.present_ss.loc["AC.PA","quantity"]=0 + self.ss_m.present_ss.loc["AI.PA","quantity"]=1 + cands=["AIR.PA"] self.ss_m.cand_to_quantity(cands,"none",True) self.assertEqual(self.ss_m.target_ss_by_st.loc["AC.PA","none"],0) @@ -348,7 +356,7 @@ def test_order_nosubstrat(self): cands=["AC.PA"] self.ss_m.order_nosubstrat(cands,"Paris","none",False,keep=True) self.assertEqual(self.ss_m.target_ss_by_st.loc["AC.PA","none"],1) - self.assertEqual(self.ss_m.target_ss_by_st.loc["AI.PA","none"],0) + self.assertTrue(np.isnan(self.ss_m.target_ss_by_st.loc["AI.PA","none"])) self.assertTrue(np.isnan(self.ss_m.target_ss_by_st.loc["AIR.PA","none"])) self.ss_m.present_ss.loc["AC.PA","quantity"]=1 cands=["AI.PA"] @@ -357,7 +365,6 @@ def test_order_nosubstrat(self): self.assertEqual(self.ss_m.target_ss_by_st.loc["AC.PA","retard_keep"],1) self.assertEqual(self.ss_m.target_ss_by_st.loc["AI.PA","none"],1) self.assertTrue(np.isnan(self.ss_m.target_ss_by_st.loc["AIR.PA","none"])) - self.ss_m.present_ss.loc["AC.PA","quantity"]=0 self.ss_m.present_ss.loc["AI.PA","quantity"]=1 cands=["AIR.PA"] self.ss_m.order_nosubstrat(cands,"Paris","none",True,keep=True) @@ -365,7 +372,8 @@ def test_order_nosubstrat(self): self.assertEqual(self.ss_m.target_ss_by_st.loc["AI.PA","none"],0) self.assertEqual(self.ss_m.target_ss_by_st.loc["AIR.PA","none"],-1) self.assertEqual(self.ss_m.target_ss_by_st.loc["AC.PA","retard_keep"],1) - self.assertEqual(self.ss_m.target_ss_by_st.loc["AI.PA","retard_keep"],0) + self.assertTrue(np.isnan(self.ss_m.target_ss_by_st.loc["AI.PA","retard_keep"])) + def test_cand_to_quantity_entry(self): self.ss_m.cand_to_quantity_entry([],"none", False) diff --git a/py-trading-bot/tests/test_strat.py b/py-trading-bot/tests/test_strat.py index b1d81e2..bb4036b 100644 --- a/py-trading-bot/tests/test_strat.py +++ b/py-trading-bot/tests/test_strat.py @@ -17,7 +17,7 @@ def setUpClass(self): self.symbol_index="CAC40" def test_symbols_simple_to_complex(self): - self.ust=strat.StratDiv(self.period, symbol_index=self.symbol_index) + self.ust=strat.StratDiv2(self.period, symbol_index=self.symbol_index) self.ust.run() 
symbol_complex1=self.ust.symbols_simple_to_complex('AI',"ent") self.assertEqual(symbol_complex1,"AI") @@ -46,10 +46,10 @@ def test_strat_kama_stoch_matrend_macdbb_macro(self): short_entries=self.ust.entries_short, short_exits =self.ust.exits_short) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),1.21) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),1.01) self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[1]],2),0.2) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),3.15) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),4.06) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),2.65) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),4.82) self.ust=strat.StratKamaStochMatrendMacdbbMacro( self.period, @@ -64,10 +64,10 @@ def test_strat_kama_stoch_matrend_macdbb_macro(self): short_exits =self.ust.exits_short) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),2.04) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),1.36) self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[1]],2),-0.47) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),3.82) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),2.65) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),3.23) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),4.09) def test_strat_kama_stoch_matrend_bbands(self): self.ust=strat.StratKamaStochMatrendBbands( @@ -79,10 +79,10 @@ def test_strat_kama_stoch_matrend_bbands(self): short_entries=self.ust.entries_short, short_exits =self.ust.exits_short) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),2.37) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[1]],2),1.44) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),2.88) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),2.02) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),2.33) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[1]],2),1.52) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),4.26) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),2.21) @@ -120,19 +120,31 @@ def test_stratReal(self): self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),4.08) self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[3]],2),-0.36) - def test_stratDiv(self): - self.ust=strat.StratDiv(self.period, symbol_index=self.symbol_index) + def test_stratDiv2(self): + self.ust=strat.StratDiv2(self.period, symbol_index=self.symbol_index) self.ust.run() pf=vbt.Portfolio.from_signals(self.ust.close, self.ust.entries,self.ust.exits, short_entries=self.ust.entries_short, short_exits =self.ust.exits_short) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),0.16) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[1]],2),0.18) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),0.46) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),0.12) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[1]],2),0.16) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),0.37) self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[3]],2),-0.6) + def test_stratDiv(self): + 
self.ust=strat.StratDiv(self.period, symbol_index=self.symbol_index) + self.ust.run() + + pf=vbt.Portfolio.from_signals(self.ust.close, self.ust.entries,self.ust.exits, + short_entries=self.ust.entries_short, + short_exits =self.ust.exits_short) + + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),-0.57) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[1]],2),-0.19) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),-0.22) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[3]],2),-0.85) if __name__ == '__main__': unittest.main() diff --git a/py-trading-bot/tests/test_strat_legacy.py b/py-trading-bot/tests/test_strat_legacy.py index e354a44..c852b99 100644 --- a/py-trading-bot/tests/test_strat_legacy.py +++ b/py-trading-bot/tests/test_strat_legacy.py @@ -38,10 +38,10 @@ def test_strat_kama_stoch_super_bbands(self): short_entries=self.ust.entries_short, short_exits =self.ust.exits_short) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),2.46) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[1]],2),1.23) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),5.89) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),2.21) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),2.33) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[1]],2),1.21) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),7.64) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),2.84) def test_strat_kama_stoch_matrend_macdbb(self): self.ust=strat_legacy.StratKamaStochMatrendMacdbb(self.period, symbol_index=self.symbol_index) @@ -51,10 +51,10 @@ def test_strat_kama_stoch_matrend_macdbb(self): short_entries=self.ust.entries_short, short_exits =self.ust.exits_short) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),1.82) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),1.39) self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[1]],2),0.5) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),5.51) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),2.84) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),5.25) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),3.61) def test_strat_kama_stoch_super_macdbb(self): self.ust=strat_legacy.StratKamaStochSuperMacdbb(self.period, symbol_index=self.symbol_index) @@ -64,10 +64,10 @@ def test_strat_kama_stoch_super_macdbb(self): short_entries=self.ust.entries_short, short_exits =self.ust.exits_short) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),1.77) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),1.15) self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[1]],2),0.7) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),4.29) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),3.83) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),4.2) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),4.93) def test_strat_kama_stoch_matrend_bbands_macro(self): self.ust=strat_legacy.StratKamaStochMatrendBbandsMacro( @@ -82,10 +82,10 @@ def test_strat_kama_stoch_matrend_bbands_macro(self): short_entries=self.ust.entries_short, short_exits =self.ust.exits_short) - 
self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),0.93) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[1]],2),1.39) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),0.64) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),1.35) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),0.92) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[1]],2),1.47) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),1.29) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),1.76) self.ust=strat_legacy.StratKamaStochMatrendBbandsMacro( self.period, @@ -99,10 +99,10 @@ def test_strat_kama_stoch_matrend_bbands_macro(self): short_entries=self.ust.entries_short, short_exits =self.ust.exits_short) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),1.92) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[1]],2),0.1) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),0.79) - self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),1.73) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[0]],2),1.82) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[1]],2),0.24) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[2]],2),1.86) + self.assertEqual(round(pf.get_total_return()[pf.wrapper.columns[6]],2),1.84) def test_strat_kama_stoch_macro(self): self.ust=strat_legacy.StratKamaStochMacro( diff --git a/requirements.txt b/requirements.txt index ca09932..62d314c 100755 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ celery[redis] >= 5.2.7 #yfinance # normally included in vectorbt #pandas # normally included in vectorbt #numpy # normally included in vectorbt -#python-telegram-bot # normally included in vectorbt +#python-telegram-bot # normally included in vectorbt -> use 13.15, the start in_background does not work for version > 20 joblib ib_insync >= 0.9.70 django-filter >= 22.1 @@ -17,5 +17,7 @@ psycopg2-binary backports.zoneinfo; python_version < '3.9' -#### Only if making machine learning, remove the ordner ml if you don't need it -keras +#### Only if making machine learning, remove the ordner ml if you don't need it. +#### Deactivated by default, as it uses a huge amount of memory. Not suitable for Docker. 
+# tensorflow +# keras diff --git a/requirements_conda.txt b/requirements_conda.txt new file mode 100644 index 0000000..41e2a86 --- /dev/null +++ b/requirements_conda.txt @@ -0,0 +1,445 @@ +# This file may be used to create an environment using: +# $ conda create --name --file +# platform: linux-64 +_libgcc_mutex=0.1=main +_openmp_mutex=5.1=1_gnu +_tflow_select=2.3.0=mkl +abseil-cpp=20211102.0=hd4dd3e8_0 +absl-py=1.4.0=py39h06a4308_0 +aiodns=3.1.1=pypi_0 +aiohttp=3.9.1=pypi_0 +aiosignal=1.3.1=pypi_0 +alabaster=0.7.12=pyhd3eb1b0_0 +alembic=1.13.1=pypi_0 +alpaca-py=0.13.4=pypi_0 +amqp=5.2.0=pypi_0 +annotated-types=0.6.0=pypi_0 +ansi2html=1.8.0=py39h06a4308_0 +anyio=3.5.0=py39h06a4308_0 +appdirs=1.4.4=pypi_0 +apscheduler=3.6.3=pypi_0 +arch=6.2.0=pypi_0 +argon2-cffi=21.3.0=pyhd3eb1b0_0 +argon2-cffi-bindings=21.2.0=py39h7f8727e_0 +arrow=1.2.3=py39h06a4308_1 +asgiref=3.5.2=py39h06a4308_0 +astor=0.8.1=py39h06a4308_0 +astroid=2.14.2=py39h06a4308_0 +astropy=6.0.0=pypi_0 +astropy-iers-data=0.2024.1.1.0.33.39=pypi_0 +asttokens=2.0.5=pyhd3eb1b0_0 +astunparse=1.6.3=py_0 +async-lru=2.0.4=py39h06a4308_0 +async-timeout=4.0.3=py39h06a4308_0 +atomicwrites=1.4.0=py_0 +attrs=23.1.0=py39h06a4308_0 +autopep8=1.6.0=pyhd3eb1b0_1 +babel=2.11.0=py39h06a4308_0 +backcall=0.2.0=pyhd3eb1b0_0 +beautifulsoup4=4.12.2=py39h06a4308_0 +billiard=4.2.0=pypi_0 +binaryornot=0.4.4=pyhd3eb1b0_1 +black=23.11.0=py39h06a4308_0 +blas=1.0=openblas +bleach=4.1.0=pyhd3eb1b0_0 +blinker=1.6.2=py39h06a4308_0 +blosc=1.21.3=h6a678d5_0 +blosc2=2.4.0=pypi_0 +bottleneck=1.3.5=py39h7deecbd_0 +brotli-python=1.0.9=py39h6a678d5_7 +bzip2=1.0.8=h7b6447c_0 +c-ares=1.19.1=h5eee18b_0 +ca-certificates=2023.12.12=h06a4308_0 +cachetools=4.2.2=pyhd3eb1b0_0 +ccxt=4.2.4=pypi_0 +celery=5.3.6=pypi_0 +certifi=2023.11.17=py39h06a4308_0 +cffi=1.16.0=py39h5eee18b_0 +chardet=4.0.0=py39h06a4308_1003 +charset-normalizer=2.0.4=pyhd3eb1b0_0 +clarabel=0.6.0=pypi_0 +click=8.1.7=py39h06a4308_0 +click-didyoumean=0.3.0=pypi_0 +click-plugins=1.1.1=pypi_0 +click-repl=0.3.0=pypi_0 +cloudpickle=3.0.0=pypi_0 +cmake=3.11.1=h307fef2_1 +colorama=0.4.6=py39h06a4308_0 +colorlog=6.8.0=pypi_0 +comm=0.2.1=pypi_0 +contourpy=1.2.0=pypi_0 +cookiecutter=2.5.0=py39h06a4308_0 +cryptography=41.0.3=py39h130f0dd_0 +cvxpy=1.4.1=pypi_0 +cycler=0.12.1=pypi_0 +dash=2.14.2=py39h06a4308_0 +dask=2023.12.1=pypi_0 +dateparser=1.1.8=py39h06a4308_0 +dbus=1.13.18=hb2f20db_0 +debugpy=1.6.7=py39h6a678d5_0 +decorator=5.1.1=pyhd3eb1b0_0 +defusedxml=0.7.1=pyhd3eb1b0_0 +deprecated=1.2.13=py39h06a4308_0 +diff-match-patch=20200713=pyhd3eb1b0_0 +dill=0.3.7=py39h06a4308_0 +django=4.1=py39h06a4308_0 +django-filter=23.5=pypi_0 +docstring-to-markdown=0.11=py39h06a4308_0 +docutils=0.18.1=py39h06a4308_3 +duckdb=0.9.2=pypi_0 +ecos=2.0.12=pypi_0 +eventkit=1.0.3=pypi_0 +exceptiongroup=1.0.4=py39h06a4308_0 +executing=0.8.3=pyhd3eb1b0_0 +expat=2.5.0=h6a678d5_0 +flake8=6.0.0=py39h06a4308_0 +flask=2.2.5=py39h06a4308_0 +flask-compress=1.13=py39h06a4308_0 +flask-cors=3.0.10=pyhd3eb1b0_0 +flatbuffers=2.0.0=h2531618_0 +fontconfig=2.14.1=h4c34cd2_2 +fonttools=4.47.0=pypi_0 +fqdn=1.5.1=pypi_0 +freetype=2.12.1=h4a9f257_0 +frozendict=2.4.0=pypi_0 +frozenlist=1.4.1=pypi_0 +fsspec=2023.12.2=pypi_0 +future=0.18.3=pypi_0 +gast=0.4.0=pyhd3eb1b0_0 +giflib=5.2.1=h5eee18b_3 +glib=2.69.1=he621ea3_2 +google-auth=2.22.0=py39h06a4308_0 +google-auth-oauthlib=0.5.2=py39h06a4308_0 +google-pasta=0.2.0=pyhd3eb1b0_0 +grpc-cpp=1.48.2=h5bf31a4_0 +grpcio=1.48.2=py39h5bf31a4_0 +gst-plugins-base=1.14.1=h6a678d5_1 +gstreamer=1.14.1=h5eee18b_1 
+gtest=1.14.0=hdb19cb5_0 +h11=0.14.0=pypi_0 +h5py=2.10.0=py39hec9cf62_0 +hdf5=1.10.6=h3ffc7dd_1 +html5lib=1.1=pypi_0 +httpcore=1.0.2=pypi_0 +httpx=0.25.2=pypi_0 +humanize=3.10.0=pyhd3eb1b0_0 +hyperopt=0.2.7=pypi_0 +ib-insync=0.9.86=pypi_0 +icu=58.2=he6710b0_3 +idna=3.4=py39h06a4308_0 +imageio=2.31.4=py39h06a4308_0 +imagesize=1.4.1=py39h06a4308_0 +importlib-metadata=7.0.0=py39h06a4308_1 +importlib-resources=6.1.1=pypi_0 +importlib_metadata=7.0.0=hd3eb1b0_1 +inflection=0.5.1=pypi_0 +intervaltree=3.1.0=pyhd3eb1b0_0 +ipykernel=6.25.0=py39h2f386ee_0 +ipython=8.15.0=py39h06a4308_0 +ipython_genutils=0.2.0=pyhd3eb1b0_1 +ipywidgets=8.1.1=pypi_0 +isoduration=20.11.0=pypi_0 +isort=5.9.3=pyhd3eb1b0_0 +itsdangerous=2.0.1=pyhd3eb1b0_0 +jaraco.classes=3.2.1=pyhd3eb1b0_0 +jedi=0.18.1=py39h06a4308_1 +jeepney=0.7.1=pyhd3eb1b0_0 +jellyfish=1.0.1=py39hb02cf49_0 +jinja2=3.1.2=py39h06a4308_0 +joblib=1.2.0=py39h06a4308_0 +jpeg=9e=h5eee18b_1 +json5=0.9.6=pyhd3eb1b0_0 +jsonpointer=2.4=pypi_0 +jsonschema=4.19.2=py39h06a4308_0 +jsonschema-specifications=2023.7.1=py39h06a4308_0 +jupyter-dash=0.4.2=py39h06a4308_0 +jupyter-lsp=2.2.0=py39h06a4308_0 +jupyter_client=8.6.0=py39h06a4308_0 +jupyter_core=5.5.0=py39h06a4308_0 +jupyter_events=0.8.0=py39h06a4308_0 +jupyter_server=2.10.0=py39h06a4308_0 +jupyter_server_terminals=0.4.4=py39h06a4308_1 +jupyterlab=4.0.8=py39h06a4308_0 +jupyterlab-widgets=3.0.9=pypi_0 +jupyterlab_pygments=0.1.2=py_0 +jupyterlab_server=2.25.1=py39h06a4308_0 +kaleido=0.2.1=pypi_0 +keras=2.12.0=py39h06a4308_0 +keras-preprocessing=1.1.2=pyhd3eb1b0_0 +keyring=23.13.1=py39h06a4308_0 +kiwisolver=1.4.5=pypi_0 +kombu=5.3.4=pypi_0 +krb5=1.19.4=h568e23c_0 +lazy-object-proxy=1.6.0=py39h27cfd23_0 +lcms2=2.12=h3be6417_0 +ld_impl_linux-64=2.38=h1181459_1 +lerc=3.0=h295c915_0 +libclang=14.0.6=default_hc6dbbc7_1 +libclang13=14.0.6=default_he11475f_1 +libcurl=8.1.1=h91b91d3_0 +libdeflate=1.17=h5eee18b_1 +libedit=3.1.20230828=h5eee18b_0 +libev=4.33=h7f8727e_1 +libevent=2.1.12=h8f2d780_0 +libffi=3.4.4=h6a678d5_0 +libgcc-ng=11.2.0=h1234567_1 +libgfortran-ng=11.2.0=h00389a5_1 +libgfortran5=11.2.0=h1234567_1 +libgomp=11.2.0=h1234567_1 +libllvm14=14.0.6=hdb19cb5_3 +libnghttp2=1.52.0=ha637b67_1 +libopenblas=0.3.21=h043d6bf_0 +libpng=1.6.39=h5eee18b_0 +libpq=12.9=h16c4e8d_3 +libprotobuf=3.20.3=he621ea3_0 +libsodium=1.0.18=h7b6447c_0 +libspatialindex=1.9.3=h2531618_0 +libssh2=1.10.0=h37d81fd_2 +libstdcxx-ng=11.2.0=h1234567_1 +libta-lib=0.4.0=h516909a_0 +libtiff=4.5.1=h6a678d5_0 +libuuid=1.41.5=h5eee18b_0 +libuv=1.44.2=h5eee18b_0 +libwebp=1.3.2=h11a3e52_0 +libwebp-base=1.3.2=h5eee18b_0 +libxcb=1.15=h7f8727e_0 +libxkbcommon=1.0.1=h5eee18b_1 +libxml2=2.10.4=hcbfbd50_0 +llvmlite=0.39.1=pypi_0 +locket=1.0.0=pypi_0 +lxml=5.0.0=pypi_0 +lz4=4.3.2=py39h5eee18b_0 +lz4-c=1.9.4=h6a678d5_0 +mako=1.3.0=pypi_0 +markdown=3.4.1=py39h06a4308_0 +markdown-it-py=2.2.0=py39h06a4308_1 +markupsafe=2.1.3=py39h5eee18b_0 +matplotlib=3.8.2=pypi_0 +matplotlib-inline=0.1.6=py39h06a4308_0 +mccabe=0.7.0=pyhd3eb1b0_0 +mdurl=0.1.0=py39h06a4308_0 +mistune=2.0.4=py39h06a4308_0 +more-itertools=10.1.0=py39h06a4308_0 +msgpack=1.0.7=pypi_0 +multidict=6.0.4=py39h5eee18b_0 +multiprocess=0.70.15=pypi_0 +multitasking=0.0.11=pypi_0 +mypy_extensions=1.0.0=py39h06a4308_0 +nasdaq-data-link=1.0.4=pypi_0 +nbclient=0.8.0=py39h06a4308_0 +nbconvert=7.10.0=py39h06a4308_0 +nbformat=5.9.2=py39h06a4308_0 +ncurses=6.4=h6a678d5_0 +ndindex=1.7=pypi_0 +nest-asyncio=1.5.6=py39h06a4308_0 +networkx=3.2.1=pypi_0 +notebook=7.0.6=py39h06a4308_0 +notebook-shim=0.2.3=py39h06a4308_0 
+nspr=4.35=h6a678d5_0 +nss=3.89.1=h6a678d5_0 +numba=0.56.4=pypi_0 +numexpr=2.8.7=py39h286c3b5_0 +numpy=1.23.5=pypi_0 +numpy-base=1.23.3=py39h1e6e340_1 +numpydoc=1.5.0=py39h06a4308_0 +oauthlib=3.2.2=py39h06a4308_0 +openjpeg=2.4.0=h3ad879b_0 +openssl=1.1.1w=h7f8727e_0 +opt_einsum=3.3.0=pyhd3eb1b0_1 +optuna=3.5.0=pypi_0 +orjson=3.9.10=py39h52d8a92_0 +osqp=0.6.3=pypi_0 +overrides=7.4.0=py39h06a4308_0 +packaging=23.1=py39h06a4308_0 +pandas=2.1.4=py39h1128e8f_0 +pandas-datareader=0.10.0=pypi_0 +pandas-ta=0.3.14b0=pypi_0 +pandocfilters=1.5.0=pyhd3eb1b0_0 +parso=0.8.3=pyhd3eb1b0_0 +partd=1.4.1=pypi_0 +pathos=0.3.1=pypi_0 +pathspec=0.10.3=py39h06a4308_0 +patsy=0.5.5=pypi_0 +pcre=8.45=h295c915_0 +peewee=3.17.0=pypi_0 +pexpect=4.8.0=pyhd3eb1b0_3 +pickleshare=0.7.5=pyhd3eb1b0_1003 +pillow=10.0.1=py39ha6cbd5a_0 +pip=23.3.1=py39h06a4308_0 +platformdirs=3.10.0=py39h06a4308_0 +plotly=5.9.0=py39h06a4308_0 +plotly-resampler=0.8.3.2=py39ha9d4c09_3 +pluggy=1.0.0=py39h06a4308_1 +ply=3.11=py39h06a4308_0 +polygon-api-client=1.13.4=pypi_0 +pox=0.3.3=pypi_0 +ppft=1.7.6.7=pypi_0 +prometheus_client=0.14.1=py39h06a4308_0 +prompt-toolkit=3.0.36=py39h06a4308_0 +protobuf=3.20.3=py39h6a678d5_0 +psutil=5.9.0=py39h5eee18b_0 +psycopg2-binary=2.9.9=pypi_0 +ptyprocess=0.7.0=pyhd3eb1b0_2 +pure_eval=0.2.2=pyhd3eb1b0_0 +py-cpuinfo=9.0.0=pypi_0 +py4j=0.10.9.7=pypi_0 +pyarrow=14.0.2=pypi_0 +pyasn1=0.4.8=pyhd3eb1b0_0 +pyasn1-modules=0.2.8=py_0 +pybind11=2.10.4=py39hdb19cb5_0 +pybind11-global=2.10.4=py39hdb19cb5_0 +pycares=4.4.0=pypi_0 +pycodestyle=2.10.0=py39h06a4308_0 +pycparser=2.21=pyhd3eb1b0_0 +pycryptodome=3.19.1=pypi_0 +pydantic=2.5.3=pypi_0 +pydantic-core=2.14.6=pypi_0 +pydocstyle=6.3.0=py39h06a4308_0 +pyerfa=2.0.1.1=pypi_0 +pyflakes=3.0.1=py39h06a4308_0 +pygments=2.15.1=py39h06a4308_1 +pyjwt=2.4.0=py39h06a4308_0 +pylint=2.16.2=py39h06a4308_0 +pylint-venv=2.3.0=py39h06a4308_0 +pyls-spyder=0.4.0=pyhd3eb1b0_0 +pyopenssl=23.2.0=py39h06a4308_0 +pyparsing=3.1.1=pypi_0 +pyportfolioopt=1.5.5=pypi_0 +pyqt=5.15.10=py39h6a678d5_0 +pyqt5-sip=12.13.0=py39h5eee18b_0 +pyqtwebengine=5.15.10=py39h6a678d5_0 +pysocks=1.7.1=py39h06a4308_0 +python=3.9.18=h7a1cb2a_0 +python-binance=1.0.19=pypi_0 +python-dateutil=2.8.2=pyhd3eb1b0_0 +python-fastjsonschema=2.16.2=py39h06a4308_0 +python-flatbuffers=2.0=pyhd3eb1b0_0 +python-json-logger=2.0.7=py39h06a4308_0 +python-lsp-black=1.2.1=py39h06a4308_0 +python-lsp-jsonrpc=1.0.0=pyhd3eb1b0_0 +python-lsp-server=1.7.2=py39h06a4308_0 +python-slugify=5.0.2=pyhd3eb1b0_0 +python-telegram-bot=13.15=pypi_0 +python-tzdata=2023.3=pyhd3eb1b0_0 +python_abi=3.9=2_cp39 +pytoolconfig=1.2.6=py39h06a4308_0 +pytz=2023.3.post1=py39h06a4308_0 +pyxdg=0.27=pyhd3eb1b0_0 +pyyaml=6.0.1=py39h5eee18b_0 +pyzmq=25.1.0=py39h6a678d5_0 +qdarkstyle=3.0.2=pyhd3eb1b0_0 +qdldl=0.1.7.post0=pypi_0 +qstylizer=0.2.2=py39h06a4308_0 +qt-main=5.15.2=h8373d8f_8 +qt-webengine=5.15.9=h9ab4d14_7 +qtawesome=1.2.2=py39h06a4308_0 +qtconsole=5.4.2=py39h06a4308_0 +qtpy=2.4.1=py39h06a4308_0 +quantstats=0.0.62=pypi_0 +re2=2022.04.01=h295c915_0 +readline=8.2=h5eee18b_0 +redis=5.0.3=h7b6447c_0 +redis-py=4.3.4=py39h06a4308_0 +referencing=0.30.2=py39h06a4308_0 +regex=2023.10.3=py39h5eee18b_0 +requests=2.31.0=py39h06a4308_0 +requests-oauthlib=1.3.0=py_0 +retrying=1.3.3=pyhd3eb1b0_2 +rfc3339-validator=0.1.4=py39h06a4308_0 +rfc3986-validator=0.1.1=py39h06a4308_0 +rich=13.3.5=py39h06a4308_0 +riskfolio-lib=4.4.2=pypi_0 +rope=1.7.0=py39h06a4308_0 +rpds-py=0.10.6=py39hb02cf49_0 +rsa=4.7.2=pyhd3eb1b0_1 +rtree=1.0.1=py39h06a4308_0 +schedule=1.2.1=pypi_0 
+scikit-learn=1.3.2=pypi_0 +scipy=1.11.4=py39heeff2f4_0 +scs=3.2.4.post1=pypi_0 +seaborn=0.13.1=pypi_0 +secretstorage=3.3.1=py39h06a4308_1 +send2trash=1.8.2=py39h06a4308_0 +setuptools=68.2.2=py39h06a4308_0 +sip=6.7.12=py39h6a678d5_0 +six=1.16.0=pyhd3eb1b0_1 +snappy=1.1.10=h6a678d5_1 +sniffio=1.2.0=py39h06a4308_1 +snowballstemmer=2.2.0=pyhd3eb1b0_0 +sortedcontainers=2.4.0=pyhd3eb1b0_0 +soupsieve=2.5=py39h06a4308_0 +sphinx=5.0.2=py39h06a4308_0 +sphinxcontrib-applehelp=1.0.2=pyhd3eb1b0_0 +sphinxcontrib-devhelp=1.0.2=pyhd3eb1b0_0 +sphinxcontrib-htmlhelp=2.0.0=pyhd3eb1b0_0 +sphinxcontrib-jsmath=1.0.1=pyhd3eb1b0_0 +sphinxcontrib-qthelp=1.0.3=pyhd3eb1b0_0 +sphinxcontrib-serializinghtml=1.1.5=pyhd3eb1b0_0 +spyder=5.4.3=py39h06a4308_1 +spyder-kernels=2.4.4=py39h06a4308_0 +sqlite=3.41.2=h5eee18b_0 +sqlparse=0.4.4=py39h06a4308_0 +sseclient-py=1.8.0=pypi_0 +stack_data=0.2.0=pyhd3eb1b0_0 +statsmodels=0.14.1=pypi_0 +ta=0.11.0=pypi_0 +ta-lib=0.4.19=py39hce5d2b2_3 +tables=3.9.2=pypi_0 +tabulate=0.9.0=pypi_0 +tenacity=8.2.2=py39h06a4308_0 +tensorboard=2.12.1=py39h06a4308_0 +tensorboard-data-server=0.7.0=py39h52d8a92_0 +tensorboard-plugin-wit=1.8.1=py39h06a4308_0 +tensorflow=2.12.0=mkl_py39h5ea9445_0 +tensorflow-base=2.12.0=mkl_py39he5f8e37_0 +tensorflow-estimator=2.12.0=py39h06a4308_0 +termcolor=2.1.0=py39h06a4308_0 +terminado=0.17.1=py39h06a4308_0 +text-unidecode=1.3=pyhd3eb1b0_0 +textdistance=4.2.1=pyhd3eb1b0_0 +threadpoolctl=3.2.0=pypi_0 +three-merge=0.1.1=pyhd3eb1b0_0 +tinycss2=1.2.1=py39h06a4308_0 +tk=8.6.12=h1ccaba5_0 +toml=0.10.2=pyhd3eb1b0_0 +tomli=2.0.1=py39h06a4308_0 +tomlkit=0.11.1=py39h06a4308_0 +toolz=0.12.0=pypi_0 +tornado=6.1=pypi_0 +tqdm=4.65.0=py39hb070fc8_0 +trace-updater=0.0.9.1=py39h06a4308_0 +traitlets=5.7.1=py39h06a4308_0 +typing-extensions=4.7.1=py39h06a4308_0 +typing_extensions=4.7.1=py39h06a4308_0 +tzdata=2023c=h04d1e81_0 +tzlocal=2.1=py39h06a4308_1 +ujson=5.9.0=pypi_0 +unidecode=1.2.0=pyhd3eb1b0_0 +universal-portfolios=0.4.12=pypi_0 +uri-template=1.3.0=pypi_0 +urllib3=1.26.18=py39h06a4308_0 +vectorbtpro=2023.12.23=pypi_0 +vine=5.1.0=pypi_0 +watchdog=2.1.6=py39h06a4308_0 +wcwidth=0.2.5=pyhd3eb1b0_0 +webcolors=1.13=pypi_0 +webencodings=0.5.1=py39h06a4308_1 +websocket-client=0.58.0=py39h06a4308_4 +websockets=11.0.3=pypi_0 +werkzeug=2.2.3=py39h06a4308_0 +whatthepatch=1.0.2=py39h06a4308_0 +wheel=0.41.2=py39h06a4308_0 +whitenoise=6.6.0=pypi_0 +widgetsnbextension=4.0.9=pypi_0 +wrapt=1.14.1=py39h5eee18b_0 +wurlitzer=3.0.2=py39h06a4308_0 +xlsxwriter=3.1.9=pypi_0 +xz=5.4.5=h5eee18b_0 +yaml=0.2.5=h7b6447c_0 +yapf=0.31.0=pyhd3eb1b0_0 +yarl=1.9.4=pypi_0 +yfinance=0.2.33=pypi_0 +zeromq=4.3.4=h2531618_0 +zipp=3.17.0=py39h06a4308_0 +zlib=1.2.13=h5eee18b_0 +zstd=1.5.5=hc292b87_0
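Note on the new requirements_conda.txt: it is a full conda export for linux-64, and its own header states it can be fed straight to "conda create". A minimal usage sketch; the environment name "py-trading-bot" is only an example, not prescribed by the repo:

    # Recreate the pinned linux-64 environment from the exported spec file.
    # "py-trading-bot" is an example environment name.
    conda create --name py-trading-bot --file requirements_conda.txt
    conda activate py-trading-bot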
Start bot
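This commit adds py-trading-bot/start_bot_venv.sh to start the bot locally. As committed, the script ends with "&& fg" on its own line, which bash rejects as a syntax error (and "fg" has no job to resume in a non-interactive shell). A minimal corrected sketch of the same start sequence, reusing the paths and conda environment name hard-coded in the committed script:

    #!/bin/bash
    # Sketch of the start_bot_venv.sh sequence; the activate path and the
    # environment name are taken verbatim from the committed script.
    source /mnt/Gros/Progra/Anaconda/bin/activate PyTradingBot39
    redis-server &                              # broker backend for Celery
    python3 manage.py runserver &               # Django development server
    sleep 50                                    # give the server time to come up
    xdg-open http://localhost:8000/start_bot    # open the bot start page
    celery -A trading_bot worker -l info        # worker stays in the foreground

If the Celery worker should also run in the background, append "&" to the celery line and end the script with "wait" instead of "fg".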