Best Python code snippet using fMBT_python
Portfolio.py
Source: Portfolio.py
1"""2# https://www.investopedia.com/articles/basics/11/5-portfolio-protection-strategies.asp3# DIVERSIFICATION - Icludes 10-30 stocks (Eliminate Unsystematic Risks)4# NON CORRELATING ASSET (Eliminate Systematic Risks)5# IDENTIFICARE UN TREND6# ANALIZZARE LE COSE PER SETTORE7# VOLATILITA'8# ESPOSIZIONE AI FATTORI DI RISCHIO9"""10import os11import pandas as pd12import numpy as np13import random14import plotly.express as px15import matplotlib.pyplot as plt16from sklearn import preprocessing17from sklearn.model_selection import KFold18from sklearn.linear_model import LinearRegression19from scipy import stats20#from sklearn.linear_model import LinearRegression21#from statsmodels.graphics.tsaplots import plot_acf22dir = r"C:\Users\ebiadene\OneDrive - Deloitte (O365D)\00_Project_Proposal\01_Financial_Markets\Algoritmic_Trading\00_Data\SP500_Historical"23os.chdir(dir+ '\..\..')24SP500info = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')[0]25LargeCap = pd.read_html('https://www.tradingview.com/markets/stocks-usa/market-movers-large-cap/') 26# IMPORTAZIONE DATI E CREAZIONE DIZIONARIO27d = {}28prova = []29for entry in os.listdir(dir):30 if os.path.isfile(os.path.join(dir, entry)):31 d["{0}".format(entry)] = pd.read_csv(dir + '/' + entry)32 print(entry)33 prova.append(entry)34# CREAZIONE DATAFRAME RETURNS 35df_final = pd.DataFrame(columns=['ticker','slope','return_cum'])36df_close = pd.DataFrame(columns= ['ticker', 'date', 'close'])37df_return = pd.DataFrame(columns = ['ticker', 'date', 'return'])38df_return_cum = pd.DataFrame(columns = ['ticker', 'date', 'return_cum'])39df_logreturn = pd.DataFrame(columns = ['ticker', 'date', 'logreturn'])40df_logreturn_cum = pd.DataFrame(columns = ['ticker', 'date', 'logreturn_cum'])41for j in d.keys(): 42 prova = d[j]43 prova = prova[prova['date'] >= '2019-01-01']44 prova['return'] = prova.close.pct_change()45 prova['return_cum'] = (prova['return'] + 1).cumprod()46 prova['logreturn'] = np.log(prova.close/prova.close.shift(1))47 prova['logreturn_cum'] = np.log(prova.close/prova.close.shift(1)).sum()48 prova['ticker'] = j.replace('.csv','') 49 Y = prova['date'].apply( lambda x : int( x.replace('-','') ))50 X = prova['close']51 slope, intercept, r_value, p_value, std_err = stats.linregress(Y, X)52 df = pd.DataFrame( {'ticker' : [j.replace('.csv','')], 'slope' : [slope], 'return_cum' : [prova['return_cum'].iloc[-1]]})53 df1 = prova[['ticker', 'date', 'close']]54 df2 = prova[['ticker', 'date', 'return']]55 df3 = prova[['ticker', 'date', 'return_cum']]56 df4 = prova[['ticker', 'date', 'logreturn']]57 df5 = prova[['ticker', 'date', 'logreturn_cum']]58 df_final = df_final.append(df)59 df_close = df_close.append(df1)60 df_return = df_return.append(df2)61 df_return_cum = df_return_cum.append(df3)62 df_logreturn = df_logreturn.append(df4)63 df_logreturn_cum = df_logreturn_cum.append(df5)64 d[j] = prova65 print(j)66# VISUALIZZAZIONE CURVE67fig = px.line(d['CARR.csv'], x='date', y='close')68fig.write_html('prova.html')69# CREAZIONE DB70stock_df = pd.merge(SP500info[['Symbol', 'Security', 'GICS Sector', 'GICS Sub-Industry']], df_final,71 left_on = 'Symbol', right_on = 'ticker')72Sector = stock_df.groupby('GICS Sector').mean()73Industry = stock_df.groupby('GICS Sub-Industry').mean()74df_close1 = pd.pivot(df_close.dropna(), index = 'date', columns = 'ticker', values = 'close')75df_logreturn= df_logreturn.reset_index()76df_logreturn1 = pd.pivot(df_logreturn.dropna(), index = 'date', columns = 'ticker', values = 'logreturn')77cov_matrix = 
df_logreturn1.cov() * 25278# PORFOLIO OTIMIZATION 79# Simulating 5000 portfolios80num_port = 500081# Creating an empty array to store portfolio weights82all_wts = np.zeros((num_port, len(df_close1.columns)))83# Creating an empty array to store portfolio returns84port_returns = np.zeros((num_port))85# Creating an empty array to store portfolio risks86port_risk = np.zeros((num_port))87# Creating an empty array to store portfolio sharpe ratio88sharpe_ratio = np.zeros((num_port))89for i in range(num_port):90 #Setting random portfolio weights for each stock91 wts = np.random.uniform(size = 10)92 wts = wts/np.sum(wts)93 for j in wts:94 a = random.randint(0,493)95 all_wts[i,a] = j # saving weights in the array96 97 #Setting portolio returns based on logreturns and weights98 port_ret = np.sum(df_logreturn1.mean() * all_wts[i])99 port_ret = (port_ret + 1) ** 252 - 1100 port_returns[i] = port_ret # Saving Portfolio returns101 # Portfolio Risk102 port_sd = np.sqrt(np.dot(all_wts[i].T, np.dot(cov_matrix, all_wts[i])))103 port_risk[i] = port_sd104 105 # Portfolio Sharpe Ratio106 # Assuming 0% Risk Free Rate107 sr = port_ret / port_sd108 sharpe_ratio[i] = sr109 print(i)110names = df_close1.columns111min_var = all_wts[port_risk.argmin()]112print(min_var)113max_sr = all_wts[sharpe_ratio.argmax()]114print(max_sr)115import matplotlib.pyplot as plt116min_var = pd.Series(min_var, index=names)117min_var = min_var[(min_var.T != 0)]118min_var = min_var.sort_values()119fig = plt.figure()120ax1 = fig.add_axes([0.1,0.1,0.8,0.8])121ax1.set_xlabel('Asset')122ax1.set_ylabel("Weights")123ax1.set_title("Minimum Variance Portfolio weights")124min_var.plot(kind = 'bar')125plt.show();126max_sr = pd.Series(max_sr, index=names)127max_sr = max_sr[(max_sr.T != 0)]128max_sr = max_sr.sort_values()129fig = plt.figure()130ax1 = fig.add_axes([0.1,0.1,0.8,0.8])131ax1.set_xlabel('Asset')132ax1.set_ylabel("Weights")133ax1.set_title("Tangency Portfolio weights")134max_sr.plot(kind = 'bar')135plt.show();136fig = plt.figure()137ax1 = fig.add_axes([0.1,0.1,0.8,0.8])138ax1.set_xlabel('Risk')139ax1.set_ylabel("Returns")140ax1.set_title("Portfolio optimization and Efficient Frontier")141plt.scatter(port_risk, port_returns)...
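The Portfolio.py snippet is truncated by the source page at the final scatter call. As a complement, here is a minimal, self-contained sketch of the same Monte Carlo idea: fully normalized random weights, annualized return and volatility, a Sharpe ratio at an assumed 0% risk-free rate, and a risk/return scatter. It runs on synthetic data, and every name in it (n_assets, rets, and so on) is an illustrative assumption, not part of the original script.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)

# Synthetic daily log returns for a small universe (assumption: 10 assets, 500 trading days)
n_assets, n_days = 10, 500
rets = pd.DataFrame(rng.normal(0.0005, 0.01, size=(n_days, n_assets)),
                    columns=[f"S{i}" for i in range(n_assets)])

mean_daily = rets.mean()
cov_annual = rets.cov() * 252        # annualized covariance, as in the script above

num_port = 5000
all_wts = np.zeros((num_port, n_assets))
port_returns = np.zeros(num_port)
port_risk = np.zeros(num_port)
sharpe_ratio = np.zeros(num_port)

for i in range(num_port):
    # Random weights normalized to sum to 1 (avoids the index-collision issue noted above)
    wts = rng.random(n_assets)
    wts /= wts.sum()
    all_wts[i] = wts

    port_returns[i] = (mean_daily @ wts + 1) ** 252 - 1        # annualized return
    port_risk[i] = np.sqrt(wts @ cov_annual.values @ wts)      # annualized volatility
    sharpe_ratio[i] = port_returns[i] / port_risk[i]           # 0% risk-free rate assumed

# Efficient frontier scatter, colored by Sharpe ratio, with the two notable portfolios marked
plt.scatter(port_risk, port_returns, c=sharpe_ratio, cmap='viridis', s=8)
plt.colorbar(label='Sharpe ratio')
plt.scatter(port_risk[port_risk.argmin()], port_returns[port_risk.argmin()],
            c='red', marker='*', s=200, label='Minimum variance')
plt.scatter(port_risk[sharpe_ratio.argmax()], port_returns[sharpe_ratio.argmax()],
            c='orange', marker='*', s=200, label='Max Sharpe (tangency)')
plt.xlabel('Risk')
plt.ylabel('Returns')
plt.title('Portfolio optimization and Efficient Frontier')
plt.legend()
plt.show()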
HW1 NEW.py
Source: HW1 NEW.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
from scipy.stats import ttest_ind
import statsmodels.stats.api as sms
GE=pd.read_csv('C:/Users/anivia/Desktop/geDJ.txt',sep="\s+",header=None,names=['date','open','high','low','close','vol'])
SP=pd.read_csv('https://www.math.ust.hk/~macwyu/MAFS5110_2018-2019/MAFS5110_2018-2019/Chapter_1/sp500.txt',sep = "\s+")
logreturn_GE=np.diff(np.log(np.array(GE["close"])))
logreturn_sp500 = np.diff(np.log(np.array(SP["close"])))
da2 = pd.concat([pd.DataFrame(logreturn_GE), pd.DataFrame(logreturn_sp500)], axis = 1)
#da2.columns=['date','open','high','low','close','vol','logreturn_sp500']
#da2.index=da.index[1:]
da2.columns = ["logreturn_GE","logreturn_sp500"]
da2.boxplot(column=['logreturn_GE','logreturn_sp500'])
plt.show()
print(stats.mood(logreturn_sp500,logreturn_GE))
print('H0 can be rejected, the variances are significantly different')
print(ttest_ind(logreturn_sp500,logreturn_GE,equal_var=True))
print('Means are insignificantly different')
#cm=sms.CompareMeans(sms.DescrStatsW(logreturn_sp500),sms.DescrStatsW(logreturn_GE))
#print('C.I. is ',cm.tconfint_diff())
print('so they are not equal.')
from scipy.stats import ranksums
print(ranksums(logreturn_sp500, logreturn_GE))
...
anaHW1.py
Source: anaHW1.py
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 1 17:35:09 2018
@author: anivia
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
from scipy.stats import ttest_ind
import statsmodels.stats.api as sms
GE=pd.read_csv('C:/Users/anivia/Desktop/geDJ.txt',sep="\s+",header=None,names=['date','open','high','low','close','vol'])
SP=pd.read_csv('https://www.math.ust.hk/~macwyu/MAFS5110_2018-2019/MAFS5110_2018-2019/Chapter_1/sp500.txt',sep = "\s+")
logreturn_GE=np.diff(np.log(np.array(GE["close"])))
logreturn_sp500 = np.diff(np.log(np.array(SP["close"])))
da2 = pd.concat([pd.DataFrame(logreturn_GE), pd.DataFrame(logreturn_sp500)], axis = 1)
#da2.columns=['date','open','high','low','close','vol','logreturn_sp500']
#da2.index=da.index[1:]
da2.columns = ["logreturn_GE","logreturn_sp500"]
da2.boxplot(column=['logreturn_GE','logreturn_sp500'])
plt.show()
print(stats.mood(logreturn_sp500,logreturn_GE))
print('H0 can be rejected, the variances are significantly different')
print(ttest_ind(logreturn_sp500,logreturn_GE,equal_var=True))
print('')
cm=sms.CompareMeans(sms.DescrStatsW(logreturn_sp500),sms.DescrStatsW(logreturn_GE))
...
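Both homework snippets run the same three comparisons on GE and S&P 500 daily log returns: Mood's test for equal scale, a two-sample t-test for equal means, and a Wilcoxon rank-sum test. The sketch below reproduces that workflow on synthetic data (so it does not need the local geDJ.txt file) and spells out the p-value interpretation implied by the print statements above. The switch to Welch's t-test (equal_var=False) is a deliberate deviation from the scripts, which use equal_var=True even though they conclude the variances differ.

import numpy as np
from scipy import stats

rng = np.random.default_rng(42)

# Synthetic stand-ins for the two log-return series (assumption: different variances)
logreturn_GE = rng.normal(0.0, 0.02, size=1000)
logreturn_sp500 = rng.normal(0.0, 0.01, size=1000)

# Mood's test: do the two samples have the same scale (variance)?
mood_stat, mood_p = stats.mood(logreturn_sp500, logreturn_GE)
print(f"Mood test p-value: {mood_p:.4f} ->",
      "variances differ" if mood_p < 0.05 else "no evidence of different variances")

# Two-sample t-test for equal means; Welch's version does not assume equal variances
t_stat, t_p = stats.ttest_ind(logreturn_sp500, logreturn_GE, equal_var=False)
print(f"Welch t-test p-value: {t_p:.4f} ->",
      "means differ" if t_p < 0.05 else "means not significantly different")

# Wilcoxon rank-sum: nonparametric check on location, no normality assumption
rs_stat, rs_p = stats.ranksums(logreturn_sp500, logreturn_GE)
print(f"Rank-sum statistic: {rs_stat:.3f}, p-value: {rs_p:.4f}")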