portfolio_optimization.py
import pandas as pd
import pandas_datareader.data as web
import numpy as np
import matplotlib.pyplot as plt
# List of stocks composing the portfolio
stocks = ['AAPL', 'FB', 'GOOG', 'MSFT', 'NVDA']
# Gathering historical closing prices for each stock (daily returns are derived below)
data = web.DataReader(stocks,data_source="google",start='01/01/2016')['Close']
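# Note: the "google" source has since been removed from pandas_datareader, and the
# 'FB' ticker now trades as 'META'. If the script is re-run today, a possible
# substitute (assuming the free Stooq source, sorted into chronological order) is:
#   data = web.DataReader(stocks, data_source="stooq", start="2016-01-01")["Close"].sort_index()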
returns = data.pct_change()
# Cleaning the data: dropping rows with missing values
returns = returns.dropna(axis=0, how='any')
# Daily mean returns and covariance matrix of the stocks
returns_mean = returns.mean()
returns_covariance_matrix = returns.cov()
# Maximum drawdown estimator for a given price data frame: for each non-overlapping
# window of `time_period` days, take the ratio between the highest and lowest value
# of the weighted portfolio, and keep the largest ratio found
def max_drawdown(portfolio_data_frame, weights, time_period):
    simulated_portfolio = weights[0]*portfolio_data_frame.iloc[:, 0]
    for i in range(1, len(portfolio_data_frame.columns)):
        simulated_portfolio += weights[i]*portfolio_data_frame.iloc[:, i]
    max_drawdown_value = float('-inf')
    for i in range(int(len(simulated_portfolio)/time_period)-1):
        window = simulated_portfolio[i*time_period:(i+1)*time_period]
        biggest_variation = max(window)/min(window)
        if biggest_variation > max_drawdown_value:
            max_drawdown_value = biggest_variation
    return max_drawdown_value
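# A more conventional definition of maximum drawdown is the largest peak-to-trough
# decline of the portfolio value. The sketch below (max_drawdown_peak_to_trough, a
# name introduced here for illustration) shows that variant; it is not used by the
# simulation further down.
def max_drawdown_peak_to_trough(portfolio_data_frame, weights):
    # Value of the weighted portfolio over time
    simulated_portfolio = (portfolio_data_frame * weights).sum(axis=1)
    # Running maximum of the portfolio value (the highest peak reached so far)
    running_peak = simulated_portfolio.cummax()
    # Largest relative fall from a previous peak
    return ((running_peak - simulated_portfolio) / running_peak).max()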
# Parameters of the Monte Carlo simulation
nbr_iteration = 40000
simulated_portfolios = np.zeros((4, nbr_iteration))  # mean, std, Sharpe ratio and max drawdown
simulated_weights = []
risk_free_rate = 0.01  # annual rate of the French "Livret A" savings account
nbr_trading_days = 252
time_period_drawdown = 20  # window length (in trading days) used by max_drawdown
resolution = 4  # number of decimals kept for each weight
lower_bound = 10**(-resolution)  # minimum weight allowed per stock
upper_bound = 0.4  # maximum weight allowed per stock
np.random.seed()
for index in range(nbr_iteration):
    # Randomly draw the weights, then normalize them so that they sum to 1
    invalid_weights = True
    while invalid_weights:
        weights = (upper_bound-lower_bound)*np.random.rand(1, len(stocks))[0] + lower_bound
        weights = weights/sum(weights)
        for i in range(len(stocks)-1):
            weights[i] = np.round(weights[i], resolution)
        # The last weight absorbs the rounding error so the total stays exactly 1
        weights[len(stocks)-1] = 1.0 - np.sum(weights[0:len(stocks)-1])
        if (weights > lower_bound).all() and (weights < upper_bound).all():
            invalid_weights = False
    simulated_weights.append(weights)
    # Computing the annualized return and volatility of the portfolio with those weights
    portfolio_return = np.sum(returns_mean.values * weights) * nbr_trading_days
    # Annualized volatility: daily volatility scaled by sqrt(nbr_trading_days)
    portfolio_volatility = np.sqrt(np.dot(weights.T, np.dot(returns_covariance_matrix.values, weights)) * nbr_trading_days)
    # Store results of the simulation
    simulated_portfolios[0, index] = portfolio_return
    simulated_portfolios[1, index] = portfolio_volatility
    simulated_portfolios[2, index] = (portfolio_return - risk_free_rate) / portfolio_volatility
    simulated_portfolios[3, index] = max_drawdown(data, weights, time_period_drawdown)
# Initializing the figure to plot
figure, axarr = plt.subplots(1,2)
# Displaying results of the simulation
simulated_portfolios_df = pd.DataFrame(simulated_portfolios.T, columns=['retrn', 'stdv', 'sharpe', 'max_drdwn'])
scat = axarr[0].scatter(simulated_portfolios_df.stdv, simulated_portfolios_df.retrn,
                        c=simulated_portfolios_df.sharpe, cmap='RdYlBu', label='_nolegend_')
figure.colorbar(scat, ax=axarr[0])
axarr[0].set_title("Simulated portfolios")
# Finding the maximum Sharpe ratio, minimum volatility and minimum drawdown
highest_sharpe_position = simulated_portfolios_df['sharpe'].idxmax()
highest_sharpe = simulated_portfolios_df.iloc[highest_sharpe_position]
highest_sharpe_weights = simulated_weights[highest_sharpe_position]
lowest_volatility_position = simulated_portfolios_df['stdv'].idxmin()
lowest_volatility = simulated_portfolios_df.iloc[lowest_volatility_position]
lowest_volatility_weights = simulated_weights[lowest_volatility_position]
lowest_drawdown_position = simulated_portfolios_df['max_drdwn'].idxmin()
lowest_drawdown = simulated_portfolios_df.iloc[lowest_drawdown_position]
lowest_drawdown_weights = simulated_weights[lowest_drawdown_position]
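# Optional sanity check: print the weights of each optimized portfolio found by
# the simulation (uses only the variables computed above)
for label, optimal_weights in [("Max Sharpe", highest_sharpe_weights),
                               ("Min volatility", lowest_volatility_weights),
                               ("Min drawdown", lowest_drawdown_weights)]:
    print(label, dict(zip(stocks, np.round(optimal_weights, resolution))))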
# Plotting the three optimal portfolios on the simulated efficient frontier
axarr[0].scatter(highest_sharpe['stdv'], highest_sharpe['retrn'], marker=(4, 0, 0), color='b', s=400, label="Sharpe")
axarr[0].scatter(lowest_volatility['stdv'], lowest_volatility['retrn'], marker=(4, 0, 0), color='g', s=400, label="Volatility")
axarr[0].scatter(lowest_drawdown['stdv'], lowest_drawdown['retrn'], marker=(4, 0, 0), color='r', s=400, label="Maximum drawdown")
axarr[0].legend()
# Computing the value over time of each optimized portfolio
data_sharpe_weighted = highest_sharpe_weights[0]*data.iloc[:, 0]
data_equally_weighted = (1.0/len(stocks))*data.iloc[:, 0]
data_volatility_weighted = lowest_volatility_weights[0]*data.iloc[:, 0]
data_drawdown_weighted = lowest_drawdown_weights[0]*data.iloc[:, 0]
for i in range(1, len(stocks)):
    data_sharpe_weighted += highest_sharpe_weights[i]*data.iloc[:, i]
    data_equally_weighted += (1.0/len(stocks))*data.iloc[:, i]
    data_volatility_weighted += lowest_volatility_weights[i]*data.iloc[:, i]
    data_drawdown_weighted += lowest_drawdown_weights[i]*data.iloc[:, i]
# Normalizing each portfolio to its initial value
data_sharpe_weighted = data_sharpe_weighted/data_sharpe_weighted.iloc[0]
data_equally_weighted = data_equally_weighted/data_equally_weighted.iloc[0]
data_volatility_weighted = data_volatility_weighted/data_volatility_weighted.iloc[0]
data_drawdown_weighted = data_drawdown_weighted/data_drawdown_weighted.iloc[0]
# Plotting the performance of each optimized portfolio
axarr[1].plot(data_sharpe_weighted, label="Sharpe-optimized")
axarr[1].plot(data_equally_weighted, label="Equally weighted")
axarr[1].plot(data_volatility_weighted, label="Volatility-optimized")
axarr[1].plot(data_drawdown_weighted, label="Max-drawdown-optimized")
axarr[1].legend()
axarr[1].set_title("Comparison of returns")
plt.show()