diff --git a/pyfolio/bayesian.py b/pyfolio/bayesian.py index f9056480..e8ef6bcf 100644 --- a/pyfolio/bayesian.py +++ b/pyfolio/bayesian.py @@ -32,7 +32,7 @@ from empyrical import cum_returns -def model_returns_t_alpha_beta(data, bmark, samples=2000): +def model_returns_t_alpha_beta(data, bmark, samples=2000, progressbar=True): """ Run Bayesian alpha-beta-model with T distributed returns. @@ -86,12 +86,12 @@ def model_returns_t_alpha_beta(data, bmark, samples=2000): mu=mu_reg, sd=sigma, observed=y) - trace = pm.sample(samples) + trace = pm.sample(samples, progressbar=progressbar) return model, trace -def model_returns_normal(data, samples=500): +def model_returns_normal(data, samples=500, progressbar=True): """ Run Bayesian model assuming returns are normally distributed. @@ -125,11 +125,11 @@ def model_returns_normal(data, samples=500): returns.distribution.variance**.5 * np.sqrt(252)) - trace = pm.sample(samples) + trace = pm.sample(samples, progressbar=progressbar) return model, trace -def model_returns_t(data, samples=500): +def model_returns_t(data, samples=500, progressbar=True): """ Run Bayesian model assuming returns are Student-T distributed. @@ -167,11 +167,11 @@ def model_returns_t(data, samples=500): returns.distribution.variance**.5 * np.sqrt(252)) - trace = pm.sample(samples) + trace = pm.sample(samples, progressbar=progressbar) return model, trace -def model_best(y1, y2, samples=1000): +def model_best(y1, y2, samples=1000, progressbar=True): """ Bayesian Estimation Supersedes the T-Test @@ -252,7 +252,7 @@ def model_best(y1, y2, samples=1000): returns_group2.distribution.variance**.5 * np.sqrt(252)) - trace = pm.sample(samples) + trace = pm.sample(samples, progressbar=progressbar) return model, trace @@ -347,7 +347,7 @@ def distplot_w_perc(trace, ax): ylabel='Belief', yticklabels=[]) -def model_stoch_vol(data, samples=2000): +def model_stoch_vol(data, samples=2000, progressbar=True): """ Run stochastic volatility model. 
@@ -385,7 +385,7 @@ def model_stoch_vol(data, samples=2000): pm.math.exp(-2 * s)) StudentT('r', nu, lam=volatility_process, observed=data) - trace = pm.sample(samples) + trace = pm.sample(samples, progressbar=progressbar) return model, trace @@ -525,7 +525,7 @@ def _plot_bayes_cone(returns_train, returns_test, def run_model(model, returns_train, returns_test=None, - bmark=None, samples=500, ppc=False): + bmark=None, samples=500, ppc=False, progressbar=True): """ Run one of the Bayesian models. @@ -563,13 +563,18 @@ def run_model(model, returns_train, returns_test=None, if model == 'alpha_beta': model, trace = model_returns_t_alpha_beta(returns_train, - bmark, samples) + bmark, samples, + progressbar=progressbar) elif model == 't': - model, trace = model_returns_t(returns_train, samples) + model, trace = model_returns_t(returns_train, samples, + progressbar=progressbar) elif model == 'normal': - model, trace = model_returns_normal(returns_train, samples) + model, trace = model_returns_normal(returns_train, samples, + progressbar=progressbar) elif model == 'best': - model, trace = model_best(returns_train, returns_test, samples=samples) + model, trace = model_best(returns_train, returns_test, + samples=samples, + progressbar=progressbar) else: raise NotImplementedError( 'Model {} not found.' @@ -577,7 +582,8 @@ def run_model(model, returns_train, returns_test=None, if ppc: ppc_samples = pm.sample_ppc(trace, samples=samples, - model=model, size=len(returns_test)) + model=model, size=len(returns_test), + progressbar=progressbar) return trace, ppc_samples['returns'] return trace diff --git a/pyfolio/perf_attrib.py b/pyfolio/perf_attrib.py index b48be2b2..e5faab3b 100644 --- a/pyfolio/perf_attrib.py +++ b/pyfolio/perf_attrib.py @@ -14,10 +14,11 @@ # limitations under the License. 
from __future__ import division +from collections import OrderedDict import empyrical as ep import pandas as pd - import matplotlib.pyplot as plt + from pyfolio.pos import get_percent_alloc from pyfolio.utils import print_table, set_legend_location @@ -124,12 +125,12 @@ def perf_attrib(returns, positions, factor_returns, factor_loadings, pd.concat([perf_attrib_by_factor, returns_df], axis='columns')) -def create_perf_attrib_stats(perf_attrib): +def create_perf_attrib_stats(perf_attrib, risk_exposures): """ Takes perf attribution data over a period of time and computes annualized multifactor alpha, multifactor sharpe, risk exposures. """ - summary = {} + summary = OrderedDict() specific_returns = perf_attrib['specific_returns'] common_returns = perf_attrib['common_returns'] @@ -139,6 +140,8 @@ def create_perf_attrib_stats(perf_attrib): summary['Multi-factor sharpe'] =\ ep.sharpe_ratio(specific_returns) + # empty line between common/specific/total returns + summary[' '] = ' ' summary['Cumulative specific returns'] =\ ep.cum_returns_final(specific_returns) summary['Cumulative common returns'] =\ @@ -147,7 +150,9 @@ def create_perf_attrib_stats(perf_attrib): ep.cum_returns_final(perf_attrib['total_returns']) summary = pd.Series(summary) - return summary + + risk_exposure_summary = risk_exposures.sum(axis='rows') + return summary, risk_exposure_summary def show_perf_attrib_stats(returns, positions, factor_returns, @@ -164,12 +169,14 @@ def show_perf_attrib_stats(returns, positions, factor_returns, pos_in_dollars=pos_in_dollars, ) - perf_attrib_stats = create_perf_attrib_stats(perf_attrib_data) + perf_attrib_stats, risk_exposure_stats =\ + create_perf_attrib_stats(perf_attrib_data, risk_exposures) + print_table(perf_attrib_stats) - print_table(risk_exposures) + print_table(risk_exposure_stats) -def plot_returns(perf_attrib_data, ax=None): +def plot_returns(perf_attrib_data, cost=None, ax=None): """ Plot total, specific, and common returns. 
@@ -177,13 +184,18 @@ def plot_returns(perf_attrib_data, ax=None): ---------- perf_attrib_data : pd.DataFrame df with factors, common returns, and specific returns as columns, - and datetimes as index + and datetimes as index. Assumes the `total_returns` column is NOT + cost adjusted. - Example: momentum reversal common_returns specific_returns dt 2017-01-01 0.249087 0.935925 1.185012 1.185012 2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980 + cost : pd.Series, optional + if present, gets subtracted from `perf_attrib_data['total_returns']`, + and gets plotted separately + ax : matplotlib.axes.Axes axes on which plots are made. if None, current axes will be used @@ -191,19 +203,29 @@ ------- ax : matplotlib.axes.Axes """ + if ax is None: ax = plt.gca() returns = perf_attrib_data['total_returns'] + total_returns_label = 'Total returns' + + if cost is not None: + returns = returns - cost + total_returns_label += ' (adjusted)' + specific_returns = perf_attrib_data['specific_returns'] common_returns = perf_attrib_data['common_returns'] - ax.plot(ep.cum_returns(returns), color='g', label='Total returns') + ax.plot(ep.cum_returns(returns), color='g', label=total_returns_label) ax.plot(ep.cum_returns(specific_returns), color='b', label='Cumulative specific returns') ax.plot(ep.cum_returns(common_returns), color='r', label='Cumulative common returns') + if cost is not None: + ax.plot(cost, color='k', label='Cost') + ax.set_title('Time series of cumulative returns') ax.set_ylabel('Returns') diff --git a/pyfolio/tears.py b/pyfolio/tears.py index 4fcf8fb9..fb8d0be3 100644 --- a/pyfolio/tears.py +++ b/pyfolio/tears.py @@ -1065,7 +1065,8 @@ def create_capacity_tear_sheet(returns, positions, transactions, @plotting.customize def create_bayesian_tear_sheet(returns, benchmark_rets=None, live_start_date=None, samples=2000, - return_fig=False, stoch_vol=False): + return_fig=False, stoch_vol=False, + progressbar=True): """ Generate a 
number of Bayesian distributions and a Bayesian cone plot of returns. @@ -1134,14 +1135,16 @@ def create_bayesian_tear_sheet(returns, benchmark_rets=None, trace_t, ppc_t = bayesian.run_model('t', df_train, returns_test=df_test, - samples=samples, ppc=True) + samples=samples, ppc=True, + progressbar=progressbar) previous_time = timer("T model", previous_time) # Compute BEST model print("\nRunning BEST model") trace_best = bayesian.run_model('best', df_train, returns_test=df_test, - samples=samples) + samples=samples, + progressbar=progressbar) previous_time = timer("BEST model", previous_time) # Plot results @@ -1213,7 +1216,8 @@ def create_bayesian_tear_sheet(returns, benchmark_rets=None, benchmark_rets = benchmark_rets.loc[df_train.index] trace_alpha_beta = bayesian.run_model('alpha_beta', df_train, bmark=benchmark_rets, - samples=samples) + samples=samples, + progressbar=progressbar) previous_time = timer("running alpha beta model", previous_time) # Plot alpha and beta diff --git a/pyfolio/tests/test_tears.py b/pyfolio/tests/test_tears.py index 551355bf..937e2995 100644 --- a/pyfolio/tests/test_tears.py +++ b/pyfolio/tests/test_tears.py @@ -128,4 +128,5 @@ def test_create_bayesian_tear_sheet_breakdown(self, kwargs): create_bayesian_tear_sheet( self.test_returns, live_start_date=self.test_returns.index[-20], + progressbar=False, **kwargs)