Summary table #442

Merged: 4 commits, Sep 27, 2017
38 changes: 22 additions & 16 deletions pyfolio/bayesian.py
@@ -32,7 +32,7 @@
from empyrical import cum_returns


def model_returns_t_alpha_beta(data, bmark, samples=2000):
def model_returns_t_alpha_beta(data, bmark, samples=2000, progressbar=True):
"""
Run Bayesian alpha-beta-model with T distributed returns.

@@ -86,12 +86,12 @@ def model_returns_t_alpha_beta(data, bmark, samples=2000):
mu=mu_reg,
sd=sigma,
observed=y)
trace = pm.sample(samples)
trace = pm.sample(samples, progressbar=progressbar)

return model, trace


def model_returns_normal(data, samples=500):
def model_returns_normal(data, samples=500, progressbar=True):
"""
Run Bayesian model assuming returns are normally distributed.

@@ -125,11 +125,11 @@ def model_returns_normal(data, samples=500):
returns.distribution.variance**.5 *
np.sqrt(252))

trace = pm.sample(samples)
trace = pm.sample(samples, progressbar=progressbar)
return model, trace


def model_returns_t(data, samples=500):
def model_returns_t(data, samples=500, progressbar=True):
"""
Run Bayesian model assuming returns are Student-T distributed.

@@ -167,11 +167,11 @@ def model_returns_t(data, samples=500):
returns.distribution.variance**.5 *
np.sqrt(252))

trace = pm.sample(samples)
trace = pm.sample(samples, progressbar=progressbar)
return model, trace


def model_best(y1, y2, samples=1000):
def model_best(y1, y2, samples=1000, progressbar=True):
"""
Bayesian Estimation Supersedes the T-Test

@@ -252,7 +252,7 @@ def model_best(y1, y2, samples=1000):
returns_group2.distribution.variance**.5 *
np.sqrt(252))

trace = pm.sample(samples)
trace = pm.sample(samples, progressbar=progressbar)
return model, trace


@@ -347,7 +347,7 @@ def distplot_w_perc(trace, ax):
ylabel='Belief', yticklabels=[])


def model_stoch_vol(data, samples=2000):
def model_stoch_vol(data, samples=2000, progressbar=True):
"""
Run stochastic volatility model.

@@ -385,7 +385,7 @@ def model_stoch_vol(data, samples=2000):
pm.math.exp(-2 * s))
StudentT('r', nu, lam=volatility_process, observed=data)

trace = pm.sample(samples)
trace = pm.sample(samples, progressbar=progressbar)

return model, trace

@@ -525,7 +525,7 @@ def _plot_bayes_cone(returns_train, returns_test,


def run_model(model, returns_train, returns_test=None,
bmark=None, samples=500, ppc=False):
bmark=None, samples=500, ppc=False, progressbar=True):
"""
Run one of the Bayesian models.

@@ -563,21 +563,27 @@ def run_model(model, returns_train, returns_test=None,

if model == 'alpha_beta':
model, trace = model_returns_t_alpha_beta(returns_train,
bmark, samples)
bmark, samples,
progressbar=progressbar)
elif model == 't':
model, trace = model_returns_t(returns_train, samples)
model, trace = model_returns_t(returns_train, samples,
progressbar=progressbar)
elif model == 'normal':
model, trace = model_returns_normal(returns_train, samples)
model, trace = model_returns_normal(returns_train, samples,
progressbar=progressbar)
elif model == 'best':
model, trace = model_best(returns_train, returns_test, samples=samples)
model, trace = model_best(returns_train, returns_test,
samples=samples,
progressbar=progressbar)
else:
raise NotImplementedError(
'Model {} not found.'
'Use alpha_beta, t, normal, or best.'.format(model))

if ppc:
ppc_samples = pm.sample_ppc(trace, samples=samples,
model=model, size=len(returns_test))
model=model, size=len(returns_test),
progressbar=progressbar)
return trace, ppc_samples['returns']

return trace
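The new keyword only threads through to PyMC3's `pm.sample` / `pm.sample_ppc` calls, so a caller of the public entry point can silence sampling output without touching the individual model functions. A minimal sketch under that assumption; the returns series, train/test split, and sample count below are placeholders, not values from this PR:

```python
import numpy as np
import pandas as pd

import pyfolio.bayesian as bayesian

# Synthetic daily returns split into in-sample / out-of-sample windows.
dates = pd.date_range('2016-01-01', periods=252, freq='B')
returns = pd.Series(np.random.normal(0.0005, 0.01, len(dates)), index=dates)
df_train, df_test = returns[:-40], returns[-40:]

# progressbar=False is forwarded to pm.sample / pm.sample_ppc, keeping
# output quiet in batch jobs or test suites.
trace, ppc_returns = bayesian.run_model('t', df_train,
                                        returns_test=df_test,
                                        samples=500, ppc=True,
                                        progressbar=False)
```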
40 changes: 31 additions & 9 deletions pyfolio/perf_attrib.py
@@ -14,10 +14,11 @@
# limitations under the License.
from __future__ import division

from collections import OrderedDict
Review comment (Contributor): This should be grouped with the standard lib imports.

import empyrical as ep
import pandas as pd

import matplotlib.pyplot as plt

from pyfolio.pos import get_percent_alloc
from pyfolio.utils import print_table, set_legend_location

@@ -124,12 +125,12 @@ def perf_attrib(returns, positions, factor_returns, factor_loadings,
pd.concat([perf_attrib_by_factor, returns_df], axis='columns'))


def create_perf_attrib_stats(perf_attrib):
def create_perf_attrib_stats(perf_attrib, risk_exposures):
"""
Takes perf attribution data over a period of time and computes annualized
multifactor alpha, multifactor sharpe, risk exposures.
"""
summary = {}
summary = OrderedDict()
specific_returns = perf_attrib['specific_returns']
common_returns = perf_attrib['common_returns']

@@ -139,6 +140,8 @@ def create_perf_attrib_stats(perf_attrib):
summary['Multi-factor sharpe'] =\
ep.sharpe_ratio(specific_returns)

# empty line between common/specific/total returns
summary[' '] = ' '
summary['Cumulative specific returns'] =\
ep.cum_returns_final(specific_returns)
summary['Cumulative common returns'] =\
@@ -147,7 +150,9 @@ def create_perf_attrib_stats(perf_attrib):
ep.cum_returns_final(perf_attrib['total_returns'])

summary = pd.Series(summary)
return summary

risk_exposure_summary = risk_exposures.sum(axis='rows')
return summary, risk_exposure_summary


def show_perf_attrib_stats(returns, positions, factor_returns,
@@ -164,46 +169,63 @@ def show_perf_attrib_stats(returns, positions, factor_returns,
pos_in_dollars=pos_in_dollars,
)

perf_attrib_stats = create_perf_attrib_stats(perf_attrib_data)
perf_attrib_stats, risk_exposure_stats =\
create_perf_attrib_stats(perf_attrib_data, risk_exposures)
Review comment (Contributor): Thoughts on having perf_attrib() and create_perf_attrib_stats() return things in the same order for consistency? I.e. changing this to risk_exposure_stats, perf_attrib_stats = ...


print_table(perf_attrib_stats)
print_table(risk_exposures)
print_table(risk_exposure_stats)
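Since `create_perf_attrib_stats` now takes the risk exposures and returns a pair, downstream code has to unpack two values. A small sketch of consuming the new return value directly, assuming synthetic inputs that carry only the columns shown in the docstring example; the frames and numbers are made up for illustration:

```python
import pandas as pd

from pyfolio.perf_attrib import create_perf_attrib_stats

dates = pd.date_range('2017-01-01', periods=3)

# Synthetic attribution frame with the columns the function reads.
perf_attrib_data = pd.DataFrame({
    'common_returns':   [0.010, -0.004, 0.002],
    'specific_returns': [0.002,  0.001, -0.001],
    'total_returns':    [0.012, -0.003, 0.001],
}, index=dates)

# Per-date factor exposures; summing over the rows yields the per-factor
# totals returned as the second element of the tuple.
risk_exposures = pd.DataFrame({
    'momentum': [0.3, 0.4, 0.1],
    'reversal': [-0.1, 0.2, 0.0],
}, index=dates)

summary, risk_exposure_summary = create_perf_attrib_stats(perf_attrib_data,
                                                          risk_exposures)
print(summary)                 # alpha, sharpe, cumulative return rows
print(risk_exposure_summary)   # momentum 0.8, reversal 0.1
```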


def plot_returns(perf_attrib_data, ax=None):
def plot_returns(perf_attrib_data, cost=None, ax=None):
"""
Plot total, specific, and common returns.

Parameters
----------
perf_attrib_data : pd.DataFrame
df with factors, common returns, and specific returns as columns,
and datetimes as index
and datetimes as index. Assumes the `total_returns` column is NOT
cost adjusted.
- Example:
momentum reversal common_returns specific_returns
dt
2017-01-01 0.249087 0.935925 1.185012 1.185012
2017-01-02 -0.003194 -0.400786 -0.403980 -0.403980

cost : pd.Series, optional
if present, gets subtracted from `perf_attrib_data['total_returns']`,
and gets plotted separately

ax : matplotlib.axes.Axes
axes on which plots are made. if None, current axes will be used

Returns
-------
ax : matplotlib.axes.Axes
"""

if ax is None:
ax = plt.gca()

returns = perf_attrib_data['total_returns']
total_returns_label = 'Total returns'

if cost is not None:
returns = returns - cost
total_returns_label += ' (adjusted)'

specific_returns = perf_attrib_data['specific_returns']
common_returns = perf_attrib_data['common_returns']

ax.plot(ep.cum_returns(returns), color='g', label='Total returns')
ax.plot(ep.cum_returns(returns), color='g', label=total_returns_label)
ax.plot(ep.cum_returns(specific_returns), color='b',
label='Cumulative specific returns')
ax.plot(ep.cum_returns(common_returns), color='r',
label='Cumulative common returns')

if cost is not None:
ax.plot(cost, color='purple', label='Cost')

ax.set_title('Time series of cumulative returns')
ax.set_ylabel('Returns')

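For the new `cost` argument, a short usage sketch; the frame and the per-day cost series here are hypothetical, and only illustrate that the cost is subtracted from total returns (relabelled "Total returns (adjusted)") and drawn as its own line:

```python
import pandas as pd
import matplotlib.pyplot as plt

from pyfolio.perf_attrib import plot_returns

dates = pd.date_range('2017-01-01', periods=3)
perf_attrib_data = pd.DataFrame({
    'common_returns':   [0.010, -0.004, 0.002],
    'specific_returns': [0.002,  0.001, -0.001],
    'total_returns':    [0.012, -0.003, 0.001],
}, index=dates)

# Hypothetical flat transaction cost per day.
cost = pd.Series(0.0005, index=dates)

ax = plot_returns(perf_attrib_data, cost=cost)
plt.show()
```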
12 changes: 8 additions & 4 deletions pyfolio/tears.py
@@ -1065,7 +1065,8 @@ def create_capacity_tear_sheet(returns, positions, transactions,
@plotting.customize
def create_bayesian_tear_sheet(returns, benchmark_rets=None,
live_start_date=None, samples=2000,
return_fig=False, stoch_vol=False):
return_fig=False, stoch_vol=False,
progressbar=True):
"""
Generate a number of Bayesian distributions and a Bayesian
cone plot of returns.
@@ -1134,14 +1135,16 @@ def create_bayesian_tear_sheet(returns, benchmark_rets=None,

trace_t, ppc_t = bayesian.run_model('t', df_train,
returns_test=df_test,
samples=samples, ppc=True)
samples=samples, ppc=True,
progressbar=progressbar)
previous_time = timer("T model", previous_time)

# Compute BEST model
print("\nRunning BEST model")
trace_best = bayesian.run_model('best', df_train,
returns_test=df_test,
samples=samples)
samples=samples,
progressbar=progressbar)
previous_time = timer("BEST model", previous_time)

# Plot results
@@ -1213,7 +1216,8 @@ def create_bayesian_tear_sheet(returns, benchmark_rets=None,
benchmark_rets = benchmark_rets.loc[df_train.index]
trace_alpha_beta = bayesian.run_model('alpha_beta', df_train,
bmark=benchmark_rets,
samples=samples)
samples=samples,
progressbar=progressbar)
previous_time = timer("running alpha beta model", previous_time)

# Plot alpha and beta
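At the tear-sheet level the flag is simply passed down to the Bayesian models, mirroring the test change below. A hedged sketch of a quiet run; the returns series and the reduced `samples` value are illustrative, not defaults from this PR:

```python
import numpy as np
import pandas as pd

from pyfolio.tears import create_bayesian_tear_sheet

# Synthetic daily returns; the last 20 days are treated as the live period.
dates = pd.date_range('2015-01-02', periods=500, freq='B')
returns = pd.Series(np.random.normal(0.0005, 0.01, len(dates)), index=dates)

create_bayesian_tear_sheet(returns,
                           live_start_date=returns.index[-20],
                           samples=500,
                           progressbar=False)  # no PyMC3 progress bars in CI
```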
1 change: 1 addition & 0 deletions pyfolio/tests/test_tears.py
@@ -128,4 +128,5 @@ def test_create_bayesian_tear_sheet_breakdown(self, kwargs):
create_bayesian_tear_sheet(
self.test_returns,
live_start_date=self.test_returns.index[-20],
progressbar=False,
**kwargs)