
renamed files to prevent testing errors in tox

find_closest_changes
Gourav Kumar, 2 years ago
parent commit 8c159062f5
  1. Check3.py (+21)
  2. check.py (+118)
  3. check2.py (+100)
  4. check_series.py (renamed, 0 changes)
  5. my_checks.py (+26)
  6. my_test.py (-26)
  7. test.py (-34)
  8. test2.py (-52)

Check3.py (+21)

@@ -0,0 +1,21 @@
import datetime
import math
import random
import time
from typing import List
from dateutil.relativedelta import relativedelta
import pyfacts as pft

data = [
    ("2021-01-01", 10),
    ("2021-02-01", 12),
    ("2021-03-01", 14),
    ("2021-04-01", 16),
    ("2021-05-01", 18),
    ("2021-06-01", 20),
]
ts = pft.TimeSeries(data)
print(repr(ts))

check.py (+118)

@@ -0,0 +1,118 @@
import datetime
import math
import random
# import time
from typing import List
from dateutil.relativedelta import relativedelta
import pyfacts as pft

def create_prices(s0: float, mu: float, sigma: float, num_prices: int) -> list:
    """Generates prices following a geometric Brownian motion process based on the input arguments.

    Since this function is used only to generate data for tests, the seed is fixed as 1234.
    Many of the tests rely on exact values generated using this seed.
    If the seed is changed, those tests will fail.

    Parameters:
    ------------
    s0: float
        Asset initial price.
    mu: float
        Interest rate expressed in annual terms.
    sigma: float
        Volatility expressed in annual terms.
    num_prices: int
        Number of prices to generate.

    Returns:
    --------
    Returns a list of values generated using the GBM algorithm.
    """

    random.seed(1234)  # WARNING! Changing the seed will cause most tests to fail
    all_values = []
    for _ in range(num_prices):
        s0 *= math.exp(
            (mu - 0.5 * sigma**2) * (1.0 / 365.0) + sigma * math.sqrt(1.0 / 365.0) * random.gauss(mu=0, sigma=1)
        )
        all_values.append(round(s0, 2))
    return all_values
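# Note: the loop above is the standard daily GBM discretisation with dt = 1/365
# and Z ~ N(0, 1):
#     S_{t+1} = S_t * exp((mu - 0.5 * sigma**2) * dt + sigma * sqrt(dt) * Z)
# so mu and sigma act as the annualised drift and volatility of the simulated prices.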

def sample_data_generator(
    frequency: pft.Frequency,
    num: int = 1000,
    skip_weekends: bool = False,
    mu: float = 0.1,
    sigma: float = 0.05,
    eomonth: bool = False,
) -> List[tuple]:
    """Creates TimeSeries data.

    Parameters:
    -----------
    frequency: Frequency
        The frequency of the time series data to be generated.
    num: int
        Number of date: value pairs to be generated.
    skip_weekends: bool
        Whether weekends (Saturday, Sunday) should be skipped.
        Gets used only if the frequency is daily.
    mu: float
        Mean return for the values.
    sigma: float
        Standard deviation of the values.

    Returns:
    --------
    Returns a list of (date, value) tuples.
    """

    start_date = datetime.datetime(2017, 1, 1)
    timedelta_dict = {
        frequency.freq_type: int(
            frequency.value * num * (7 / 5 if frequency == pft.AllFrequencies.D and skip_weekends else 1)
        )
    }
    end_date = start_date + relativedelta(**timedelta_dict)
    dates = pft.create_date_series(start_date, end_date, frequency.symbol, skip_weekends=skip_weekends, eomonth=eomonth)
    values = create_prices(1000, mu, sigma, num)
    ts = list(zip(dates, values))
    return ts

market_data = sample_data_generator(num=3600, frequency=pft.AllFrequencies.D, skip_weekends=False)
mts = pft.TimeSeries(market_data, "D")
print(mts)
# print("Datediff=", (mts.end_date - mts.start_date).days)
# stock_data = sample_data_generator(num=3600, frequency=pft.AllFrequencies.D, skip_weekends=False, mu=0.12, sigma=0.15)
# sts = pft.TimeSeries(stock_data, "D")
# print(sts)
# start = time.time()
# alpha = pft.jensens_alpha(
# asset_data=sts, market_data=mts, risk_free_rate=0.052, return_period_unit="months", return_period_value=1
# )
# print(alpha)
# print("Alpha calculation took", time.time() - start, "seconds")
# print("Correlation=", pft.correlation(sts, mts))
rr = mts.calculate_rolling_returns(frequency="D")
print(117, rr[rr.values < 0.1])

check2.py (+100)

@@ -0,0 +1,100 @@
import datetime
import math
import random
import time
from typing import List
from dateutil.relativedelta import relativedelta
import pyfacts as pft

def create_prices(s0: float, mu: float, sigma: float, num_prices: int) -> list:
    """Generates prices following a geometric Brownian motion process based on the input arguments.

    Since this function is used only to generate data for tests, the seed is fixed as 1234.
    Many of the tests rely on exact values generated using this seed.
    If the seed is changed, those tests will fail.

    Parameters:
    ------------
    s0: float
        Asset initial price.
    mu: float
        Interest rate expressed in annual terms.
    sigma: float
        Volatility expressed in annual terms.
    num_prices: int
        Number of prices to generate.

    Returns:
    --------
    Returns a list of values generated using the GBM algorithm.
    """

    random.seed(1234)  # WARNING! Changing the seed will cause most tests to fail
    all_values = []
    for _ in range(num_prices):
        s0 *= math.exp(
            (mu - 0.5 * sigma**2) * (1.0 / 365.0) + sigma * math.sqrt(1.0 / 365.0) * random.gauss(mu=0, sigma=1)
        )
        all_values.append(round(s0, 2))
    return all_values

def sample_data_generator(
    frequency: pft.Frequency,
    num: int = 1000,
    skip_weekends: bool = False,
    mu: float = 0.1,
    sigma: float = 0.05,
    eomonth: bool = False,
) -> List[tuple]:
    """Creates TimeSeries data.

    Parameters:
    -----------
    frequency: Frequency
        The frequency of the time series data to be generated.
    num: int
        Number of date: value pairs to be generated.
    skip_weekends: bool
        Whether weekends (Saturday, Sunday) should be skipped.
        Gets used only if the frequency is daily.
    mu: float
        Mean return for the values.
    sigma: float
        Standard deviation of the values.

    Returns:
    --------
    Returns a list of (date, value) tuples.
    """

    start_date = datetime.datetime(2017, 1, 1)
    timedelta_dict = {
        frequency.freq_type: int(
            frequency.value * num * (7 / 5 if frequency == pft.AllFrequencies.D and skip_weekends else 1)
        )
    }
    end_date = start_date + relativedelta(**timedelta_dict)
    dates = pft.create_date_series(start_date, end_date, frequency.symbol, skip_weekends=skip_weekends, eomonth=eomonth)
    values = create_prices(1000, mu, sigma, num)
    ts = list(zip(dates, values))
    return ts

market_data = sample_data_generator(num=3600, frequency=pft.AllFrequencies.D, skip_weekends=False)
mts = pft.TimeSeries(market_data, "D")
print(mts)
sortino = pft.sortino_ratio(mts, risk_free_rate=0.05)
print(sortino)

test_series.py → check_series.py (renamed, 0 changes)

my_checks.py (+26)

@@ -0,0 +1,26 @@
import datetime
import time
import timeit
import pandas
from pyfacts.pyfacts import AllFrequencies, TimeSeries, create_date_series
dfd = pandas.read_csv("test_files/msft.csv")
dfm = pandas.read_csv("test_files/nav_history_monthly.csv")
dfq = pandas.read_csv("test_files/nav_history_quarterly.csv")
data_d = [(i.date, i.nav) for i in dfd.itertuples()]
data_m = [{"date": i.date, "value": i.nav} for i in dfm.itertuples()]
data_q = {i.date: i.nav for i in dfq.itertuples()}
data_q.update({"14-02-2022": 93.7})
tsd = TimeSeries(data_d, frequency="D")
tsm = TimeSeries(data_m, frequency="M", date_format="%d-%m-%Y")
tsq = TimeSeries(data_q, frequency="Q", date_format="%d-%m-%Y")
start = time.time()
# ts.calculate_rolling_returns(datetime.datetime(2015, 1, 1), datetime.datetime(2022, 2, 1), years=1)
bdata = tsq.bfill()
# rr = tsd.calculate_rolling_returns(datetime.datetime(2022, 1, 1), datetime.datetime(2022, 2, 1), years=1)
print(time.time() - start)
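
For reference, the three input shapes exercised above via pandas (a list of tuples, a list of dicts, and a plain dict) can also be passed as literals. A minimal sketch with hypothetical values, assuming the TimeSeries constructor accepts these shapes as the script implies:

from pyfacts.pyfacts import TimeSeries

# list of (date, value) tuples, daily frequency
tsd = TimeSeries([("2022-01-01", 100.0), ("2022-01-02", 101.5)], frequency="D")
# list of {"date": ..., "value": ...} dicts, monthly frequency
tsm = TimeSeries([{"date": "31-01-2022", "value": 100.0}, {"date": "28-02-2022", "value": 103.2}], frequency="M", date_format="%d-%m-%Y")
# plain {date: value} dict, quarterly frequency
tsq = TimeSeries({"31-03-2022": 100.0, "30-06-2022": 97.4}, frequency="Q", date_format="%d-%m-%Y")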

my_test.py (-26)

@@ -1,26 +0,0 @@
import datetime
import time
import timeit
import pandas
from fincal.fincal import AllFrequencies, TimeSeries, create_date_series
dfd = pandas.read_csv('test_files/msft.csv')
dfm = pandas.read_csv('test_files/nav_history_monthly.csv')
dfq = pandas.read_csv('test_files/nav_history_quarterly.csv')
data_d = [(i.date, i.nav) for i in dfd.itertuples()]
data_m = [{'date': i.date, 'value': i.nav} for i in dfm.itertuples()]
data_q = {i.date: i.nav for i in dfq.itertuples()}
data_q.update({'14-02-2022': 93.7})
tsd = TimeSeries(data_d, frequency='D')
tsm = TimeSeries(data_m, frequency='M', date_format='%d-%m-%Y')
tsq = TimeSeries(data_q, frequency='Q', date_format='%d-%m-%Y')
start = time.time()
# ts.calculate_rolling_returns(datetime.datetime(2015, 1, 1), datetime.datetime(2022, 2, 1), years=1)
bdata = tsq.bfill()
# rr = tsd.calculate_rolling_returns(datetime.datetime(2022, 1, 1), datetime.datetime(2022, 2, 1), years=1)
print(time.time() - start)

test.py (-34)

@@ -1,34 +0,0 @@
# from fincal.core import FincalOptions
import fincal as fc

data = [
    ("2022-01-01", 150),
    ("2022-01-02", 152),
    ("2022-01-03", 151),
    ("2022-01-04", 154),
    ("2022-01-05", 150),
    ("2022-01-06", 157),
    ("2022-01-07", 155),
    ("2022-01-08", 158),
    ("2022-01-09", 162),
    ("2022-01-10", 160),
    ("2022-01-11", 156),
    ("2022-01-12", 162),
    ("2023-01-01", 164),
    ("2023-01-02", 161),
    ("2023-01-03", 167),
    ("2023-01-04", 168),
]
ts = fc.TimeSeries(data, frequency="D", date_format="%Y-%d-%m")
print(ts)
sharpe = fc.sharpe_ratio(
    ts,
    risk_free_rate=(1 + 0.15) ** (1 / 12) - 1,
    from_date="2022-02-01",
    to_date="2023-04-01",
    frequency="M",
    return_period_unit="months",
    return_period_value=1,
)
print(f"{sharpe=}")

test2.py (-52)

@@ -1,52 +0,0 @@
import time
from fincal.fincal import TimeSeries
# start = time.time()
# dfd = pd.read_csv("test_files/msft.csv") # , dtype=dict(nav=str))
# # dfd = dfd[dfd["amfi_code"] == 118825].reset_index(drop=True)
# print("instantiation took", round((time.time() - start) * 1000, 2), "ms")
# ts = TimeSeries([(i.date, i.nav) for i in dfd.itertuples()], frequency="D")
# print(repr(ts))
start = time.time()
# mdd = ts.max_drawdown()
# print(mdd)
# print("max drawdown calc took", round((time.time() - start) * 1000, 2), "ms")
# # print(ts[['2022-01-31', '2021-05-28']])
# rr = ts.calculate_rolling_returns(
# from_date='2021-01-01',
# to_date='2022-01-01',
# frequency='D',
# interval_type='days',
# interval_value=30,
# compounding=False
# )
data = [
    ("2022-01-01", 10),
    # ("2022-01-08", 12),
    ("2022-01-15", 14),
    ("2022-01-22", 16),
    # ("2020-02-07", 18),
    # ("2020-02-14", 20),
    # ("2020-02-21", 22),
    # ("2020-02-28", 24),
    # ("2020-03-01", 26),
    # ("2020-03-01", 28),
    # ("2020-03-01", 30),
    # ("2020-03-01", 32),
    # ("2021-03-01", 34),
]
ts = TimeSeries(data, "W")
# ts_expanded = ts.expand("D", "ffill", skip_weekends=True)
# for i in ts_expanded:
# print(i)
print(ts.get("2022-01-01"))
print(ts.ffill())