
Added volatility function

Branch: switch-to-decimal
Author: Gourav Kumar, 2 years ago
Parent commit: 24d5d253b5

Changed files:
  1. fincal/fincal.py (18 changed lines)
  2. test2.py (95 changed lines)

fincal/fincal.py

@@ -1,6 +1,7 @@
 from __future__ import annotations
 import datetime
+import statistics
 from typing import Iterable, List, Literal, Mapping, Union
 from dateutil.relativedelta import relativedelta
@@ -382,6 +383,23 @@ class TimeSeries(TimeSeriesCore):
         rolling_returns.sort()
         return self.__class__(rolling_returns, self.frequency.symbol)
+
+    def volatility(
+        self,
+        start_date: Union[str, datetime.datetime],
+        end_date: Union[str, datetime.datetime],
+        annualized: bool = True,
+    ):
+        """Calculates the volatility of the time series.
+        The volatility is calculated as the standard deviation of periodic returns.
+        The periodicity of returns is based on the periodicity of the underlying data.
+        """
+        rolling_returns = self.calculate_rolling_returns(
+            from_date=start_date, to_date=end_date, interval_type=self.frequency.freq_type, compounding=False
+        )
+        sd = statistics.stdev(rolling_returns.values)
+        return sd
+
 if __name__ == "__main__":
     date_series = [

test2.py

@@ -1,37 +1,58 @@
-# type: ignore
-if __name__ == "__main__":
-    import datetime
-    import time
-
-    import pandas as pd
-
-    from fincal.fincal import TimeSeries
-
-    df = pd.read_csv('test_files/msft.csv')
-    df = df.sort_values(by='Date')  # type: ignore
-    data_list = [(i.Date, i.Close) for i in df.itertuples()]
-
-    start = time.time()
-    ts_data = TimeSeries(data_list, frequency='D', date_format='%d-%m-%Y')
-    print(f"Instantiation took {round((time.time() - start)*1000, 2)} ms")
-    # ts_data.fill_missing_days()
-    start = time.time()
-    # ts_data.calculate_returns(as_on=datetime.datetime(2022, 1, 4), closest='next', years=1)
-    rr = ts_data.calculate_rolling_returns(datetime.datetime(1994, 1, 1),
-                                           datetime.datetime(2022, 2, 17),
-                                           frequency='D',
-                                           as_on_match='next',
-                                           prior_match='previous',
-                                           closest='previous',
-                                           years=1)
-    # ffill_data = ts_data.bfill()
-    print(f"Calculation took {round((time.time() - start)*1000, 2)} ms")
-    rr.sort()
-    for i in rr[:10]:
-        print(i)
-    # print(ffill_data)
-    # print(ts_data)
-    # print(repr(ts_data))
+import pandas as pd
+
+from fincal.fincal import TimeSeries, create_date_series
+
+dfd = pd.read_csv("test_files/nav_history_daily - Copy.csv")
+dfd = dfd[dfd["amfi_code"] == 118825].reset_index(drop=True)
+ts = TimeSeries([(i.date, i.nav) for i in dfd.itertuples()], frequency="D")
+repr(ts)
+# print(ts[['2022-01-31', '2021-05-28']])
+
+# rr = ts.calculate_rolling_returns(from_date='2021-01-01', to_date='2022-01-01', frequency='D', interval_type='days', interval_value=30, compounding=False)
+
+# data = [
+#     ("2020-01-01", 10),
+#     ("2020-02-01", 12),
+#     ("2020-03-01", 14),
+#     ("2020-04-01", 16),
+#     ("2020-05-01", 18),
+#     ("2020-06-01", 20),
+#     ("2020-07-01", 22),
+#     ("2020-08-01", 24),
+#     ("2020-09-01", 26),
+#     ("2020-10-01", 28),
+#     ("2020-11-01", 30),
+#     ("2020-12-01", 32),
+#     ("2021-01-01", 34),
+# ]
+# ts = TimeSeries(data, frequency="M")
+# rr = ts.calculate_rolling_returns(
+#     "2020-02-01",
+#     "2021-01-01",
+#     if_not_found="nan",
+#     compounding=False,
+#     interval_type="months",
+#     interval_value=1,
+#     as_on_match="exact",
+# )
+# for i in rr:
+#     print(i)
+
+# returns = ts.calculate_returns(
+#     "2020-04-25",
+#     return_actual_date=True,
+#     closest_max_days=15,
+#     compounding=True,
+#     interval_type="days",
+#     interval_value=90,
+#     closest="previous",
+#     if_not_found="fail",
+# )
+# print(returns)
+
+volatility = ts.volatility(start_date="2018-01-01", end_date="2021-01-01")
+print(volatility)
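
Because the annualized parameter is present in the new signature but not yet applied, the sketch below shows how annualization is conventionally done; this is an assumption about where the API may be headed, not current fincal behaviour. The periodic standard deviation is scaled by the square root of the number of periods per year (252 trading days is a common choice for daily data), and the return figures are made up for illustration.

import math
import statistics

# Made-up daily simple returns.
daily_returns = [0.001, -0.002, 0.0015, 0.003, -0.001]

periodic_sd = statistics.stdev(daily_returns)

# Conventional annualisation: sigma_annual = sigma_periodic * sqrt(periods per year).
annualized_sd = periodic_sd * math.sqrt(252)
print(periodic_sd, annualized_sd)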
