Added volatility function

This commit is contained in:
Gourav Kumar 2022-03-06 15:36:23 +05:30
parent 17b3e348a2
commit 24d5d253b5
2 changed files with 69 additions and 30 deletions

View File

@ -1,6 +1,7 @@
from __future__ import annotations from __future__ import annotations
import datetime import datetime
import statistics
from typing import Iterable, List, Literal, Mapping, Union from typing import Iterable, List, Literal, Mapping, Union
from dateutil.relativedelta import relativedelta from dateutil.relativedelta import relativedelta
@ -382,6 +383,23 @@ class TimeSeries(TimeSeriesCore):
rolling_returns.sort() rolling_returns.sort()
return self.__class__(rolling_returns, self.frequency.symbol) return self.__class__(rolling_returns, self.frequency.symbol)
    def volatility(
        self,
        start_date: Union[str, datetime.datetime],
        end_date: Union[str, datetime.datetime],
        annualized: bool = True,
    ) -> float:
        """Calculates the volatility of the time series.

        The volatility is calculated as the standard deviation of periodic
        (non-compounded) returns between start_date and end_date. The
        periodicity of the returns matches the periodicity of the underlying
        data (``self.frequency``).

        Parameters
        ----------
        start_date : str | datetime.datetime
            Start date of the window over which returns are computed.
        end_date : str | datetime.datetime
            End date of the window over which returns are computed.
        annualized : bool, default True
            NOTE(review): currently accepted but never used — the value
            returned is always the raw periodic standard deviation, with no
            annualization applied. Confirm intended behavior.

        Returns
        -------
        float
            Sample standard deviation of the periodic returns.
        """
        # Simple (non-compounded) rolling returns at the series' own frequency.
        rolling_returns = self.calculate_rolling_returns(
            from_date=start_date, to_date=end_date, interval_type=self.frequency.freq_type, compounding=False
        )
        # presumably `.values` yields the numeric return values of the
        # rolling-returns series — verify against TimeSeries definition.
        sd = statistics.stdev(rolling_returns.values)
        return sd
if __name__ == "__main__": if __name__ == "__main__":
date_series = [ date_series = [

View File

@ -1,37 +1,58 @@
# type: ignore import pandas as pd
if __name__ == "__main__": from fincal.fincal import TimeSeries, create_date_series
import datetime dfd = pd.read_csv("test_files/nav_history_daily - Copy.csv")
import time dfd = dfd[dfd["amfi_code"] == 118825].reset_index(drop=True)
ts = TimeSeries([(i.date, i.nav) for i in dfd.itertuples()], frequency="D")
repr(ts)
# print(ts[['2022-01-31', '2021-05-28']])
import pandas as pd # rr = ts.calculate_rolling_returns(from_date='2021-01-01', to_date='2022-01-01', frequency='D', interval_type='days', interval_value=30, compounding=False)
from fincal.fincal import TimeSeries
df = pd.read_csv('test_files/msft.csv') # data = [
df = df.sort_values(by='Date') # type: ignore # ("2020-01-01", 10),
data_list = [(i.Date, i.Close) for i in df.itertuples()] # ("2020-02-01", 12),
# ("2020-03-01", 14),
# ("2020-04-01", 16),
# ("2020-05-01", 18),
# ("2020-06-01", 20),
# ("2020-07-01", 22),
# ("2020-08-01", 24),
# ("2020-09-01", 26),
# ("2020-10-01", 28),
# ("2020-11-01", 30),
# ("2020-12-01", 32),
# ("2021-01-01", 34),
# ]
start = time.time() # ts = TimeSeries(data, frequency="M")
ts_data = TimeSeries(data_list, frequency='D', date_format='%d-%m-%Y') # rr = ts.calculate_rolling_returns(
print(f"Instantiation took {round((time.time() - start)*1000, 2)} ms") # "2020-02-01",
# ts_data.fill_missing_days() # "2021-01-01",
start = time.time() # if_not_found="nan",
# ts_data.calculate_returns(as_on=datetime.datetime(2022, 1, 4), closest='next', years=1) # compounding=False,
rr = ts_data.calculate_rolling_returns(datetime.datetime(1994, 1, 1), # interval_type="months",
datetime.datetime(2022, 2, 17), # interval_value=1,
frequency='D', # as_on_match="exact",
as_on_match='next', # )
prior_match='previous',
closest='previous',
years=1)
# ffill_data = ts_data.bfill() # for i in rr:
print(f"Calculation took {round((time.time() - start)*1000, 2)} ms") # print(i)
rr.sort()
for i in rr[:10]: # returns = ts.calculate_returns(
print(i) # "2020-04-25",
# print(ffill_data) # return_actual_date=True,
# print(ts_data) # closest_max_days=15,
# print(repr(ts_data)) # compounding=True,
# interval_type="days",
# interval_value=90,
# closest="previous",
# if_not_found="fail",
# )
# print(returns)
volatility = ts.volatility(start_date="2018-01-01", end_date="2021-01-01")
print(volatility)