Skip to content

Commit

Permalink
Analysis stage (#844)
Browse files Browse the repository at this point in the history
This PR reworks the chainsync data-to-dashboard pipeline to move
computation away from the dashboard and into a separate process.

Major Changes:
- New `run_data_analysis.py` process to run analysis after
`acquire_data` that focuses on computation.
- This stage currently computes spot price, fixed rate, base buffer,
wallet positions, pnl, and the ticker.
- Adding new tables (and interface) to support output of data analysis.
- Simplifying dashboard frontend to query output of data analysis.

Minor Changes:
- Adding drop argument to sqlalchemy `initialize_session` to drop
existing tables if set to True for debugging.
- Adding in create table retries due to race condition between acquire
data and data analysis both trying to create tables.
- Removing obsolete agent positions data class.
- Renaming the WalletInfo schema to WalletInfoFromChain.
- Removing chainsync scripts for calculating leaderboards from the previous
trading competition.
- Renaming table names to be snake case.
- Adding in a rerun of bots + acquire data + data analysis in system
test to test data pipeline restarts.
- Splitting out system tests to different files.

Breaking Change:
- Postgres DB interface getter functions no longer return blockNumber as
an index. This keeps the getter functions consistent, since not every
table stores the block number.

Bug Fixes:
- `smart_contract_preview_transaction` now takes in a block number
argument to query the mock trade at that exact block. This helps solve a
race condition where the preview could be mocking a position that's no
longer there.

TODO:
- More plots in dashboard.
  • Loading branch information
slundqui authored Aug 22, 2023
1 parent 438e9b0 commit f11ad66
Show file tree
Hide file tree
Showing 36 changed files with 1,186 additions and 1,011 deletions.
1 change: 1 addition & 0 deletions conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
# This means that the local postgres fixture (which launches a docker container) will not automatically
# be cleaned up if you, e.g., use the debugger and a db test fails. Make sure to manually clean up.
# TODO maybe automatically close the container on catch here
# TODO this seems to happen sometimes, not all the time, track down

# Use this in conjunction with the following launch.json configuration:
# {
Expand Down
8 changes: 8 additions & 0 deletions lib/agent0/agent0/test_fixtures/cycle_trade_policy.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,15 +28,23 @@ def __init__(
budget: FixedPoint,
rng: NumpyGenerator | None = None,
slippage_tolerance: FixedPoint | None = None,
max_trades: int | None = None,
):
# We want to do a sequence of trades one at a time, so we keep an internal counter based on
# how many times `action` has been called.
self.counter = 0
self.max_trades = max_trades
super().__init__(budget, rng, slippage_tolerance)

def action(self, market: HyperdriveMarketState, wallet: HyperdriveWallet) -> list[Trade[HyperdriveMarketAction]]:
"""This agent simply opens all trades for a fixed amount and closes them after, one at a time"""
action_list = []

# Early stopping based on parameter
if (self.max_trades is not None) and (self.counter >= self.max_trades):
# We want this bot to exit and crash after it's done the trades it needs to do
raise AgentDoneException("Bot done")

if self.counter == 0:
# Add liquidity
action_list.append(
Expand Down
File renamed without changes.
9 changes: 9 additions & 0 deletions lib/chainsync/bin/run_data_analysis.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
"""Script to format on-chain hyperdrive pool, config, and transaction data post-processing."""
from __future__ import annotations

from chainsync.exec import data_analysis
from elfpy.utils import logs as log_utils

if __name__ == "__main__":
log_utils.setup_logging(".logging/data_analysis.log", log_stdout=True)
data_analysis()
54 changes: 22 additions & 32 deletions lib/chainsync/bin/run_hyperdrive_dashboard.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,19 +5,17 @@

import mplfinance as mpf
import streamlit as st
from chainsync.analysis.calc_fixed_rate import calc_fixed_rate
from chainsync.analysis.calc_ohlcv import calc_ohlcv
from chainsync.analysis.calc_pnl import calc_closeout_pnl, calc_total_returns
from chainsync.dashboard import (
build_fixed_rate,
build_leaderboard,
build_ohlcv,
build_ticker,
get_combined_data,
get_user_lookup,
plot_fixed_rate,
plot_ohlcv,
)
from chainsync.db.base import get_user_map, initialize_session
from chainsync.db.hyperdrive import get_all_traders, get_pool_config, get_pool_info, get_transactions, get_wallet_deltas
from chainsync.db.hyperdrive import get_all_traders, get_pool_analysis, get_pool_config, get_ticker, get_wallet_pnl
from ethpy import build_eth_config

# pylint: disable=invalid-name
Expand All @@ -44,40 +42,34 @@
main_placeholder = st.empty()

main_fig = mpf.figure(style="mike", figsize=(15, 15))
ax_ohlcv = main_fig.add_subplot(3, 1, 1)
ax_vol = main_fig.add_subplot(3, 1, 2)
ax_fixed_rate = main_fig.add_subplot(3, 1, 3)
ax_ohlcv = main_fig.add_subplot(2, 1, 1)
ax_fixed_rate = main_fig.add_subplot(2, 1, 2)

while True:
# Place data and plots
# Wallet addr to username mapping
agents = get_all_traders(session)
user_map = get_user_map(session)
txn_data = get_transactions(session, -max_live_blocks)
pool_info_data = get_pool_info(session, -max_live_blocks, coerce_float=False)
combined_data = get_combined_data(txn_data, pool_info_data)
wallet_deltas = get_wallet_deltas(session, coerce_float=False)
user_lookup = get_user_lookup(agents, user_map)
ticker = build_ticker(wallet_deltas, txn_data, pool_info_data, user_lookup)

(fixed_rate_x, fixed_rate_y) = calc_fixed_rate(combined_data, config_data)
ohlcv = calc_ohlcv(combined_data, config_data, freq="5T")
pool_analysis = get_pool_analysis(session, start_block=-max_live_blocks, coerce_float=False)
ticker = get_ticker(session, start_block=-max_live_blocks, coerce_float=False)
# Adds user lookup to the ticker
display_ticker = build_ticker(ticker, user_lookup)

current_returns, current_wallet = calc_total_returns(config_data, pool_info_data, wallet_deltas)
current_wallet = calc_closeout_pnl(current_wallet, pool_info_data, eth_config) # calc pnl using closeout method
current_wallet.delta = current_wallet.delta.astype(float)
current_wallet.pnl = current_wallet.pnl.astype(float)
current_wallet.closeout_pnl = current_wallet.closeout_pnl.astype(float)
## TODO: FIX AGENT RESTARTS
## Add initial budget column to agents
## when agent restarts, use initial budget for agent's wallet address to set "budget" in Agent.Wallet
# get wallet pnl and calculate leaderboard
wallet_pnl = get_wallet_pnl(session, start_block=-max_live_blocks, coerce_float=False)
# Get the latest updated block
latest_wallet_pnl = wallet_pnl[wallet_pnl["blockNumber"] == wallet_pnl["blockNumber"].max()]
comb_rank, ind_rank = build_leaderboard(latest_wallet_pnl, user_lookup)

comb_rank, ind_rank = build_leaderboard(current_returns, user_lookup)
# build ohlcv and volume
ohlcv = build_ohlcv(pool_analysis, freq="5T")
# build fixed rate
fixed_rate = build_fixed_rate(pool_analysis)

with ticker_placeholder.container():
st.header("Ticker")
st.dataframe(ticker, height=200, use_container_width=True)
st.header("PNL")
st.dataframe(current_wallet, height=500, use_container_width=True)
st.dataframe(display_ticker, height=200, use_container_width=True)
st.header("Total Leaderboard")
st.dataframe(comb_rank, height=500, use_container_width=True)
st.header("Wallet Leaderboard")
Expand All @@ -86,14 +78,12 @@
with main_placeholder.container():
# Clears all axes
ax_ohlcv.clear()
ax_vol.clear()
ax_fixed_rate.clear()

plot_ohlcv(ohlcv, ax_ohlcv, ax_vol)
plot_fixed_rate(fixed_rate_x, fixed_rate_y, ax_fixed_rate)
plot_ohlcv(ohlcv, ax_ohlcv)
plot_fixed_rate(fixed_rate, ax_fixed_rate)

ax_ohlcv.tick_params(axis="both", which="both")
ax_vol.tick_params(axis="both", which="both")
ax_fixed_rate.tick_params(axis="both", which="both")
# Fix axes labels
main_fig.autofmt_xdate()
Expand Down
7 changes: 4 additions & 3 deletions lib/chainsync/chainsync/analysis/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
"""Analysis for trading."""
from .calc_fixed_rate import calc_fixed_rate
from .calc_ohlcv import calc_ohlcv
from .calc_pnl import calc_closeout_pnl, calc_single_closeout, calc_total_returns
from .calc_spot_price import calculate_spot_price, calculate_spot_price_for_position
from .calc_pnl import calc_closeout_pnl, calc_single_closeout
from .calc_spot_price import calc_spot_price, calculate_spot_price_for_position
from .calc_ticker import calc_ticker
from .data_to_analysis import data_to_analysis
23 changes: 23 additions & 0 deletions lib/chainsync/chainsync/analysis/calc_base_buffer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
"""Calculates the amount of base set aside that can't be withdrawn"""
from decimal import Decimal

import pandas as pd


def calc_base_buffer(
    longs_outstanding: pd.Series, share_price: pd.Series, minimum_share_reserves: Decimal
) -> pd.Series:
    """Calculates the amount of base set aside that can't be withdrawn.

    Arguments
    ---------
    longs_outstanding: pd.Series
        The number of longs outstanding from the pool info
    share_price: pd.Series
        The share price from the pool info
    minimum_share_reserves: Decimal
        The minimum share reserves from the pool config
    """
    # Convert the outstanding longs into share terms, then add the reserve floor.
    # Pandas broadcasts these operators elementwise over the internal Decimal values at runtime.
    shares_backing_longs = longs_outstanding / share_price  # type: ignore
    return shares_backing_longs + minimum_share_reserves  # type: ignore
23 changes: 7 additions & 16 deletions lib/chainsync/chainsync/analysis/calc_fixed_rate.py
Original file line number Diff line number Diff line change
@@ -1,22 +1,13 @@
"""Calculate the fixed interest rate."""
from decimal import Decimal

import numpy as np
import pandas as pd

from .calc_spot_price import calculate_spot_price


def calc_fixed_rate(trade_data, config_data):
def calc_fixed_rate(spot_price: pd.Series, position_duration: Decimal):
"""Calculates the fixed rate given trade data."""
trade_data["rate"] = np.nan
annualized_time = config_data["positionDuration"] / Decimal(60 * 60 * 24 * 365)
spot_price = calculate_spot_price(
trade_data["share_reserves"],
trade_data["bond_reserves"],
config_data["initialSharePrice"],
config_data["invTimeStretch"],
)
fixed_rate = (Decimal(1) - spot_price) / (spot_price * annualized_time)
x_data = trade_data["timestamp"]
y_data = fixed_rate
return (x_data, y_data)
# Position duration (in seconds) in terms of fraction of year
annualized_time = position_duration / Decimal(60 * 60 * 24 * 365)
# Pandas is smart enough to be able to broadcast with internal Decimal types at runtime
fixed_rate = (1 - spot_price) / (spot_price * annualized_time) # type: ignore
return fixed_rate
38 changes: 0 additions & 38 deletions lib/chainsync/chainsync/analysis/calc_ohlcv.py

This file was deleted.

Loading

1 comment on commit f11ad66

@vercel
Copy link

@vercel vercel bot commented on f11ad66 Aug 22, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please sign in to comment.