# Affiliate Earnings Dashboard — Streamlit app for ShareASale ledger data.
import csv
import hashlib
from datetime import datetime, timedelta
from io import StringIO
from time import gmtime, strftime
from urllib import parse, request

import pandas as pd
import streamlit as st
# Function to fetch data from ShareASale
def fetch_shareasale_data(action_verb, affiliate_id, api_token, api_secret_key, api_version=2.4):
    """Call the ShareASale API and return the raw response body as text.

    Args:
        action_verb: ShareASale API action (e.g. ``'ledger'``).
        affiliate_id: ShareASale affiliate ID.
        api_token: API token issued by ShareASale.
        api_secret_key: API secret used only to sign the request.
        api_version: API version query parameter (default 2.4).

    Returns:
        The decoded response body on success, or ``str(exception)`` on any
        failure — callers must treat a non-report string as an error.
    """
    # ShareASale authenticates each call with a timestamp header plus a
    # SHA-256 hex digest of "token:timestamp:action:secret".
    my_timestamp = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
    data = parse.urlencode({
        'affiliateId': affiliate_id,
        'token': api_token,
        'version': api_version,
        'action': action_verb,
    })
    sig = f"{api_token}:{my_timestamp}:{action_verb}:{api_secret_key}"
    sig_hash = hashlib.sha256(sig.encode('utf-8')).hexdigest()
    my_headers = {
        'x-ShareASale-Date': my_timestamp,
        'x-ShareASale-Authentication': sig_hash,
    }
    call = request.Request(f'https://shareasale.com/x.cfm?{data}', headers=my_headers)
    try:
        # Context manager closes the HTTP connection deterministically
        # (the original leaked the response object).
        with request.urlopen(call) as response:
            return response.read().decode('utf-8')
    except Exception as inst:
        # Best-effort by design: surface the error text to the UI
        # instead of crashing the Streamlit script.
        return str(inst)
# Parse CSV data to a Pandas DataFrame
def parse_csv_to_df(csv_data):
    """Parse a pipe-delimited report string into a pandas DataFrame."""
    return pd.read_csv(StringIO(csv_data), delimiter='|')
# Streamlit UI
st.title("Affiliate Earnings Dashboard")
st.sidebar.title("Settings")

# API and credential settings
my_affiliate_id = st.sidebar.text_input("ShareASale Affiliate ID", "599431")
api_token = st.sidebar.text_input("API Token", "YOUR_API_TOKEN")
api_secret_key = st.sidebar.text_input("API Secret Key", "YOUR_API_SECRET_KEY", type="password")

# Date range picker
# NOTE(review): start/end dates are collected but never passed to the API
# call below — the ledger report is unfiltered. TODO: wire them into the query.
start_date = st.sidebar.date_input("Start Date", datetime.now() - timedelta(days=30))
end_date = st.sidebar.date_input("End Date", datetime.now())

# Fetch data button
if st.sidebar.button("Fetch Data"):
    # Fetch the raw ledger report from ShareASale.
    action_verb = 'ledger'
    ledger_data = fetch_shareasale_data(action_verb, my_affiliate_id, api_token, api_secret_key)

    # fetch_shareasale_data returns an error *string* on failure; that (or an
    # API error page) will not parse into the expected report, so fail with a
    # readable message instead of an opaque pandas traceback.
    try:
        df = parse_csv_to_df(ledger_data)
    except Exception:
        st.error(f"Could not parse API response: {ledger_data}")
        st.stop()
    required_columns = {'action', 'ledgerid', 'transid', 'merchantid', 'impact'}
    if not required_columns.issubset(df.columns):
        st.error(f"Unexpected API response (missing report columns): {ledger_data}")
        st.stop()

    # Keep only newly created transactions and drop bookkeeping columns.
    df_filtered = df.loc[df['action'] == 'Transaction Created']
    df_filtered = df_filtered.drop(columns=['ledgerid', 'transid', 'action'])

    # Display the DataFrame as a table
    st.write("Transaction Data")
    st.write(df_filtered)

    # Sum the impact for each unique merchantid, then append a grand-total row.
    df_sumif = df_filtered.groupby('merchantid')['impact'].sum().reset_index()
    total_impact = df_sumif['impact'].sum()
    total_row = pd.DataFrame({'merchantid': ['Total'], 'impact': [total_impact]})
    df_sumif = pd.concat([df_sumif, total_row], ignore_index=True)

    # Display the summary DataFrame as a table
    st.write("Impact Summary by Merchant")
    st.write(df_sumif)