import numpy as np
import pandas as pd
import unittest
from context import grama as gr
from context import data
X = gr.Intention()
##==============================================================================
## transform summary functions
##==============================================================================
class TestSummaryFcn(unittest.TestCase):
def test_mean(self):
df = data.df_diamonds >> gr.tf_select(X.cut, X.x) >> gr.tf_head(5)
# straight summarize
t = df >> gr.tf_summarize(m=gr.mean(X.x))
df_truth = pd.DataFrame({"m": [4.086]})
self.assertTrue(t.equals(df_truth))
# grouped summarize
t = df >> gr.tf_group_by(X.cut) >> gr.tf_summarize(m=gr.mean(X.x))
df_truth = pd.DataFrame(
{"cut": ["Good", "Ideal", "Premium"], "m": [4.195, 3.950, 4.045]}
)
self.assertTrue(t.equals(df_truth))
# straight mutate
t = df >> gr.tf_mutate(m=gr.mean(X.x))
df_truth = df.copy()
df_truth["m"] = df_truth.x.mean()
self.assertTrue(t.equals(df_truth))
# grouped mutate
t = df >> gr.tf_group_by(X.cut) >> gr.tf_mutate(m=gr.mean(X.x))
df_truth["m"] = pd.Series([3.950, 4.045, 4.195, 4.045, 4.195])
self.assertTrue(t.sort_index().equals(df_truth))
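# For comparison, a plain-pandas sketch of the grouped summarize verified above
# (illustration only; it assumes df_diamonds exposes 'cut' and 'x' columns):
# m_by_cut = (
#     data.df_diamonds[["cut", "x"]].head(5)
#     .groupby("cut")["x"].mean().reset_index(name="m")
# )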
def test_skew(self):
df_truth = pd.DataFrame({"m": [0.09984760044443139]})
import json
import os
import shutil
import glob
import re
import warnings
import pandas as pd
import numpy as np
def reduce_df(path, output, nrows=None, chunksize=20000):
""" Load Google analytics data from JSON into a Pandas.DataFrame. """
if nrows and chunksize:
msg = "Reading {} rows in chunks of {}. We are gonna need {} chunks"
print(msg.format(nrows, chunksize, nrows / chunksize))
temp_dir = "../data/temp"
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
JSON_COLUMNS = ['device', 'geoNetwork', 'totals', 'trafficSource']
i = 0
for chunk in pd.read_csv(path,
converters={column: json.loads for column in JSON_COLUMNS},
dtype={'fullVisitorId': 'str'},
nrows=nrows,
chunksize=chunksize):
chunk = chunk.reset_index()
# Normalize JSON columns
for column in JSON_COLUMNS:
column_as_df = pd.io.json.json_normalize(chunk[column])
chunk = chunk.drop(column, axis=1).merge(column_as_df, right_index=True, left_index=True)
# Parse date
chunk['date'] = chunk['date'].apply(lambda x: pd.datetime.strptime(str(x), '%Y%m%d'))
# Only keep relevant columns
cols = ['date', 'fullVisitorId', 'operatingSystem', 'country', 'browser',
'pageviews', 'transactions', 'visits', 'transactionRevenue', 'visitStartTime']
try:
chunk = chunk[cols]
except KeyError as e:
# Regex magic to find exactly which columns were not found.
# Might be different in Python 3, be careful!
missing_cols = list(re.findall(r"'(.*?)'", e.args[0]))
for col in missing_cols:
print("Column {} was not found in chunk {}, filling with zeroes".format(col, i))
chunk[col] = [0] * len(chunk)
chunk = chunk[cols]
print("Loaded chunk {}, Shape is: {}".format(i, chunk.shape))
chunk.to_csv(os.path.join(temp_dir, str(i) + ".csv"), encoding='utf-8', index=False)
i += 1
print("Finished all chunks, now concatenating")
files = glob.glob(os.path.join(temp_dir, "*.csv"))
with open(output, 'wb') as outfile:
for i, fname in enumerate(files):
with open(fname, 'rb') as infile:
# Throw away header on all but first file
if i != 0:
infile.readline()
# Block copy rest of file from input to output without parsing
shutil.copyfileobj(infile, outfile)
print("Deleting temp folder {}".format(temp_dir))
shutil.rmtree(temp_dir)
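# Example usage (a sketch; the paths, row count, and chunk size below are hypothetical):
# reduce_df("../data/train.csv", "../data/train_reduced.csv", nrows=100000, chunksize=20000)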
def aggregate(df):
"""Group and pivot the dataframe so that we have one row per visitor.
This row includes features of two types:
* Dynamic. These are repeated for each month and capture the time-series like behavior.
Some examples are the revenue and visits per month, for every month in the dataset.
* Static. These exist once per user and correspond to relatively constant properties.
Examples include the person's country, OS and browser.
Parameters
----------
df : pd.DataFrame
The original reduced dataframe - see the `reduce_df` function
Returns
-------
pd.DataFrame
The same dataframe grouped and pivoted, where each visitor is a single row.
"""
df['transactions'].fillna(0, inplace=True)
df['transactionRevenue'].fillna(0.0, inplace=True)
def agg_date(s):
date = pd.datetime.strptime(s, '%Y-%m-%d')
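# A minimal sketch of the group-and-pivot idea described in the aggregate docstring
# (column names are assumptions borrowed from reduce_df, not the author's exact code):
# df['month'] = df['date'].values.astype('datetime64[M]')
# dynamic = df.pivot_table(index='fullVisitorId', columns='month',
#                          values=['transactionRevenue', 'visits'], aggfunc='sum')
# static = df.groupby('fullVisitorId')[['country', 'operatingSystem', 'browser']].first()
# per_visitor = dynamic.join(static)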
# -*- coding: utf-8 -*-
import warnings
import numpy as np
import pandas as pd
from concurrent.futures import ProcessPoolExecutor, as_completed
from collections import namedtuple
from multiprocessing import cpu_count
from typing import Union, Iterable, Callable, Generator
from scipy.cluster.vq import kmeans2
try:
import matplotlib.pyplot as plt
MATPLOTLIB_FOUND = True
except ImportError:
MATPLOTLIB_FOUND = False
warnings.warn("matplotlib not installed; results plotting is disabled.")
try:
from joblib import Parallel, delayed
except ImportError:
Parallel, delayed = None, None
warnings.warn(
"joblib not installed, will be unavailable as a backend for parallel processing."
)
GapCalcResult = namedtuple(
"GapCalcResult", "gap_value n_clusters ref_dispersion_std sdk sk gap_star sk_star"
)
class OptimalK:
"""
Obtain the optimal number of clusters a dataset should have using the gap statistic.
Tibshirani, Walther, Hastie
http://www.web.stanford.edu/~hastie/Papers/gap.pdf
Example:
>>> from sklearn.datasets.samples_generator import make_blobs
>>> from gap_statistic import OptimalK
>>> X, y = make_blobs(n_samples=int(1e5), n_features=2, centers=3, random_state=100)
>>> optimalK = OptimalK(parallel_backend='joblib')
>>> optimalK(X, cluster_array=[1,2,3,4,5])
3
"""
gap_df = None
def __init__(
self,
n_jobs: int = -1,
parallel_backend: str = "joblib",
clusterer: Callable = None,
clusterer_kwargs: dict = None,
n_iter: int = 10,
) -> None:
"""
Construct OptimalK to use n_jobs cores (parallelized via joblib or multiprocessing, or run single core).
If parallel_backend == 'rust', all available cores are used.
:param n_jobs: number of cores to use; values outside [1, cpu_count()] fall back to all cores
:param parallel_backend: 'joblib', 'multiprocessing', or 'rust'; anything else runs single core
:param clusterer: optional custom clustering callable; defaults to scipy's kmeans2
:param clusterer_kwargs: keyword arguments passed to the clusterer
:param n_iter int: only valid for 'rust' backend, iterations for Kmeans
"""
if clusterer is not None and parallel_backend == "rust":
raise ValueError(
"Cannot use 'rust' backend with a user defined clustering function, only KMeans"
" is supported on the rust implementation"
)
self.parallel_backend = (
parallel_backend
if parallel_backend in ["joblib", "multiprocessing", "rust"]
else None
)
self.n_iter = n_iter
self.n_jobs = n_jobs if 1 <= n_jobs <= cpu_count() else cpu_count() # type: int
self.n_jobs = 1 if parallel_backend is None else self.n_jobs
self.clusterer = clusterer if clusterer is not None else kmeans2
self.clusterer_kwargs = (
clusterer_kwargs or dict()
if clusterer is not None
else dict(iter=10, minit="points")
)
def __call__(
self,
X: Union[pd.DataFrame, np.ndarray],
n_refs: int = 3,
cluster_array: Iterable[int] = (),
):
"""
Calculates KMeans optimal K using the Gap Statistic from Tibshirani, Walther, Hastie
http://www.web.stanford.edu/~hastie/Papers/gap.pdf
:param X - pandas dataframe or numpy array of data points of shape (n_samples, n_features)
:param n_refs - int: Number of random reference data sets used as inertia reference to actual data.
:param cluster_array - 1d iterable of integers; each representing n_clusters to try on the data.
"""
# Convert the 1d array of n_clusters to try into an array
# Raise error if values are less than 1 or larger than the unique sample in the set.
cluster_array = np.array([x for x in cluster_array]).astype(int)
if np.where(cluster_array < 1)[0].shape[0]:
raise ValueError(
"cluster_array contains values less than 1: {}".format(
cluster_array[np.where(cluster_array < 1)[0]]
)
)
if cluster_array.shape[0] > X.shape[0]:
raise ValueError(
"The number of suggested clusters to try ({}) is larger than samples in dataset. ({})".format(
cluster_array.shape[0], X.shape[0]
)
)
if not cluster_array.shape[0]:
raise ValueError("The supplied cluster_array has no values.")
# Array of resulting gaps.
gap_df = pd.DataFrame({"n_clusters": [], "gap_value": []})
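# For reference, the gap statistic from the cited paper compares the log within-cluster
# dispersion of the data with that of n_refs uniform reference datasets:
#   Gap(k) = (1 / n_refs) * sum_b log(W*_kb) - log(W_k)
# and the suggested k is the smallest k such that Gap(k) >= Gap(k+1) - s_(k+1).
# (This summarizes Tibshirani et al., not the exact code path of this class.)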
import sys
import json
import socket
import spotipy
import asyncio
import webbrowser
from time import time
from spotipy import oauth2
import pandas as pd
import re
from datetime import datetime
from config import *
tracks_dict_names = ['id', 'duration_ms', 'href', 'name', 'popularity', 'uri', 'artists']
def listen_for_callback_code():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', int(redirect_uri.split(":")[-1])))
s.listen(1)
while True:
connection, address = s.accept()
buf = str(connection.recv(1024))
if len(buf) > 0:
break
start_code = buf.find("?code=") + 6
end_code = buf.find(" ", start_code)
if "&" in buf[start_code:end_code]:
end_code = buf.find("&")
return buf[start_code:end_code]
async def get_spotify_auth_code():
auth_url = sp_oauth.get_authorize_url()
webbrowser.open(auth_url)
async def async_get_auth_code():
task = asyncio.create_task(get_spotify_auth_code())
await task
return listen_for_callback_code()
def do_spotify_oauth():
try:
with open("token.json", "r") as fh:
token = fh.read()
token = json.loads(token)
except:
token = None
if token:
if int(time()) > (token["expires_at"] - 50): # Take 50s margin to avoid timeout while searching
print("Refreshing Spotify token")
token = sp_oauth.refresh_access_token(token["refresh_token"])
else:
authorization_code = asyncio.run(async_get_auth_code())
print(authorization_code)
if not authorization_code:
print("\n[!] Unable to authenticate to Spotify. Couldn't get authorization code")
sys.exit(-1)
token = sp_oauth.get_access_token(authorization_code)
if not token:
print("\n[!] Unable to authenticate to Spotify. Couldn't get access token.")
sys.exit(-1)
try:
with open("token.json", "w+") as fh:
fh.write(json.dumps(token))
except:
print("\n[!] Unable to to write token object to disk. This is non-fatal.")
return token
def get_all_playlists():
playlists_pager = spotify_ins.user_playlists(username)
playlists = playlists_pager["items"]
while playlists_pager["next"]:
playlists_pager = spotify_ins.next(playlists_pager)
playlists.extend(playlists_pager["items"])
return playlists
def create_playlist(playlist_name):
# TODO export parameter description
playlist = spotify_ins.user_playlist_create(username, playlist_name, description=playlist_description)
return playlist["id"]
def get_playlist_id(playlist_name):
playlists = get_all_playlists()
for playlist in playlists:
if playlist['owner']['id'] == username: # Can only modify playlist that the user owns
if playlist["name"] == playlist_name:
return playlist["id"]
return None
def do_durations_match(source_track_duration, found_track_duration, silent=silent_search):
if source_track_duration == found_track_duration:
if not silent: print("\t\t\t\t[+] Durations match")
return True
else:
if not silent: print("\t\t\t\t[!] Durations do not match")
return False
def most_popular_track(tracks):
# Popularity does not always yield the correct result
high_score = 0
winner = None
for track in tracks:
if track["popularity"] > high_score:
winner = track["id"]
high_score = track["popularity"]
return winner
def best_of_multiple_matches(source_track, found_tracks, silent=silent_search):
counter = 1
duration_matches = [0, ]
for track in found_tracks:
if not silent: print("\t\t\t[+] Match {}: {}".format(counter, track["id"]))
if do_durations_match(source_track["duration_ms"], track["duration_ms"]):
duration_matches[0] += 1
duration_matches.append(track)
counter += 1
if duration_matches[0] == 1:
best_track = duration_matches.pop()["id"]
if not silent: print(
"\t\t\t[+] Only one exact match with matching duration, going with that one: {}".format(best_track))
return best_track
# TODO: Popularity does not always yield the correct result
best_track = most_popular_track(found_tracks)
if not silent: print(
"\t\t\t[+] Multiple exact matches with matching durations, going with the most popular one: {}".format(
best_track))
return best_track
def search_for_track(track, silent=silent_search):
# TODO: This is repetitive, can probably refactor but works for now
if not silent: print("\n[+] Searching for track: {}{}by {} on {}".format(track["name"], " " if not track[
"mix"] else " ({}) ".format(track["mix"]), ", ".join(track["artists"]), track["release"]))
# Search with Title, Mix, Artists, and Release / Album
query = "{}{}{} {}".format(track["name"], " " if not track["mix"] else " {} ".format(track["mix"]),
" ".join(track["artists"]), track["release"])
if not silent: print("\t[+] Search Query: {}".format(query))
search_results = spotify_ins.search(query)
if len(search_results["tracks"]["items"]) == 1:
track_id = search_results["tracks"]["items"][0]["id"]
if not silent: print("\t\t[+] Found an exact match on name, mix, artists, and release: {}".format(track_id))
do_durations_match(track["duration_ms"], search_results["tracks"]["items"][0]["duration_ms"])
return track_id
if len(search_results["tracks"]["items"]) > 1:
if not silent: print("\t\t[+] Found multiple exact matches ({}) on name, mix, artists, and release.".format(
len(search_results["tracks"]["items"])))
return best_of_multiple_matches(track, search_results["tracks"]["items"])
# Not enough results, search w/o release
if not silent: print("\t\t[+] No exact matches on name, mix, artists, and release. Trying without release.")
# Search with Title, Mix, and Artists
query = "{}{}{}".format(track["name"], " " if not track["mix"] else " {} ".format(track["mix"]),
" ".join(track["artists"]))
if not silent: print("\t[+] Search Query: {}".format(query))
search_results = spotify_ins.search(query)
if len(search_results["tracks"]["items"]) == 1:
track_id = search_results["tracks"]["items"][0]["id"]
if not silent: print("\t\t[+] Found an exact match on name, mix, and artists: {}".format(track_id))
do_durations_match(track["duration_ms"], search_results["tracks"]["items"][0]["duration_ms"])
return track_id
if len(search_results["tracks"]["items"]) > 1:
if not silent: print("\t\t[+] Found multiple exact matches ({}) on name, mix, and artists.".format(
len(search_results["tracks"]["items"])))
return best_of_multiple_matches(track, search_results["tracks"]["items"])
# Not enough results, search w/o mix, but with release
if not silent: print("\t\t[+] No exact matches on name, mix, and artists. Trying without mix, but with release.")
query = "{} {} {}".format(track["name"], " ".join(track["artists"]), track["release"])
if not silent: print("\t[+] Search Query: {}".format(query))
search_results = spotify_ins.search(query)
if len(search_results["tracks"]["items"]) == 1:
track_id = search_results["tracks"]["items"][0]["id"]
if not silent: print("\t\t[+] Found an exact match on name, artists, and release: {}".format(track_id))
do_durations_match(track["duration_ms"], search_results["tracks"]["items"][0]["duration_ms"])
return track_id
if len(search_results["tracks"]["items"]) > 1:
if not silent: print("\t\t[+] Found multiple exact matches ({}) on name, artists, and release.".format(
len(search_results["tracks"]["items"])))
return best_of_multiple_matches(track, search_results["tracks"]["items"])
# Not enough results, search w/o mix or release
if not silent: print("\t\t[+] No exact matches on name, artists, and release. Trying with just name and artists.")
query = "{} {}".format(track["name"], " ".join(track["artists"]))
if not silent: print("\t[+] Search Query: {}".format(query))
search_results = spotify_ins.search(query)
if len(search_results["tracks"]["items"]) == 1:
track_id = search_results["tracks"]["items"][0]["id"]
if not silent: print("\t\t[+] Found an exact match on name and artists: {}".format(track_id))
do_durations_match(track["duration_ms"], search_results["tracks"]["items"][0]["duration_ms"])
return track_id
if len(search_results["tracks"]["items"]) > 1:
if not silent: print("\t\t[+] Found multiple exact matches ({}) on name and artists.".format(
len(search_results["tracks"]["items"])))
return best_of_multiple_matches(track, search_results["tracks"]["items"])
print("\t\t[+] No exact matches on name and artists v1 : {} - {}{}".format(track["artists"][0], track["name"],
"" if not track[
"mix"] else " - {}".format(
track["mix"])))
print("\t[!] Could not find this song on Spotify!")
return None
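# Example of the track dict shape search_for_track expects (all values hypothetical):
# track = {"name": "Some Track", "mix": "Original Mix", "artists": ["Some Artist"],
#          "release": "Some EP", "duration_ms": 372000}
# track_id = search_for_track(track)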
def parse_search_results_spotify(search_results, track, silent=silent_search):
"""
:param search_results: Spotify API search result
:param track: track dict to search
:param silent: If false print detailed search results
:return: track_id as string if found, else None
"""
track_id = None
if len(search_results["tracks"]["items"]) == 1:
track_id = search_results["tracks"]["items"][0]["id"]
if not silent: print("\t\t[+] Found an exact match on name, mix, artists, and release: {}".format(track_id))
do_durations_match(track["duration_ms"], search_results["tracks"]["items"][0]["duration_ms"])
return track_id
if len(search_results["tracks"]["items"]) > 1:
if not silent: print("\t\t[+] Found multiple exact matches ({}) on name, mix, artists, and release.".format(
len(search_results["tracks"]["items"])))
return best_of_multiple_matches(track, search_results["tracks"]["items"])
return track_id
def parse_track_regex_beatport(track):
track_out = track.copy() # Otherwise modifies the dict
track_out["name"] = re.sub(r'(\s*(Feat|feat|Ft|ft)\. [\w\s]*$)', '',
track_out["name"]) # Remove feat info, mostly not present in spotify
track_out["name"] = re.sub(r'\W', ' ',
track_out["name"]) # Remove special characters as they are not handled by Spotify API
return track_out
def parse_track_regex_beatport_v2(track):
track_out = track.copy() # Otherwise modifies the dict
track_out["name"] = re.sub(r'(\s*(Feat|feat|Ft|ft)\. [\w\s]*$)', '',
track_out["name"]) # Remove feat info, mostly not present in spotify
track_out["name"] = re.sub(r'[^\w\s]', '',
track_out["name"]) # Remove special characters as they are not handled by Spotify API
return track_out
def parse_track_regex_beatport_v3(track):
track_out = track.copy() # Otherwise modifies the dict
track_out["name"] = re.sub(r'(\s*(Feat|feat|Ft|ft)\. [\w\s]*$)', '',
track_out["name"]) # Remove feat info, mostly not present in spotify
track_out["name"] = re.sub(r'[^\w\s]', '',
track_out["name"]) # Remove special characters as they are not handled by Spotify API
track_out["mix"] = re.sub("[R|r]emix", 'mix',
track_out["mix"]) # Change remix
track_out["mix"] = re.sub("[M|m]ix", 'Remix',
track_out["mix"]) # Change to remix
return track_out
def parse_track_regex_beatport_v4(track):
track_out = track.copy() # Otherwise modifies the dict
track_out["name"] = re.sub(r'(\s*(Feat|feat|Ft|ft)\. [\w\s]*$)', '',
track_out["name"]) # Remove feat info, mostly not present in spotify
track_out["name"] = re.sub(r'[^\w\s]', '',
track_out["name"]) # Remove special characters as they are not handled by Spotify API
track_out["mix"] = re.sub("[M|m]ix", '',
track_out["mix"]) # Remove special characters as they are not handled by Spotify API
return track_out
def add_space(match):
return " " + match.group()
def search_for_track_v2(track, silent=silent_search, parse_track=parse_track):
"""
:param track: track dict
:param silent: if True, do not print detailed search output; only report tracks that are not found
:param parse_track: if true try to remove (.*) and mix information
:return: Spotify track_id
"""
if parse_track:
track_parsed = [track.copy(), parse_track_regex_beatport(track),
parse_track_regex_beatport_v2(track), parse_track_regex_beatport_v3(track),
parse_track_regex_beatport_v4(track)]
else:
track_parsed = [track]
for track_ in track_parsed:
# Create a field name mix according to Spotify formatting
track_["name_mix"] = "{}{}".format(track_["name"], "" if not track_["mix"] else " - {}".format(track_["mix"]))
# Create a parsed artist and try both
artist_search = [*track_["artists"]]
if parse_track:
# Add parsed artist if not in list already
artist_search.extend(x for x in [re.sub(r'\s*\([^)]*\)', '', artist_) for artist_ in track_["artists"]] if
x not in artist_search) # Remove (UK) for example
artist_search.extend(x for x in [re.sub(r'\W+', ' ', artist_) for artist_ in track_["artists"]] if
x not in artist_search) # Remove special characters, in case it is not handled by Spotify API
artist_search.extend(x for x in [re.sub(r'[^\w\s]', '', artist_) for artist_ in track_["artists"]] if
x not in artist_search) # Remove special characters, in case it is not handled by Spotify API
artist_search.extend(
x for x in [re.sub(r'(?<=\w)[A-Z]', add_space, artist_) for artist_ in track_["artists"]] if
x not in artist_search) # Splitting artist name with a space after a capital letter
artist_search.extend(x for x in [re.sub(r'\s&.*$', "", artist_) for artist_ in track_["artists"]] if
x not in artist_search) # Removing second part after &
# Search artist and artist parsed if parsed is on
for artist in artist_search:
# Search track name and track name without mix (even if parsed is off)
for track_name in [track_["name_mix"], track_["name"]]:
# Search with Title, Mix, Artist, Release / Album and Label
if not silent:
print("\n[+] Searching for track: {} by {} on {} on {} label".format(track_name, artist,
track_["release"],
track_["label"]))
query = 'track:"{}" artist:"{}" album:"{}" label:"{}"'.format(track_name, artist,
track_["release"],
track_["label"])
if not silent:
print("\t[+] Search Query: {}".format(query))
search_results = spotify_ins.search(query)
track_id = parse_search_results_spotify(search_results, track_)
if track_id:
return track_id
# Search with Title, Mix, Artist and Label, w/o Release / Album
if not silent:
print("\n[+] Searching for track: {} by {} on {} label".format(track_name, artist, track_["label"]))
query = 'track:"{}" artist:"{}" label:"{}"'.format(track_name, artist, track_["label"])
if not silent:
print("\t[+] Search Query: {}".format(query))
search_results = spotify_ins.search(query)
track_id = parse_search_results_spotify(search_results, track_)
if track_id:
return track_id
# Search with Title, Mix, Artist, Release / Album, w/o Label
if not silent:
print(
"\n[+] Searching for track: {} by {} on {} album".format(track_name, artist, track_["release"]))
query = 'track:"{}" artist:"{}" album:"{}"'.format(track_name, artist, track_["release"])
if not silent:
print("\t[+] Search Query: {}".format(query))
search_results = spotify_ins.search(query)
track_id = parse_search_results_spotify(search_results, track_)
if track_id:
return track_id
# Search with Title, Artist, Release / Album and Label, w/o Release and Label
if not silent:
print("\n[+] Searching for track: {} by {}".format(track_name, artist))
query = 'track:"{}" artist:"{}"'.format(track_name, artist)
if not silent:
print("\t[+] Search Query: {}".format(query))
search_results = spotify_ins.search(query)
track_id = parse_search_results_spotify(search_results, track_)
if track_id:
return track_id
print("\t[+] No exact matches on name and artists v2 : {} - {}{}".format(track["artists"][0], track["name"],
"" if not track[
"mix"] else " - {}".format(
track["mix"])))
# Possible to use return search_for_track(track) but do not improve search results
return None
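# For illustration, one of the fielded queries built above would look like the following
# (all values hypothetical):
#   track:"Some Track - Extended Remix" artist:"Some Artist" album:"Some EP" label:"Some Label"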
def track_in_playlist(playlist_id, track_id):
for track in get_all_tracks_in_playlist(playlist_id):
if track["track"]["id"] == track_id:
return True
return False
def add_tracks_to_playlist(playlist_id, track_ids):
if track_ids:
spotify_auth()
spotify_ins.user_playlist_add_tracks(username, playlist_id, track_ids)
def get_all_tracks_in_playlist(playlist_id):
playlist_tracks_results = spotify_ins.user_playlist(username, playlist_id, fields="tracks")
playlist_tracks_pager = playlist_tracks_results["tracks"]
playlist_tracks = playlist_tracks_pager["items"]
while playlist_tracks_pager["next"]:
playlist_tracks_pager = spotify_ins.next(playlist_tracks_pager)
playlist_tracks.extend(playlist_tracks_pager["items"])
return playlist_tracks
def clear_playlist(playlist_id):
for track in get_all_tracks_in_playlist(playlist_id):
spotify_ins.user_playlist_remove_all_occurrences_of_tracks(username, playlist_id, [track["track"]["id"], ])
def add_new_tracks_to_playlist(genre, tracks_dict):
# TODO export playlist anterior name to config
# persistent_top_100_playlist_name = "{}{} - Top 100".format(playlist_prefix, genre)
# daily_top_10_playlist_name = "{}{} - Daily Top".format(playlist_prefix, genre)
persistent_top_100_playlist_name = "Beatporter: {} - Top 100".format(genre)
daily_top_n_playlist_name = "Beatporter: {} - Daily Top".format(genre)
print("[+] Identifying new tracks for playlist: \"{}\"".format(persistent_top_100_playlist_name))
if daily_mode:
playlists = [
{"name": persistent_top_100_playlist_name, "id": get_playlist_id(persistent_top_100_playlist_name)},
{"name": daily_top_n_playlist_name, "id": get_playlist_id(daily_top_n_playlist_name)}]
else:
playlists = [
{"name": persistent_top_100_playlist_name, "id": get_playlist_id(persistent_top_100_playlist_name)}]
for playlist in playlists:
if not playlist["id"]:
print("\t[!] Playlist \"{}\" does not exist, creating it.".format(playlist["name"]))
playlist["id"] = create_playlist(playlist["name"])
if daily_mode:
# Clear daily playlist
clear_playlist(playlists[1]["id"])
persistent_top_100_track_ids = list()
daily_top_n_track_ids = list()
track_count = 0
for track in tracks_dict:
track_id = search_for_track_v2(track)
if track_id and not track_in_playlist(playlists[0]["id"], track_id):
persistent_top_100_track_ids.append(track_id)
if track_id and track_count < daily_n_track:
daily_top_n_track_ids.append(track_id)
track_count += 1
print("\n[+] Adding {} new tracks to the playlist: \"{}\"".format(len(persistent_top_100_track_ids),
persistent_top_100_playlist_name))
add_tracks_to_playlist(playlists[0]["id"], persistent_top_100_track_ids)
if daily_mode:
print("\n[+] Adding {} new tracks to the playlist: \"{}\"".format(len(daily_top_n_track_ids),
daily_top_n_playlist_name))
add_tracks_to_playlist(playlists[1]["id"], daily_top_n_track_ids)
def parse_tracks_spotify(tracks_json):
tracks = list()
for track in tracks_json["tracks"]:
tracks.append(
{
"title": track["title"],
"name": track["name"],
"mix": track["mix"],
"artists": [artist["name"] for artist in track["artists"]],
"remixers": [remixer["name"] for remixer in track["remixers"]],
"release": track["release"]["name"],
"label": track["label"]["name"],
"published_date": track["date"]["published"],
"released_date": track["date"]["released"],
"duration": track["duration"]["minutes"],
"duration_ms": track["duration"]["milliseconds"],
"genres": [genre["name"] for genre in track["genres"]],
"bpm": track["bpm"],
"key": track["key"]
}
)
return tracks
def parse_artist(value, key):
# TODO find better method
if key == 'artists':
value = value[0]['name']
else:
value
return value
def update_hist_pl_tracks(df_hist_pl_tracks, playlist):
"""
:param df_hist_pl_tracks: dataframe of history of track id and playlist id
:param playlist: dict typ playlist = {"name": playlist_name, "id": playlist_id}
:return: updated df_hist_pl_tracks
"""
# TODO find better method
track_list = get_all_tracks_in_playlist(playlist["id"])
df_tracks = pd.DataFrame.from_dict(track_list)
if len(df_tracks.index) > 0:
df_tracks['track'] = [{key: value for key, value in track.items() if key in tracks_dict_names} for track in
df_tracks['track']]
df_tracks['track'] = [{key: parse_artist(value, key) for key, value in track.items()} for track in
df_tracks['track']]
df_tracks_o = pd.DataFrame()
for row in df_tracks.iterrows():
df_tracks_o = df_tracks_o.append(pd.DataFrame(row[1]['track'], index=[0]))
df_tracks_o = df_tracks_o.loc[:, tracks_dict_names].reset_index(drop=True)
df_tracks_o['artist_name'] = df_tracks_o['artists'] + " - " + df_tracks_o['name']
df_tracks = pd.concat([df_tracks_o, df_tracks.loc[:, 'added_at']], axis=1)
df_temp = df_tracks.loc[:, ['id', 'added_at', 'artist_name']]
df_temp['playlist_id'] = playlist["id"]
df_temp = df_temp.rename(columns={'id': 'track_id', 'added_at': 'datetime_added'})
df_hist_pl_tracks = df_hist_pl_tracks.append(df_temp).drop_duplicates().reset_index(drop=True)
return (df_hist_pl_tracks)
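# The history dataframe used throughout carries the columns
# ['playlist_id', 'track_id', 'datetime_added', 'artist_name']; an empty one can be built
# the same way the digging-mode branches below do:
# df_hist_pl_tracks = pd.DataFrame(columns=['playlist_id', 'track_id', 'datetime_added', 'artist_name'])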
def find_playlist_chart_label(title):
"""
:param title: chart or label title
:return: dict of playlist name and playlist ID, playlist ID is None if not found
"""
persistent_playlist_name = "{}{}".format(playlist_prefix, title)
playlist = {"name": persistent_playlist_name, "id": get_playlist_id(persistent_playlist_name)}
return playlist
def add_new_tracks_to_playlist_chart_label(title, tracks_dict, df_hist_pl_tracks, use_prefix=True,
silent=silent_search):
"""
:param title: Chart or label playlist title
:param tracks_dict: dict of tracks to add
:param df_hist_pl_tracks: dataframe of history of track, will not add track_id already present
:param use_prefix: add a prefix to the playlist name as defined in config
:param silent: If true do not display searching details except errors
:return: updated df_hist_pl_tracks
"""
# TODO Refresh oauth to avoid time out
spotify_auth()
# TODO export playlist anterior name to config
if use_prefix:
persistent_playlist_name = "{}{}".format(playlist_prefix, title)
else:
persistent_playlist_name = title
print("[+] Identifying new tracks for playlist: \"{}\"".format(persistent_playlist_name))
playlist = {"name": persistent_playlist_name, "id": get_playlist_id(persistent_playlist_name)}
if not playlist["id"]:
print("\t[!] Playlist \"{}\" does not exist, creating it.".format(playlist["name"]))
playlist["id"] = create_playlist(playlist["name"])
df_hist_pl_tracks = update_hist_pl_tracks(df_hist_pl_tracks, playlist)
playlist_track_ids = df_hist_pl_tracks.loc[df_hist_pl_tracks["playlist_id"] == playlist["id"], "track_id"]
if digging_mode == "playlist":
df_local_hist = df_hist_pl_tracks.loc[df_hist_pl_tracks["playlist_id"] == playlist["id"]]
elif digging_mode == "all":
df_local_hist = df_hist_pl_tracks
else:
df_local_hist = pd.DataFrame(columns=['playlist_id', 'track_id', 'datetime_added', 'artist_name'])
persistent_track_ids = list()
track_count = 0
track_count_tot = 0
for track in tracks_dict:
track_count_tot += 1
track_artist_name = track['artists'][0] + " - " + track['name'] + " - " + track["mix"]
if not silent:
print("{}% : {} : nb {} out of {}".format(str(round(track_count_tot / len(tracks_dict) * 100, 2)),
track_artist_name, track_count_tot, len(tracks_dict)))
if track_artist_name not in df_local_hist.values:
track_id = search_for_track_v2(track)
if track_id and track_id not in playlist_track_ids.values and track_id not in df_local_hist.values:
if not silent:
print("\t[+] Adding track id : {} : nb {}".format(track_id, track_count))
persistent_track_ids.append(track_id)
track_count += 1
if track_count >= 99:  # Spotify limits playlist additions to 100 tracks per request
print("\n[+] Adding {} new tracks to the playlist: \"{}\"".format(len(persistent_track_ids),
persistent_playlist_name))
add_tracks_to_playlist(playlist["id"], persistent_track_ids)
# TODO consider only adding new ID to avoid reloading large playlist
df_hist_pl_tracks = update_hist_pl_tracks(df_hist_pl_tracks, playlist)
playlist_track_ids = df_hist_pl_tracks.loc[
df_hist_pl_tracks["playlist_id"] == playlist["id"], "track_id"]
track_count = 0
persistent_track_ids = list()
update_playlist_description_with_date(playlist)
else:
if not silent:
print("\tSimilar track name already found")
if track_count_tot % refresh_token_n_tracks == 0: # Avoid time out
spotify_auth()
print("[+] Identifying new tracks for playlist: \"{}\"\n".format(persistent_playlist_name))
print("\n[+] Adding {} new tracks to the playlist: \"{}\"".format(len(persistent_track_ids),
persistent_playlist_name))
if len(persistent_track_ids) > 0:
add_tracks_to_playlist(playlist["id"], persistent_track_ids)
update_playlist_description_with_date(playlist)
df_hist_pl_tracks = update_hist_pl_tracks(df_hist_pl_tracks, playlist)
return df_hist_pl_tracks
def add_new_tracks_to_playlist_id(playlist_name, track_ids, df_hist_pl_tracks, silent=silent_search):
"""
:param playlist_name: Playlist name to be used, will not be modified
:param track_ids: dict of tracks with their IDS
:param df_hist_pl_tracks: dataframe of history of track, will not add track_id already present
:param silent: If true do not display searching details except errors
:return: updated df_hist_pl_tracks
"""
# TODO unify all add_new_track in one function
# TODO Refresh oauth to avoid time out
spotify_auth()
# TODO export playlist prefix name to config
persistent_playlist_name = playlist_name
print("[+] Identifying new tracks for playlist: \"{}\"".format(persistent_playlist_name))
playlist = {"name": persistent_playlist_name, "id": get_playlist_id(persistent_playlist_name)}
if not playlist["id"]:
print("\t[!] Playlist \"{}\" does not exist, creating it.".format(playlist["name"]))
playlist["id"] = create_playlist(playlist["name"])
df_hist_pl_tracks = update_hist_pl_tracks(df_hist_pl_tracks, playlist)
playlist_track_ids = df_hist_pl_tracks.loc[df_hist_pl_tracks["playlist_id"] == playlist["id"], "track_id"]
if digging_mode == "playlist":
df_local_hist = df_hist_pl_tracks.loc[df_hist_pl_tracks["playlist_id"] == playlist["id"]]
elif digging_mode == "all":
df_local_hist = df_hist_pl_tracks
else:
df_local_hist = pd.DataFrame(columns=['playlist_id', 'track_id', 'datetime_added', 'artist_name'])
persistent_track_ids = list()
track_count = 0
track_count_tot = 0
for track in track_ids:
if track['track'] is not None: # Prevent error of empty track
track_id = track['track']['id']
track_count_tot += 1
if track_id not in df_local_hist.values:
if track_id not in playlist_track_ids.values:
if not silent:
print("\t[+] Adding track id : {} : nb {}".format(track_id, track_count))
persistent_track_ids.append(track_id)
track_count += 1
if track_count >= 99:  # Spotify limits playlist additions to 100 tracks per request
print("\n[+] Adding {} new tracks to the playlist: \"{}\"".format(len(persistent_track_ids),
persistent_playlist_name))
add_tracks_to_playlist(playlist["id"], persistent_track_ids)
# TODO consider only adding new ID to avoid reloading large playlist
df_hist_pl_tracks = update_hist_pl_tracks(df_hist_pl_tracks, playlist)
playlist_track_ids = df_hist_pl_tracks.loc[
df_hist_pl_tracks["playlist_id"] == playlist["id"], "track_id"]
track_count = 0
persistent_track_ids = list()
update_playlist_description_with_date(playlist)
else:
if not silent:
print("\tTrack already found in playlist or history")
if track_count_tot % refresh_token_n_tracks == 0: # Avoid time out
spotify_auth()
print("[+] Identifying new tracks for playlist: \"{}\"\n".format(persistent_playlist_name))
print("\n[+] Adding {} new tracks to the playlist: \"{}\"".format(len(persistent_track_ids),
persistent_playlist_name))
if len(persistent_track_ids) > 0:
add_tracks_to_playlist(playlist["id"], persistent_track_ids)
update_playlist_description_with_date(playlist)
df_hist_pl_tracks = update_hist_pl_tracks(df_hist_pl_tracks, playlist)
return df_hist_pl_tracks
def add_new_tracks_to_playlist_genre(genre, top_100_chart, df_hist_pl_tracks, silent=silent_search):
"""
:param genre: Genre name
:param top_100_chart: dict of tracks to add
:param df_hist_pl_tracks: dataframe of history of track, will not add track_id already present
:param silent: If true do not display searching details except errors
:return: updated df_hist_pl_tracks
"""
# TODO export playlist anterior name to config
# persistent_top_100_playlist_name = "{}{} - Top 100".format(playlist_prefix, genre)
# daily_top_10_playlist_name = "{}{} - Daily Top".format(playlist_prefix, genre)
persistent_top_100_playlist_name = "Beatporter: {} - Top 100".format(genre)
daily_top_n_playlist_name = "Beatporter: {} - Daily Top".format(genre)
print("[+] Identifying new tracks for playlist: \"{}\"".format(persistent_top_100_playlist_name))
if daily_mode:
playlists = [
{"name": persistent_top_100_playlist_name, "id": get_playlist_id(persistent_top_100_playlist_name)},
{"name": daily_top_n_playlist_name, "id": get_playlist_id(daily_top_n_playlist_name)}]
else:
playlists = [
{"name": persistent_top_100_playlist_name, "id": get_playlist_id(persistent_top_100_playlist_name)}]
for playlist in playlists:
if not playlist["id"]:
print("\t[!] Playlist \"{}\" does not exist, creating it.".format(playlist["name"]))
playlist["id"] = create_playlist(playlist["name"])
df_hist_pl_tracks = update_hist_pl_tracks(df_hist_pl_tracks, playlist)
# Create local hist for top 100 playlist
if digging_mode == "playlist":
df_local_hist = df_hist_pl_tracks.loc[df_hist_pl_tracks["playlist_id"] == playlists[0]["id"]]
elif digging_mode == "all":
df_local_hist = df_hist_pl_tracks
else:
df_local_hist = pd.DataFrame(columns=['playlist_id', 'track_id', 'datetime_added', 'artist_name'])
playlist_track_ids = df_hist_pl_tracks.loc[df_hist_pl_tracks["playlist_id"] == playlists[0]["id"], "track_id"]
if daily_mode:
if digging_mode == "":
# Clear daily playlist if digging mode is not using hist otherwise will delete tracks not yet listened
clear_playlist(playlists[1]["id"])
df_hist_pl_tracks = update_hist_pl_tracks(df_hist_pl_tracks, playlists[1])
playlist_track_ids_daily = pd.Series([], name="track_id", dtype=object)
import argparse
from collections import Counter, defaultdict
from identityholder import IdentityHolder
import liuwangcycles
from minimumtopologies import ispositive, ismutualinhibition
import networkx as nx
import pandas as pd
# PyPy virtual environment recommended for performance
def countmotifs(network, max_cycle_length=None, max_motif_size=None, check_nfl=False, callback=None):
"""
Systematically count/enumerate instances of high-feedback topologies in a network.
Each instance is a way to choose a set of edges such that no edge can be removed without abolishing the high-feedback nature of the subnetwork.
This is similar to the count of "minimum topologies" from Ye et al. 2019, but some topologies with more than the required number of cycles can
still be considered minimal by this function if removing any edge would leave fewer cycles than necessary.
Arguments:
- network: NetworkX DiGraph representing the network
- max_cycle_length: maximum length of cycle to enumerate (needed for moderately large networks since all cycles must be held in memory)
- max_motif_size: maximum size in nodes of counted motif instances
- check_nfl: whether to count excitable and mixed-sign high-feedback motifs, which require enumerating negative feedback loops as well (much slower)
- callback: callable to receive progress information and motif instances
The callback, if provided, must take two positional arguments: notification type and data. These notifications will be given:
- stage: the counting process moved into a new stage, name specified in data as string
- instance: a motif instance was found; data is a tuple of the motif name, frozenset of nodes involved, and list of cycle holders
- cycle_count: the number of cycles was determined, specified in data as int
- overlap_count: the number of cycle intersections was determined, specified in data as int
- overlap_progress: progress in enumerating triangles in the cycle intersection graph (e.g. Type 1), data is number of cycle intersections checked
- cycle_progress: progress in enumerating 3-paths in the cycle intersection graph (e.g. Type 2), data is number of cycles checked
Returns a tuple: counts of PFLs, Type 1, Type 2, MISA, MISSA, mini-MISSA, mixed-sign high-feedback, excitable.
"""
if callback is not None:
callback('stage', 'Finding cycles')
cycle_sets = [IdentityHolder(frozenset(cycle), (cycle, ispositive(network, cycle), hasrepression(network, cycle))) for
cycle in liuwangcycles.generatecycles(network, max_cycle_length) if check_nfl or ispositive(network, cycle)]
cycle_edge_sets = dict()
for holder in cycle_sets:
cycle = holder.tag[0]
edge_set = frozenset((cycle[n], cycle[(n + 1) % len(cycle)]) for n in range(len(cycle)))
cycle_edge_sets[holder] = edge_set
if callback is not None:
callback('instance', ('Cycle', holder.value, [holder]))
if holder.tag[1]:
callback('instance', ('PFL', holder.value, [holder]))
if callback is not None:
callback('stage', 'Creating cycle intersection graphs')
cycle_graph = nx.Graph()
cycle_graph.add_nodes_from(cycle_sets)
cycle_edge_graph = nx.Graph()
cycle_edge_graph.add_nodes_from(cycle_sets)
for i1, holder1 in enumerate(cycle_sets):
for i2 in range(i1 + 1, len(cycle_sets)):
holder2 = cycle_sets[i2]
shared_nodes = holder1.value.intersection(holder2.value)
if len(shared_nodes) > 0:
cycle_graph.add_edge(holder1, holder2, shared=shared_nodes)
shared_edges = cycle_edge_sets[holder1].intersection(cycle_edge_sets[holder2])
if len(shared_edges) > 0:
cycle_edge_graph.add_edge(holder1, holder2, shared=shared_edges)
def findinducedcycles(holders, ignoring=None):
common_neighbors = set(cycle_edge_graph[holders[0]])
for cn in range(1, len(holders)):
common_neighbors.intersection_update(set(cycle_edge_graph[holders[cn]]))
for common_neighbor in common_neighbors:
if common_neighbor is ignoring:
continue
all_edges = cycle_edge_sets[holders[0]]
for cn in range(1, len(holders)):
all_edges = all_edges.union(cycle_edge_sets[holders[cn]])
if cycle_edge_sets[common_neighbor] < all_edges:
yield common_neighbor
def coverextrapfl(holder1, holder2):
for common_neighbor in set(cycle_edge_graph[holder1]).intersection(set(cycle_edge_graph[holder2])):
if common_neighbor.tag[1] and cycle_edge_sets[common_neighbor] < cycle_edge_sets[holder1].union(cycle_edge_sets[holder2]):
return True
return False
if callback is not None:
callback('cycle_count', len(cycle_sets))
callback('overlap_count', len(cycle_graph.edges))
callback('stage', 'Searching for Type I and mixed-sign high-feedback motifs')
type1 = 0
mixed = 0
for triplet in findtriangles(cycle_graph, callback):
a, b, c = triplet
possible_type1 = (not check_nfl) or (a.tag[1] and b.tag[1] and c.tag[1])
if check_nfl and not (a.tag[1] or b.tag[1] or c.tag[1]):
continue
shared_nodes = cycle_graph.edges[a, b]['shared'].intersection(cycle_graph.edges[b, c]['shared'])
if len(shared_nodes) == 0:
continue
if not shared_nodes.isdisjoint(cycle_graph.edges[a, c]['shared']):
all_nodes = a.value.union(b.value).union(c.value) # Not a performance problem - big networks will have a max motif size set anyway
if max_motif_size and len(all_nodes) > max_motif_size:
continue
extra_cycles = list({*findinducedcycles([a, b], c), *findinducedcycles([a, c], b), *findinducedcycles([b, c], a), *findinducedcycles([a, b, c])})
relevant_extras = [c for c in extra_cycles if c.tag[1]] if possible_type1 else extra_cycles
if len(relevant_extras) > 0:
double_counting = False
if possible_type1:
double_counting = any(extra.isbefore(c) for extra in relevant_extras)
else:
rare_sign = a.tag[1] ^ b.tag[1] ^ c.tag[1] # True if there's only one PFL
for holder in triplet:
if any(extra.isbefore(holder) and (extra.tag[1] == rare_sign or holder.tag[1] != rare_sign) for extra in extra_cycles):
double_counting = True
break
if double_counting:
continue
all_cycles = [a, b, c] + relevant_extras
all_edges = cycle_edge_sets[a].union(cycle_edge_sets[b]).union(cycle_edge_sets[c])
found_extra = False
for edge in all_edges:
node_uses_after_elimination = Counter()
pfls_after_elimination = 0
nfls_after_elimination = 0
for cycle in all_cycles:
if edge not in cycle_edge_sets[cycle]:
node_uses_after_elimination.update(cycle.value)
if cycle.tag[1]:
pfls_after_elimination += 1
else:
nfls_after_elimination += 1
still_mixed = pfls_after_elimination > 0 and nfls_after_elimination > 0
if (possible_type1 or still_mixed) and len(node_uses_after_elimination) > 0 and max(node_uses_after_elimination.values()) >= 3:
found_extra = True
break
if found_extra:
continue
if possible_type1:
type1 += 1
if callback is not None:
callback('instance', ('Type1', all_nodes, triplet))
else:
mixed += 1
if callback is not None:
callback('instance', ('MixHF', all_nodes, triplet))
if callback is not None:
callback('stage', 'Checking fused pairs')
missa = 0
minimissa = 0
excitable = 0
for pair in cycle_graph.edges:
holder1, holder2 = pair
all_nodes = holder1.value.union(holder2.value)
if max_motif_size and len(all_nodes) > max_motif_size:
continue
if holder1.tag[1] and holder2.tag[1] and (holder1.tag[2] != holder2.tag[2]):
missa += 1
if callback is not None:
callback('instance', ('MISSA', all_nodes, pair))
if len(holder1.value) == 1 or len(holder2.value) == 1:
minimissa += 1
if callback is not None:
callback('instance', ('uMISSA', all_nodes, pair))
elif check_nfl and holder1.tag[1] != holder2.tag[1]:
excitable += 1
if callback is not None:
callback('instance', ('Excite', all_nodes, pair))
if callback is not None:
callback('stage', 'Searching for Type II and MISA motifs')
checked = 0
type2 = 0
misa = 0
for holder in cycle_sets:
if not holder.tag[1]:
continue
neighbors = [h for h in cycle_graph.neighbors(holder) if h.tag[1]]
for i1, neigh1 in enumerate(neighbors):
if coverextrapfl(holder, neigh1):
continue
for i2 in range(i1 + 1, len(neighbors)):
neigh2 = neighbors[i2]
all_nodes = holder.value.union(neigh1.value).union(neigh2.value)
if max_motif_size and len(all_nodes) > max_motif_size:
continue
if cycle_graph.has_edge(neigh1, neigh2):
continue
if coverextrapfl(holder, neigh2):
continue
type2 += 1
triplet = (neigh1, holder, neigh2)
if callback is not None:
callback('instance', ('Type2', all_nodes, triplet))
if ismutualinhibition(network, holder.tag[0], neigh1.value, neigh2.value):
misa += 1
if callback is not None:
callback('instance', ('MISA', all_nodes, triplet))
if callback is not None:
checked += 1
callback('cycle_progress', checked)
pfls = sum(1 for holder in cycle_sets if holder.tag[1])
return (pfls, type1, type2, misa, missa, minimissa, mixed, excitable)
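# Example of a callback that records motif instances as they are found (the notification
# names follow the countmotifs docstring; `graph` is assumed to be a NetworkX DiGraph):
# def record_instances(notification, data):
#     if notification == 'instance':
#         motif_name, node_ids, _cycles = data
#         print(motif_name, sorted(node_ids))
# counts = countmotifs(graph, max_cycle_length=6, callback=record_instances)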
def hasrepression(graph, cycle):
"""Return whether the cycle (list of node IDs) in the graph (NetworkX DiGraph) includes any repressions."""
return any(graph.edges[cycle[i], cycle[(i + 1) % len(cycle)]]['repress'] for i in range(len(cycle)))
def findtriangles(graph, callback):
"""
Generate triangles (tuples of cycle holders) of the cycle intersection graph for countmotifs.
To avoid duplicates, the third node C is always after A and B in the IdentityHolder total order.
"""
checked = 0
for a, b in graph.edges:
for c in frozenset(graph[a]).intersection(frozenset(graph[b])):
if a.isbefore(c) and b.isbefore(c):
yield (a, b, c)
if callback is not None:
checked += 1
if checked % 100 == 0:
callback('overlap_progress', checked)
def countmotifspernode(callback, *args, **kwargs):
"""
Systematically count motif instances and how many of each motif each node is involved in.
Wraps countmotifs. The callback, if specified, will not receive "instance" notifications.
Returns a tuple: countmotifs results tuple, dict of dicts {motif: {node: instances}}.
"""
motif_involvement = defaultdict(Counter)
def counting_callback(notification, data):
if notification == 'instance':
motif, node_ids, _ = data
motif_involvement[motif].update(node_ids)
elif callback is not None:
callback(notification, data)
counts = countmotifs(*args, **kwargs, callback=counting_callback)
named_motif_involvement = {motif: {graph.nodes[node]['name']: counts for node, counts in motif_counts.items()} for motif, motif_counts in motif_involvement.items()}
return counts, named_motif_involvement
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str, help='GraphML file to process')
parser.add_argument('--maxcycle', type=int, help='maximum cycle length')
parser.add_argument('--maxnodes', type=int, help='maximum number of nodes in a motif')
parser.add_argument('--checknfl', action='store_true', help='also search for motifs involving negative feedback')
parser.add_argument('--nodecounts', nargs='?', const=1, type=str, help='count how many motifs each node is in (optional CSV output)')
parser.add_argument('-q', '--quiet', action='store_true', help='do not print progress')
args = parser.parse_args()
graph = nx.convert_node_labels_to_integers(nx.read_graphml(args.file))
cycle_count, node_sharing_count = 0, 0
def progress_callback(notification, data):
global cycle_count, node_sharing_count
if notification == 'stage':
print(data)
elif notification == 'cycle_count':
cycle_count = data
print(f'{cycle_count} cycles, ', end='')
elif notification == 'overlap_count':
node_sharing_count = data
print(f'{node_sharing_count} node sharings')
elif notification == 'overlap_progress':
print(f'{data}/{node_sharing_count}\r', end='')
elif notification == 'cycle_progress':
print(f'{data}/{cycle_count}\r', end='')
callback = None if args.quiet else progress_callback
if args.nodecounts is not None:
result, motif_involvement = countmotifspernode(callback, graph, args.maxcycle, args.maxnodes, args.checknfl)
else:
result = countmotifs(graph, args.maxcycle, args.maxnodes, args.checknfl, callback)
print(''.ljust(20, ' '), '\r', sep='', end='')
print('PFL', result[0], '\nType1', result[1], '\nType2', result[2], '\nMISA', result[3], '\nMISSA', result[4], '\nuMISSA', result[5], sep='\t')
if args.checknfl:
print('MixHF', result[6], '\nExcite', result[7], sep='\t')
if args.nodecounts is not None:
df = pd.DataFrame.from_dict(motif_involvement)
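# A natural follow-up, assuming --nodecounts was given a CSV path (see its
# "optional CSV output" help text above), would be:
# if isinstance(args.nodecounts, str):
#     df.to_csv(args.nodecounts)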
import matplotlib
#matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import re
import logging
from matplotlib.backends.backend_pdf import PdfPages
from cell_cycle_gating import dead_cell_filter as dcf
from cell_cycle_gating import dead_cell_filter_ldrint as dcf_int
from cell_cycle_gating import cellcycle_phases as cc
from cell_cycle_gating import ph3_filter as pf
from cell_cycle_gating.findpeaks import get_kde, findpeaks, get_prominence_reference_level
import seaborn as sns
def run(data, ndict, dfm=None,
ph3_channel=True, ldr_channel=True,
px_edu=None, x_ldr=None, control_based_gating=False,
control_gates=None, fudge_gates=np.array([0, 0, 0, 0]),
system=None, header=7, ldr_gates=None,
nuclei_population_name="Nuclei Selected"):
"""Executes cell cycle gating on all wells for which object level
data is available in object_level_directory. Plots and saves summary pdf
of DNA v EdU distribution with automated gatings. A dataframe summarizing
results for each well is also saved.
Parameters
----------
data : str
path to a folder containing object level data for a single plate, or a single object level data file
dfm : Optional[pandas dataframe]
metadata table of experimental design. Default is None.
ph3_channel : Optional[bool]
True if data on pH3 intensity is in object level data.
nuclei_population_name : Optional[str]
Name of the nuclei "population" in your Harmony analysis protocol. Used
to identify which data files to read.
Returns
-------
df : pandas dataframe
well level summary of number of live/dead cells and fraction of
cells in each phase of the cell cycle
"""
if not os.path.isdir('results'):
os.mkdir('results')
if not os.path.isdir('logs'):
os.mkdir('logs')
plt.ioff()
df_summary = pd.DataFrame()
identity_dict = {}
df_gates = pd.DataFrame()
if os.path.isdir(data):
logfile = "logs/%s.log" % data.split('[')[0]
if dfm is not None:
dfm_ord = merge_metadata(dfm, data, nuclei_population_name)
if control_based_gating:
dfm_ord = dfm_ord[dfm_ord.agent == 'DMSO'].copy()
object_level_data = dfm_ord['object_level_file'].tolist()
else:
object_level_data = [s for s in os.listdir(data)
if f'{nuclei_population_name}[0].txt' in s]
dfm_ord=None
else:
logfile = "logs/%s.log" % data.split('.txt')[0]
df_input = pd.read_table(data, header=header)
df_input = df_input.rename(columns=ndict)
if dfm is not None:
barcode = data.split('.txt')[0]
dfm = dfm[dfm.barcode == barcode].copy()
metadata_wells = dfm.well.unique()
df_input['well'] = df_input['well'].astype("category")
df_input.well.cat.set_categories(metadata_wells, inplace=True)
df_input = df_input.sort_values(['well'])
dfm_ord = dfm.sort_values(['cell_line', 'agent', 'concentration'])
dfm_ord.index = dfm_ord['well']
if control_based_gating:
dfm_ord = dfm_ord[dfm_ord.agent == 'DMSO'].copy()
object_level_data = dfm_ord.well.unique()
else:
dfm_ord = None
object_level_data = df_input.well.unique()
logger = logging.getLogger()
if (logger.hasHandlers()):
logger.handlers.clear()
logger.setLevel(logging.DEBUG)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter("%(levelname)s - %(message)s")
# tell the handler to use this format
console.setFormatter(formatter)
logger.addHandler(console)
errorfile = logging.FileHandler(logfile)
errorfile.setLevel(logging.ERROR)
formatter = logging.Formatter("%(levelname)s - %(message)s")
errorfile.setFormatter(formatter)
logger.addHandler(errorfile)
#logging.basicConfig(filename=logfile, level=logging.ERROR)
nb_plots = len(object_level_data)
if control_based_gating:
pdf_pages = PdfPages('results/control_summary_%s.pdf' % data.split('.txt')[0])
else:
pdf_pages = PdfPages('results/summary_%s.pdf' % data.split('.txt')[0])
nb_plots_per_page = 10
# nb_pages = int(np.ceil(nb_plots / float(nb_plots_per_page)))
for i, file in enumerate(object_level_data):
if i % nb_plots_per_page == 0:
fig = plt.figure(figsize=(8.27, 11.69), dpi=100)
try:
if os.path.isdir(data):
df = pd.read_table('%s/%s' % (data, file))
well = re.search('result.(.*?)\[', file).group(1)
well = "%s%s" % (well[0], well[1:].zfill(2))
df['well'] = well
else:
df = df_input[df_input.well == file].copy()
df['ldrint'] = df['Cell: LDRrawINT (DDD-bckgrnd)'] - \
df['Cell: LDRbackground (DDD-bckgrnd)']
well = file
if 'dna' not in df.columns.tolist():
df['dna'] = df['Cell: Average Intensity_Average (DDD)'].multiply(
df['Cell: Area_Average (DDD)'])
if ph3_channel and 'ph3' not in df.columns.tolist():
df['ph3'] = df['Cell: pH3rawINT (DDD-bckgrnd)'] - \
df['Cell: pH3background (DDD-bckgrnd)']
df = map_channel_names(df, ndict)
# if system=='ixm':
# edu = np.array(df['edu'].tolist())
# if ph3_channel:
# ph3 = np.array(df['ph3'].tolist())
# cells_notnan = ~(np.isnan(edu) | np.isnan(ph3))
# else:
# cells_notnan = ~np.isnan(edu)
# ldr = np.array(df['ldr'].tolist())
# ldr = ldr[cells_notnan]
# ldr_min = np.max((100, ldr.min() - 100))
# ldr_max = ldr.max() + 100
# x_ldr = np.arange(ldr_min, ldr_max, 100)
fractions, gates, cell_identity = gate_well(df, dfm_ord=dfm_ord,
ph3_channel=ph3_channel,
ldr_channel=ldr_channel,
px_edu=px_edu, x_ldr=x_ldr,
control_based_gating=control_based_gating,
control_gates=control_gates,
fudge_gates=fudge_gates,
fig=fig, plot_num=i,
ldr_gates=ldr_gates)
df_summary = df_summary.append(fractions, ignore_index=True)
df_gates = df_gates.append(gates, ignore_index=True)
identity_dict[well] = cell_identity
except ValueError as err:
logging.error("%s in well %s" % (err, well))
#pass
except TypeError as err:
logging.error("%s in well %s" % (err, well))
#pass
except IndexError as err:
logging.error("%s in well %s" % (err, well))
#pass
except np.linalg.LinAlgError as err:
if 'Singular matrix' in str(err):
logging.error("%s in well %s" % (err, well))
#pass
except ZeroDivisionError as err:
logging.error("%s in well %s" % (err, well))
#pass
except pd.io.common.EmptyDataError as err:
logging.error("%s in well %s" % (err, well))
#pass
if (i + 1) % nb_plots_per_page == 0 or (i + 1) == nb_plots:
plt.tight_layout()
pdf_pages.savefig(fig)
logging.info("Completed analysis for %d out of %d wells" %
(i+1, len(object_level_data)))
plt.close('all')
pdf_pages.close()
summary_cols = ['well', 'cell_count__total',
'mean_Sphase_edu',
# 'cell_count', 'cell_count__dead',
'G1', 'S', 'G2',
'S_dropout', 'subG1', 'beyondG2']
if ph3_channel:
summary_cols.append('M')
if ldr_channel:
summary_cols += ['cell_count', 'cell_count__dead']
df_summary = df_summary[summary_cols]
# Merge summary table with metadata if provided
if dfm is not None:
df_summary.index = df_summary['well'].tolist()
df_summary = pd.concat([dfm_ord, df_summary], axis=1)
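# Example call (a sketch; the directory, channel-name dict, and metadata frame are hypothetical):
# df_wells = run('plate_1_objectlevel', ndict={'Nucleus Intensity': 'dna'},
#                dfm=df_metadata, ph3_channel=True, ldr_channel=True)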
import os
import pandas as pd
from pandas_datareader._utils import RemoteDataError
from pandas_datareader.base import _BaseReader
AV_BASE_URL = "https://www.alphavantage.co/query"
class AlphaVantage(_BaseReader):
"""
Base class for all Alpha Vantage queries
Notes
-----
See `Alpha Vantage <https://www.alphavantage.co/>`__
"""
_format = "json"
def __init__(
self,
symbols=None,
start=None,
end=None,
retry_count=3,
pause=0.1,
session=None,
api_key=None,
):
super(AlphaVantage, self).__init__(
symbols=symbols,
start=start,
end=end,
retry_count=retry_count,
pause=pause,
session=session,
)
if api_key is None:
api_key = os.getenv("ALPHAVANTAGE_API_KEY")
if not api_key or not isinstance(api_key, str):
raise ValueError(
"The AlphaVantage API key must be provided "
"either through the api_key variable or "
"through the environment variable "
"ALPHAVANTAGE_API_KEY"
)
self.api_key = api_key
@property
def url(self):
""" API URL """
return AV_BASE_URL
@property
def params(self):
return {"function": self.function, "apikey": self.api_key}
@property
def function(self):
""" Alpha Vantage endpoint function"""
raise NotImplementedError
@property
def data_key(self):
""" Key of data returned from Alpha Vantage """
raise NotImplementedError
def _read_lines(self, out):
try:
df = | pd.DataFrame.from_dict(out[self.data_key], orient="index") | pandas.DataFrame.from_dict |
# Copyright 2019 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import datetime
import unittest
from re import escape
from typing import Any, List, Optional, Sequence, Tuple, Union, cast # noqa: F401
import numpy as np
import pandas as pd
import six
from ddt import data, ddt, unpack
from purplequery.binary_expression import BinaryExpression
from purplequery.bq_abstract_syntax_tree import (EMPTY_CONTEXT, EMPTY_NODE, # noqa: F401
AbstractSyntaxTreeNode, EvaluatableNode,
EvaluationContext, Field, GroupedBy, _EmptyNode)
from purplequery.bq_types import (BQArray, BQScalarType, BQStructType, BQType, # noqa: F401
PythonType, TypedDataFrame, TypedSeries)
from purplequery.dataframe_node import QueryExpression, Select, TableReference
from purplequery.evaluatable_node import LiteralType # noqa: F401
from purplequery.evaluatable_node import (Case, Cast, Exists, Extract, FunctionCall, If, InCheck,
Not, NullCheck, Selector, UnaryNegation, Value)
from purplequery.grammar import select as select_rule
from purplequery.grammar import query_expression
from purplequery.query_helper import apply_rule
from purplequery.storage import DatasetTableContext
from purplequery.tokenizer import tokenize
@ddt
class EvaluatableNodeTest(unittest.TestCase):
def setUp(self):
# type: () -> None
self.small_table_context = DatasetTableContext({
'my_project': {
'my_dataset': {
'my_table': TypedDataFrame(
pd.DataFrame([[1], [2]], columns=['a']),
types=[BQScalarType.INTEGER]
)
}
}
})
self.large_table_context = DatasetTableContext({
'my_project': {
'my_dataset': {
'my_table': TypedDataFrame(
pd.DataFrame([[1, 2, 3], [1, 4, 3]], columns=['a', 'b', 'c']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER, BQScalarType.INTEGER]
)
}
}
})
def test_selector(self):
# type: () -> None
selector = Selector(Field(('a',)), 'field_alias')
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
typed_series = selector.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [1, 2])
self.assertEqual(list(typed_series.dataframe), ['field_alias'])
self.assertEqual(typed_series.types, [BQScalarType.INTEGER])
def test_selector_group_by_success(self):
# type: () -> None
selector = Selector(Field(('c',)), EMPTY_NODE)
selector.position = 1
context = EvaluationContext(self.large_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
context.exclude_aggregation = True
updated_selector, = context.do_group_by([selector], [Field(('my_table', 'c'))])
typed_series = updated_selector.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [3])
@data((5, BQScalarType.INTEGER),
(1.23, BQScalarType.FLOAT),
("something", BQScalarType.STRING),
(True, BQScalarType.BOOLEAN),
(None, None))
@unpack
def test_value_repr(self, value, type_):
# type: (Optional[LiteralType], Optional[BQScalarType]) -> None
'''Check Value's string representation'''
node = Value(value, type_)
representation = 'Value(type_={}, value={})'.format(type_.__repr__(), value.__repr__())
self.assertEqual(node.__repr__(), representation)
@data((5, None),
(None, BQScalarType.INTEGER))
@unpack
def test_invalid_value(self, value, type_):
# type: (Optional[LiteralType], Optional[BQScalarType]) -> None
'''Check that None is only allowed as both value and type_ or neither.'''
with self.assertRaises(ValueError):
Value(value, type_)
def test_value_eval(self):
# type: () -> None
# A constant is repeated for each row in the context table.
value = Value(12345, BQScalarType.INTEGER)
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')), 'foo')
typed_series = value.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [12345, 12345])
def test_field(self):
# type: () -> None
field = Field(('a',))
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
typed_series = field.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [1, 2])
self.assertEqual(typed_series.series.name, 'a')
@data(
dict(function_name='sum', args=[Field(('a',))], expected_result=[3],
is_aggregating=True),
dict(function_name='max', args=[Field(('a',))], expected_result=[2],
is_aggregating=True),
dict(function_name='min', args=[Field(('a',))], expected_result=[1],
is_aggregating=True),
dict(function_name='concat',
args=[Value('foo', BQScalarType.STRING), Value('bar', BQScalarType.STRING)],
expected_result=['foobar'] * 2), # two copies to match length of context table.
dict(function_name='mod',
args=[Field(('a',)), Value(2, BQScalarType.INTEGER)],
expected_result=[1, 0]),
dict(function_name='mod',
args=[Value(1.0, BQScalarType.FLOAT), Value(2, BQScalarType.INTEGER)],
expected_result=[1.0, 1.0]),
dict(function_name='timestamp',
args=[Value("2019-04-22", BQScalarType.STRING)],
expected_result=[datetime.datetime(2019, 4, 22)] * 2), # two copies to match table len
)
@unpack
def test_functions(self, function_name, args, expected_result, is_aggregating=False):
# type: (str, List[EvaluatableNode], List[PythonType], bool) -> None
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
if is_aggregating:
context.do_group_by((), [])
result = FunctionCall.create(function_name, args, EMPTY_NODE).evaluate(context)
assert isinstance(result, TypedSeries)
self.assertEqual(
[result.type_.convert(elt) for elt in result.series],
expected_result)
def test_current_timestamp(self):
# type: () -> None
node, leftover = apply_rule(query_expression, tokenize(
'select current_timestamp(), a from unnest([struct(1 as a), struct(2), struct(3)])'))
assert isinstance(node, QueryExpression)
self.assertFalse(leftover)
result, _ = node.get_dataframe(DatasetTableContext({}))
table = cast(List[List[datetime.datetime]], result.to_list_of_lists())
self.assertEqual(len(table), 3)
# CURRENT_TIMESTAMP() returns a very recent timestamp
self.assertLess((datetime.datetime.now() - table[0][0]).seconds, 2)
# All rows have the same timestamp value.
self.assertEqual(table[0][0], table[1][0])
self.assertEqual(table[0][0], table[2][0])
@data(
# These expressions are ones whose EvaluatableNode subclass constructs a
# new pandas Series rather than computing on existing ones. See below:
# this runs the risk of constructing it with an incorrect index.
dict(query='select 10, c', expected_result=[[10, 6], [10, 9]]),
dict(query='select [a, b], c', expected_result=[[(4, 5), 6], [(7, 8), 9]]),
dict(query='select (a, b), c', expected_result=[[(4, 5), 6], [(7, 8), 9]]),
dict(query='select exists(select 1), c', expected_result=[[True, 6], [True, 9]]),
dict(query='select a in (1, 4), c', expected_result=[[True, 6], [False, 9]]),
dict(query='select row_number() over (), c', expected_result=[[1, 6], [2, 9]]),
dict(query='select current_timestamp() > timestamp("2019-01-01"), c',
expected_result=[[True, 6], [True, 9]]),
)
@unpack
def test_constructed_column_has_correct_index(self, query, expected_result):
# type: (str, List[List[int]]) -> None
'''Checks that manually constructed columns have the same index as the data.
A manually constructed column will usually have an index 0, 1, 2, ...
(e.g. pd.Series(['a', 'b', 'c']) has index 0, 1, 2).
The data may not; filtering, sorting or other changes might result in an index of
different numbers. If one column's index doesn't match the index of other columns,
it can't be compared or joined with them properly.
'''
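        # Illustration (not part of the test): pd.Series([10, 20], index=[1, 2]) added to
        # pd.Series([1, 2]) (default index [0, 1]) aligns only on label 1; the remaining
        # labels become NaN, which is exactly the mismatch this test guards against.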
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([[1, 2, -1], [4, 5, 6], [7, 8, 9]], columns=['a', 'b', 'c']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER, BQScalarType.INTEGER])}}})
# Skip the first row of the table, so that the index of the table that
# the test queries operate on is [1, 2]; this makes sure that the index is
# different from the default index you would get for a two-row column,
# which would be [0, 1], to test that expressions are not incorrectly
# using that default index.
node, leftover = select_rule(tokenize(query + ' from (select * from my_table where c > 0)'))
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertFalse(leftover)
self.assertEqual(result.to_list_of_lists(), expected_result)
self.assertEqual(list(result.dataframe.index), [1, 2])
def test_bad_function(self):
# type: () -> None
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
with self.assertRaisesRegexp(NotImplementedError, 'NOT_A_FUNCTION not implemented'):
FunctionCall.create('not_a_function', [], EMPTY_NODE).evaluate(context)
@data(
# Explore each aggregate function, along with a non-aggregate function to make sure we
# can compute both at once.
dict(selectors='sum(a), b+10', expected_result=[[6, 11], [5, 12]]),
dict(selectors='sum(a), 20+10', expected_result=[[6, 30], [5, 30]]),
dict(selectors='sum(a+1), b+10', expected_result=[[8, 11], [6, 12]]),
dict(selectors='max(a), b+10', expected_result=[[4, 11], [5, 12]]),
dict(selectors='min(a), b+10', expected_result=[[2, 11], [5, 12]]),
dict(selectors='count(a), b+10', expected_result=[[2, 11], [1, 12]]),
dict(selectors='count(*), b+10', expected_result=[[2, 11], [2, 12]]),
dict(selectors='array_agg(a), []', expected_result=[[(2, 4), ()], [(5, None), ()]]),
dict(selectors='array_agg(a), [b]', expected_result=[[(2, 4), (1,)], [(5, None), (2,)]]),
dict(selectors='array_agg(a), [7, 8]', expected_result=[[(2, 4), (7, 8)],
[(5, None), (7, 8)]]),
dict(selectors='array_agg(a), b+10', expected_result=[[(2, 4), 11], [(5, None), 12]]),
)
@unpack
def test_aggregate_functions_in_group_by(self, selectors, expected_result):
# type: (str, List[List[int]]) -> None
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([[2, 1], [4, 1], [5, 2], [np.nan, 2]], columns=['a', 'b']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER])}}})
tokens = tokenize('select {} from my_table group by b'.format(selectors))
node, leftover = select_rule(tokens)
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertFalse(leftover)
self.assertEqual(result.to_list_of_lists(), expected_result)
@data(
dict(query='select sum(a + 1) + 2, count(*) + 3, 4 from my_table',
expected_result=[[11, 6, 4]]),
)
@unpack
def test_aggregate_functions_in_expressions(self, query, expected_result):
# type: (str, List[List[int]]) -> None
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([[1], [2], [3]], columns=['a']),
types=[BQScalarType.INTEGER])}}})
node, leftover = select_rule(tokenize(query))
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertFalse(leftover)
self.assertEqual(result.to_list_of_lists(), expected_result)
@data(
# Test all variations of creating a struct (typed, typeless, tuple),
# with and without named fields, with one field, and then with two
# fields.
dict(query='SELECT STRUCT<INTEGER>(1)',
expected_result=(1,),
expected_type=BQStructType([None], [BQScalarType.INTEGER])),
dict(query='SELECT STRUCT<a INTEGER>(1)',
expected_result=(1,),
expected_type=BQStructType(['a'], [BQScalarType.INTEGER])),
dict(query='SELECT STRUCT(1 AS a)',
expected_result=(1,),
expected_type=BQStructType(['a'], [BQScalarType.INTEGER])),
dict(query='SELECT STRUCT(1)',
expected_result=(1,),
expected_type=BQStructType([None], [BQScalarType.INTEGER])),
# Note: no test of single-element tuple syntax, as that would just be a
# parenthesized expression, there's no analogue to Python's trailing comma.
dict(query='SELECT STRUCT<INTEGER, STRING>(1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType([None, None], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT<a INTEGER, STRING>(1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType(['a', None], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT<INTEGER, b STRING>(1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType([None, 'b'], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT<a INTEGER, b STRING>(1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType(['a', 'b'], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT(1 AS a, "a" as b)',
expected_result=(1, 'a'),
expected_type=BQStructType(['a', 'b'], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT(1, "a" as b)',
expected_result=(1, 'a'),
expected_type=BQStructType([None, 'b'], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT(1 AS a, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType(['a', None], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT(1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType([None, None], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT (1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType([None, None], [BQScalarType.INTEGER, BQScalarType.STRING])),
)
@unpack
def test_struct_constant_expressions(self, query, expected_result, expected_type):
# type: (str, Tuple[Optional[int], ...], BQStructType) -> None
table_context = DatasetTableContext({})
node, leftover = select_rule(tokenize(query))
self.assertFalse(leftover)
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertEqual(result.to_list_of_lists(), [[expected_result]])
self.assertEqual(result.types, [expected_type])
@data(
# Test all three struct syntaxes, selecting a column as one field, a
# constant as the other.
dict(query='SELECT (a, "a") FROM my_table',
expected_result=[[(1, 'a')], [(2, 'a')]],
expected_types=[
BQStructType([None, None], [BQScalarType.INTEGER, BQScalarType.STRING])]),
dict(query='SELECT STRUCT(a as x, "a" as y) FROM my_table',
expected_result=[[(1, 'a')], [(2, 'a')]],
expected_types=[
BQStructType(['x', 'y'], [BQScalarType.INTEGER, BQScalarType.STRING])]),
dict(query='SELECT STRUCT<x INTEGER, y STRING>(a, "a") FROM my_table',
expected_result=[[(1, 'a')], [(2, 'a')]],
expected_types=[
BQStructType(['x', 'y'], [BQScalarType.INTEGER, BQScalarType.STRING])]),
)
@unpack
def test_struct_field_and_constant(self, query, expected_result, expected_types):
# type: (str, List[List[Tuple[Optional[int], ...]]], Sequence[BQStructType]) -> None
node, leftover = select_rule(tokenize(query))
self.assertFalse(leftover)
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(self.small_table_context)
self.assertEqual(result.to_list_of_lists(), expected_result)
self.assertEqual(result.types, expected_types)
@data(
# Test combination types of arrays and structs.
dict(query='SELECT ([1], "a")',
expected_result=((1,), 'a'),
expected_type=BQStructType([None, None], [BQArray(BQScalarType.INTEGER),
BQScalarType.STRING])),
dict(query='SELECT STRUCT<x ARRAY<INTEGER>, y STRING>(ARRAY<INTEGER>[1], "a")',
expected_result=((1,), 'a'),
expected_type=BQStructType(['x', 'y'], [BQArray(BQScalarType.INTEGER),
BQScalarType.STRING])),
dict(query='SELECT [(1, "a")]',
expected_result=((1, 'a'), ),
expected_type=BQArray(BQStructType([None, None], [BQScalarType.INTEGER,
BQScalarType.STRING]))),
dict(query='SELECT [STRUCT<a INTEGER, b STRING>(1, "a"), (2, "b")]',
expected_result=((1, 'a'), (2, 'b')),
expected_type=BQArray(BQStructType(['a', 'b'], [BQScalarType.INTEGER,
BQScalarType.STRING]))),
# Test that an array of structs merges and coerces the types of the
# structs.
dict(query='SELECT [STRUCT<a FLOAT, STRING>(1.0, "a"), STRUCT<INTEGER, b STRING>(2, "b")]',
expected_result=((1.0, 'a'), (2.0, 'b')),
expected_type=BQArray(BQStructType(['a', 'b'], [BQScalarType.FLOAT,
BQScalarType.STRING]))),
dict(query='SELECT [STRUCT<a INTEGER, b ARRAY<STRING> >(1, ["a"]), (2, ["b", "c"])]',
expected_result=((1, ('a',)), (2, ('b', 'c'))),
expected_type=BQArray(BQStructType(['a', 'b'], [BQScalarType.INTEGER,
BQArray(BQScalarType.STRING)]))),
)
@unpack
def test_complex_types(self, query, expected_result, expected_type):
# type: (str, Tuple[Optional[int], ...], BQType) -> None
table_context = DatasetTableContext({})
node, leftover = select_rule(tokenize(query))
self.assertFalse(leftover)
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertEqual(result.to_list_of_lists(), [[expected_result]])
self.assertEqual(result.types, [expected_type])
@data(
dict(query='SELECT ARRAY_AGG(a)',
expected_result=(1, 1, 2, None)),
dict(query='SELECT ARRAY_AGG(a RESPECT NULLS)',
expected_result=(1, 1, 2, None)),
dict(query='SELECT ARRAY_AGG(DISTINCT a)',
expected_result=(1, 2, None)),
dict(query='SELECT ARRAY_AGG(DISTINCT a RESPECT NULLS)',
expected_result=(1, 2, None)),
dict(query='SELECT ARRAY_AGG(a IGNORE NULLS)',
expected_result=(1, 1, 2)),
dict(query='SELECT ARRAY_AGG(DISTINCT a IGNORE NULLS)',
expected_result=(1, 2)),
)
@unpack
def test_array_agg_arguments(self, query, expected_result):
# type: (str, Tuple[Optional[int], ...]) -> None
table_context = DatasetTableContext(
{'p': {'d': {'t':
TypedDataFrame(pd.DataFrame([[1], [1], [2], [None]], columns=['a']),
types=[BQScalarType.INTEGER])}}})
node, leftover = select_rule(tokenize(query + ' FROM p.d.t'))
self.assertFalse(leftover)
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertEqual(result.to_list_of_lists(), [[expected_result]])
@data(
dict(query='SELECT [1,2,"a"]',
error='Cannot implicitly coerce the given types'),
dict(query='SELECT STRUCT<INT64>(3.7)',
error='Struct field 1 has type .*FLOAT which does not coerce to .*INTEGER'),
dict(query='SELECT ARRAY<INT64>[3.7]',
error='Array specifies type .*INTEGER, incompatible with values of type .*FLOAT'),
dict(query='SELECT ARRAY<INT64>[1,2,"a"]',
error='Cannot implicitly coerce the given types'),
dict(query='SELECT ARRAY<string>[1,2]',
error='Cannot implicitly coerce the given types'),
dict(query='SELECT [[1]]',
error='Cannot create arrays of arrays'),
dict(query='SELECT [(1, 2), (3, 4, 5)]',
error='Cannot merge .* number of fields varies'),
dict(query='SELECT [STRUCT(1 as a, 2 as b), STRUCT(3 as x, 4 as b)]',
error='Cannot merge Structs; field names .* do not match'),
# same types in different orders can't merge.
dict(query='SELECT [(1, "a"), ("b", 2)]',
error='Cannot implicitly coerce the given types'),
# same names in different orders can't merge
dict(query='SELECT [STRUCT(1 as a, 2 as b), STRUCT(3 as b, 4 as a)]',
error='Cannot merge Structs; field names .* do not match'),
)
@unpack
def test_complex_type_errors(self, query, error):
# type: (str, str) -> None
node, leftover = select_rule(tokenize(query))
self.assertFalse(leftover)
assert isinstance(node, Select)
with self.assertRaisesRegexp(ValueError, error):
node.get_dataframe(self.small_table_context)
@data(
# Row number over whole dataset; order is not guaranteed
dict(selectors='row_number() over ()', expected_result=[[1], [2], [3], [4]]),
dict(selectors='row_number() over (order by a), a',
expected_result=[[1, 10], [2, 20], [3, 30], [4, 30]]),
dict(selectors='row_number() over (order by a asc), a',
expected_result=[[1, 10], [2, 20], [3, 30], [4, 30]]),
dict(selectors='row_number() over (order by a desc), a',
expected_result=[[4, 10], [3, 20], [2, 30], [1, 30]]),
dict(selectors='row_number() over (partition by b order by a), a',
expected_result=[[1, 10], [2, 20], [1, 30], [2, 30]]),
dict(selectors='sum(a) over (), a',
expected_result=[[90, 10], [90, 20], [90, 30], [90, 30]]),
dict(selectors='sum(a) over (partition by b), a',
expected_result=[[30, 10], [30, 20], [60, 30], [60, 30]]),
dict(selectors='count(*) over (), a',
expected_result=[[4, 10], [4, 20], [4, 30], [4, 30]]),
dict(selectors='count(a) over (), a',
expected_result=[[4, 10], [4, 20], [4, 30], [4, 30]]),
dict(selectors='count(*) over (partition by b), a',
expected_result=[[2, 10], [2, 20], [2, 30], [2, 30]]),
dict(selectors='count(a) over (partition by b), a',
expected_result=[[2, 10], [2, 20], [2, 30], [2, 30]]),
dict(selectors='sum(count(*)) over ()',
expected_result=[[4]]),
)
@unpack
def test_analytic_function(self, selectors, expected_result):
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([[20, 200], [10, 200], [30, 300], [30, 300]], columns=['a', 'b']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER])}}})
tokens = tokenize('select {} from my_table'.format(selectors))
node, leftover = select_rule(tokens)
result, unused_table_name = node.get_dataframe(table_context)
self.assertFalse(leftover)
# Note: BQ docs say if ORDER BY clause (for the select as a whole) is not present, order of
# results is undefined, so we do not assert on the order.
six.assertCountEqual(self, result.to_list_of_lists(), expected_result)
@data(
dict(selectors='sum(count(*)) over (), count(*)',
expected_result=[[5, 2], [5, 3]]),
)
@unpack
def test_analytic_function_with_group_by(self, selectors, expected_result):
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
| pd.DataFrame([[20, 2], [10, 2], [30, 3], [31, 3], [32, 3]], columns=['a', 'b']) | pandas.DataFrame |
import pandas as pd
import numpy as np
from scipy.stats import norm
def csv_import(currency, date):
"""
Function for importing Bloomberg FX volatility data from a csv file for
a given date. Currency should be passed as 'GBPUSD' and date as '2017-01-31'.
"""
    # whitespace-delimited text export from Excel, hence sep='\s+'
df = | pd.read_csv('data/'+ currency +'-'+ date +'.txt', header=None, sep='\s+') | pandas.read_csv |
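# Hypothetical usage sketch (the 'data/' layout is taken from the path built above):
# df_vol = csv_import('GBPUSD', '2017-01-31')   # reads data/GBPUSD-2017-01-31.txt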
import numpy as np
import pandas as pd
import glob
from pmdarima.arima import ndiffs
from pandas.tseries.offsets import QuarterBegin, QuarterEnd
from .hand_select import hand_select
import pandas_datareader.data as web
import xlrd, csv
from openpyxl.workbook import Workbook
from openpyxl.reader.excel import load_workbook, InvalidFileException
def set_date_as_index(df):
df.columns = [name.lower() for name in df.columns]
df["date"] = pd.to_datetime(df["date"])
df.set_index("date", inplace=True)
return df
def make_float(df):
df = df.replace(".", np.nan)
df = df.astype(float)
return df
def read_files(paths, fillna=True):
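    # Gather every *.csv and *.xls under each path into two lists of
    # date-indexed, float-typed (optionally forward-filled) DataFrames.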
csv_list = []
xls_list = []
for path in paths:
csv_files = glob.glob(path + "/*.csv")
xls_files = glob.glob(path + "/*.xls")
for elt in csv_files:
df = pd.read_csv(elt)
df = set_date_as_index(df)
df = make_float(df)
if fillna:
df = df.fillna(method='ffill')
csv_list.append(df)
for elt in xls_files:
try:
df = pd.read_excel(elt)
df = set_date_as_index(df)
df = make_float(df)
if fillna:
df = df.fillna(method='ffill')
                xls_list.append(df)
except Exception:
pass
return csv_list, xls_list
def make_stationary(df):
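    # For each column, difference by the largest order suggested by the KPSS,
    # ADF and PP tests; e.g. a series needing one difference is replaced by a
    # '<name>_diff1' column and the original column is dropped.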
df = hand_select(df)
df = df.dropna()
columns = df.columns
for name in columns:
x = df[name].values
d_kpss = ndiffs(x, test='kpss')
d_adf = ndiffs(x, test='adf')
d_pp = ndiffs(x, test='pp')
d_ = max(d_kpss, d_adf, d_pp)
if d_ > 0:
new_name = name + '_diff' + str(d_)
if d_ == 1:
df[new_name] = df[name].diff()
elif d_ == 2:
df[new_name] = df[name].diff().diff()
elif d_ > 2:
raise ValueError('High order differentiation')
else:
                raise Exception('Something is wrong')
df = df.drop(columns=[name])
return df
def open_xls_as_xlsx(filename):
# first open using xlrd
book = xlrd.open_workbook(filename)
index = 0
nrows, ncols = 0, 0
while nrows * ncols == 0:
sheet = book.sheet_by_index(index)
nrows = sheet.nrows
ncols = sheet.ncols
index += 1
    # prepare an xlsx sheet
book1 = Workbook()
sheet1 = book1.active
    # xlrd is 0-indexed while openpyxl is 1-indexed, so copy every cell with a +1 shift
    for row in range(0, nrows):
        for col in range(0, ncols):
            sheet1.cell(row=row + 1, column=col + 1).value = sheet.cell_value(row, col)
return book1
def read_data(path, sheet=False, header='infer'):
file_format = path.split('.')[-1]
if 'msci' in path:
header = 6
if sheet is False:
# if file_format == 'csv':
# df = pd.read_csv(path, header=header)
# elif file_format == 'xls':
# df = open_xls_as_xlsx(path)
# else:
try:
df = pd.read_excel(path, header=header, engine='openpyxl')
except Exception:
try:
df = open_xls_as_xlsx(path)
except Exception as e:
try:
df = pd.read_csv(path, header=header)
except Exception as e:
raise Exception(e)
else:
try:
# excel_file = pd.ExcelFile(path)
# assert sheet in excel_file.sheet_names
# df = excel_file.parse(sheet, header=header)
df = pd.read_excel(path, header=header, engine='openpyxl', sheet_name=sheet)
except Exception:
raise Exception("Can not read sheet")
df.columns = [name.lower() for name in df.columns]
if 'year2' in df.columns:
drop_columns = ['year2']
else:
drop_columns = []
for elt in df.columns:
if 'unnamed' in elt:
drop_columns.append(elt)
df.drop(columns=drop_columns, inplace=True)
first_valid = df.iloc[:, 1].first_valid_index()
last_valid = df.iloc[:, 1].last_valid_index() + 1
df = df.iloc[first_valid:last_valid]
df.columns = df.columns.str.replace('.', '_')
df.columns = df.columns.str.replace(' ', '_')
df.columns = df.columns.str.replace('__', '_')
return df
def make_monthly_date(df, offset=True):
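    # Build a monthly timestamp from integer year/month columns,
    # e.g. year=2020, month=3 -> '202003' -> Timestamp('2020-03-01')
    # (shifted to the next month's first day when offset=True).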
datetime = pd.to_datetime(
(
df['year'].astype(int) * 100
+ df['month'].astype(int)
).astype(str),
format='%Y%m'
)
if offset:
datetime += pd.tseries.offsets.MonthBegin(1)
else:
datetime = datetime
df['date'] = datetime
df.drop(columns=['year', 'month'], inplace=True)
df.set_index('date', inplace=True)
df.columns = [elt + '_monthly' for elt in df.columns]
return df
def make_quarterly_date(df, offset=True):
df['year'] = df['year'].str.lower()
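    # e.g. 'q1-2020' -> '2020-q1' so pd.to_datetime can parse the quarter string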
df['year'] = df['year'].str.replace(r'(q\d)-(\d+)', r'\2-\1')
if offset:
# Bug that quarterbegin is March 01
df['date'] = pd.to_datetime(df['year'])\
+ pd.tseries.offsets.DateOffset(days=1)\
+ pd.tseries.offsets.QuarterBegin(1, startingMonth=1)
else:
df['date'] = pd.to_datetime(df['year'])
df.drop(columns=['year'], inplace=True)
df.set_index('date', inplace=True)
# Manually shift because of QuarterBegin bug
df.columns = [elt + '_quarterly' for elt in df.columns]
df = df.dropna()
return df
def make_daily_date(df):
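    # Build a daily timestamp from integer year/month/day columns,
    # e.g. year=2020, month=3, day=15 -> '20200315' -> Timestamp('2020-03-15').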
datetime = pd.to_datetime(
(
df['year'].astype(int) * 10000
+ df['month'].astype(int) * 100
+ df['day'].astype(int)
).astype(str),
format='%Y%m%d'
)
df['date'] = datetime
df.drop(columns=['year', 'month', 'day'], inplace=True)
df.set_index('date', inplace=True)
df.columns = [elt + '_daily' for elt in df.columns]
return df
# If the date of low-frequency data is specified, assume it is announced
# before the market opens.
# If not specified, assume it is announced after the market closes.
def daily_data(df, freq, offset=True, fill_method='ffill'):
drop_columns = []
for elt in df.columns:
if 'unnamed' in elt:
drop_columns.append(elt)
df.drop(columns=drop_columns, inplace=True)
if freq.lower() == 'monthly':
try:
df = make_monthly_date(df, offset=offset)
except Exception:
print("set monthly date as index")
datetime = pd.to_datetime(df['date'])
df['date'] = datetime
df.set_index('date', inplace=True)
df.columns = [elt + '_monthly' for elt in df.columns]
df = make_stationary(df)
if offset:
daily_datetime = pd.date_range(
df.index[0] + pd.tseries.offsets.MonthBegin(1),
df.index[-1] + pd.tseries.offsets.MonthEnd(1),
freq='D'
)
else:
daily_datetime = pd.date_range(
df.index[0] + pd.tseries.offsets.MonthBegin(1),
df.index[-1] + pd.tseries.offsets.MonthEnd(1),
freq='D'
)
df = df.reindex(daily_datetime, method=fill_method)
elif freq.lower() == 'daily':
try:
df = make_daily_date(df)
except Exception:
print("set daily date as index")
datetime = pd.to_datetime(df['date'])
df['date'] = datetime
df.set_index('date', inplace=True)
df.columns = [elt + '_daily' for elt in df.columns]
df = make_stationary(df)
daily_datetime = pd.date_range(
df.index[0],
df.index[-1],
freq='D'
)
df = df.reindex(daily_datetime, method=fill_method)
elif freq.lower() == 'quarterly':
try:
df = make_quarterly_date(df)
except Exception:
print("set quarterly date as index")
datetime = pd.to_datetime(df['date'])
df['date'] = datetime
df.set_index('date', inplace=True)
df.columns = [elt + '_quarterly' for elt in df.columns]
df = make_stationary(df)
if offset:
daily_datetime = pd.date_range(
df.index[0] + QuarterBegin(1, startingMonth=1),
df.index[-1] + QuarterEnd(1, startingMonth=1),
freq='D'
)
else:
daily_datetime = pd.date_range(
df.index[0],
df.index[-1] + pd.tseries.offsets.QuarterEnd(1),
freq='D'
)
df = df.reindex(daily_datetime, method=fill_method)
else:
print("Type frequency")
daily_datetime = pd.date_range(
df.index[0], df.index[-1],
freq='D'
)
df = df.reindex(daily_datetime, method=fill_method)
drop_columns = []
for elt in df.columns:
if 'unnamed' in elt:
drop_columns.append(elt)
df.drop(columns=drop_columns, inplace=True)
return df
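# Minimal usage sketch of daily_data on hypothetical monthly data: the integer
# year/month columns become a month-begin index, the series is made stationary,
# and the result is forward-filled onto a daily calendar.
#   df_m = pd.DataFrame({'year': [2020] * 6, 'month': list(range(1, 7)),
#                        'cpi': [1.0, 1.1, 1.2, 1.1, 1.3, 1.4]})
#   df_d = daily_data(df_m, 'monthly')   # daily index, columns suffixed '_monthly'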
def get_nonfinancial():
print('monthly epu')
monthly_epu = read_data(
'https://www.policyuncertainty.com/media/All_Country_Data.xlsx'
)
daily_epu = daily_data(monthly_epu, 'monthly')
daily_epu.columns = ['epu_' + elt for elt in daily_epu.columns]
print('daily_infectious')
daily_infectious = read_data(
'https://www.policyuncertainty.com/media/All_Infectious_EMV_Data.csv'
)
daily_infectious = daily_data(daily_infectious, 'daily')
daily_infectious.columns = [
'daily_infectious_' + elt for elt in daily_infectious.columns]
print('categorical_epu')
categorical_epu = read_data(
'https://www.policyuncertainty.com/media/Categorical_EPU_Data.xlsx'
)
categorical_epu = daily_data(categorical_epu, 'monthly')
categorical_epu.columns = [
'categorical_epu_' + elt for elt in categorical_epu.columns]
# print('eurq_data')
# eurq_data = read_data(
# '../../data/epu/EURQ_data.xlsx',
# sheet='EURQ'
# )
# eurq_data = daily_data(eurq_data, 'monthly')
# eurq_data.columns = ['eurq_data_' + elt for elt in eurq_data.columns]
# print('trade_unc')
# trade_uncertainty_data = read_data(
# 'https://www.policyuncertainty.com/media/Trade_Uncertainty_Data.xlsx'
# )
# trade_uncertainty_data = daily_data(trade_uncertainty_data, 'monthly')
# trade_uncertainty_data.columns = [
# 'trade_uncertainty_' + elt for elt in trade_uncertainty_data.columns
# ]
print('wpui')
wpui_url = (
'https://worlduncertaintyindex.com/'
'wp-content/uploads/2020/07/WPUI_Data.xlsx'
)
wpui_data = read_data(
wpui_url, sheet='F1', header=1
)
wpui_data = daily_data(wpui_data, 'quarterly')
wpui_data.columns = [
'wpui_' + elt for elt in wpui_data.columns
]
print('wui')
wui_url = (
'https://worlduncertaintyindex.com/'
'wp-content/uploads/2020/07/WUI_Data.xlsx'
)
wui_data = read_data(
wui_url, sheet='F1', header=2
)
wui_data = daily_data(wui_data, 'quarterly')
wui_data.columns = [
'wui_' + elt for elt in wui_data.columns
]
df_non_financial = pd.concat(
[
daily_epu, daily_infectious, categorical_epu,
# eurq_data, trade_uncertainty_data,
wpui_data, wui_data
], axis=1
)
print('non-financial data')
return df_non_financial
def get_financial():
print('finance data')
    sp500 = web.DataReader(
'^GSPC', 'yahoo',
start='1990-01-03', end='2020-08-31'
)
sp500.columns = [elt.lower().replace(' ', '_') for elt in sp500.columns]
try:
sp500.set_index('date', inplace=True)
except Exception:
pass
sp500.index.name = 'date'
sp500.index = pd.DatetimeIndex(sp500.index)
print('dex jp us')
dexjpus_url = (
'https://fred.stlouisfed.org/graph/fredgraph.csv?'
'bgcolor=%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&'
'graph_bgcolor=%23ffffff&height=450&mode=fred&recession_bars='
'on&txtcolor=%23444444&ts=12&tts=12&width=968&nt=0&thu=0&trc='
'0&show_legend=yes&show_axis_titles=yes&show_tooltip=yes&id='
'DEXJPUS&scale=left&cosd=1971-01-04&coed=2020-08-28&'
'line_color=%234572a7&link_values=false&line_style=solid&'
'mark_type=none&mw=3&lw=2&ost=-99999&oet=99999&mma=0&fml=a&'
'fq=Daily&fam=avg&fgst=lin&fgsnd=2020-02-01&line_index=1&'
'transformation=lin&vintage_date=2020-09-01&revision_date'
'=2020-09-01&nd=1971-01-04'
)
dexjpus = pd.read_csv(dexjpus_url)
dexjpus.columns = [elt.lower() for elt in dexjpus.columns]
dexjpus['date'] = pd.DatetimeIndex(dexjpus['date'])
dexjpus.set_index('date', inplace=True)
print('dex us eu')
dexuseu_url = (
'https://fred.stlouisfed.org/graph/fredgraph.csv?bgcolor='
'%23e1e9f0&chart_type=line&drp=0&fo=open%20sans&graph_bgcolor='
'%23ffffff&height=450&mode=fred&recession_bars=on&txtcolor=%'
'23444444&ts=12&tts=12&width=968&nt=0&thu=0&trc=0&show_legend='
'yes&show_axis_titles=yes&show_tooltip=yes&id=DEXUSEU&scale=left&'
'cosd=1999-01-04&coed=2020-08-28&line_color=%234572a7&link_values='
'false&line_style=solid&mark_type=none&mw=3&lw=2&ost=-99999&'
'oet=99999&mma=0&fml=a&fq=Daily&fam=avg&fgst=lin&fgsnd='
'2020-02-01&line_index=1&transformation=lin&vintage_date='
'2020-09-01&revision_date=2020-09-01&nd=1999-01-04'
)
dexuseu = pd.read_csv(dexuseu_url)
dexuseu.columns = [elt.lower() for elt in dexuseu.columns]
dexuseu['date'] = | pd.DatetimeIndex(dexuseu['date']) | pandas.DatetimeIndex |
import os
import yaml
import pandas as pd
import matplotlib.pyplot as plt
from scipy import interpolate, stats
from .logger import setup_logger
def set_up_experiment(experiment, folder, logging_lvl=10):
OUTPUT_DIR = os.path.join(folder, 'results')
FIGURE_DIR = os.path.join(folder, 'figures')
os.makedirs(OUTPUT_DIR, exist_ok=True)
os.makedirs(FIGURE_DIR, exist_ok=True)
logger_name = experiment
logger = setup_logger(logger_name, OUTPUT_DIR, logging_lvl=logging_lvl)
return OUTPUT_DIR, FIGURE_DIR, logger, logger_name
def load_config(folder, dataset):
if dataset == 'mnist':
config_path = os.path.join(folder, 'mnist.yaml')
elif dataset == 'cifar':
config_path = os.path.join(folder, 'cifar.yaml')
elif dataset == 'pascalvoc_detection':
config_path = os.path.join(folder, 'pascalvoc_detection.yaml')
elif dataset == 'pascalvoc_segmentation':
config_path = os.path.join(folder, 'pascalvoc_segmentation.yaml')
elif dataset == 'coco_object_detection':
config_path = os.path.join(folder, 'coco.yaml')
with open(config_path, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
return config
def append_to_scores(data, metric, key):
    if type(metric) == dict:
        # Recurse into every sub-metric (not just the first) so nested dicts are fully flattened
        for subkey, submetric in metric.items():
            data = append_to_scores(data, submetric, f'{key}-{subkey}')
        return data
    else:
        data[key] = metric
        return data
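# Example with hypothetical metrics:
#   append_to_scores({}, {'f1': 0.9, 'acc': 0.8}, 'val')  ->  {'val-f1': 0.9, 'val-acc': 0.8}
# Scalar metrics are stored directly under the given key.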
def extract_df(scores):
data = []
for (strategy, run), scores_experiment in scores.items():
for step_result in scores_experiment:
experiment_data = {}
for key, metric in step_result.items():
experiment_data = append_to_scores(
experiment_data, metric, key)
experiment_data = {
**experiment_data, 'strategy': strategy, 'run': run}
data.append(experiment_data)
df = | pd.DataFrame(data) | pandas.DataFrame |
import numpy as np
import pandas as pd
def subset_grm(grm, grm_indiv, target_indiv):
set_target_indiv = set(target_indiv)
isin = np.array([ g in set_target_indiv for g in grm_indiv ])
grm = grm[:, isin][isin, :]
grm_indiv = list(np.array(grm_indiv)[isin])
return grm, grm_indiv
def subset_y(df, indiv):
df_indiv = | pd.DataFrame({'indiv': indiv}) | pandas.DataFrame |
import os
import locale
import codecs
import nose
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas import date_range, Index
import pandas.util.testing as tm
from pandas.tools.util import cartesian_product, to_numeric
CURRENT_LOCALE = locale.getlocale()
LOCALE_OVERRIDE = os.environ.get('LOCALE_OVERRIDE', None)
class TestCartesianProduct(tm.TestCase):
def test_simple(self):
x, y = list('ABC'), [1, 22]
result = cartesian_product([x, y])
expected = [np.array(['A', 'A', 'B', 'B', 'C', 'C']),
np.array([1, 22, 1, 22, 1, 22])]
assert_equal(result, expected)
def test_datetimeindex(self):
# regression test for GitHub issue #6439
# make sure that the ordering on datetimeindex is consistent
x = date_range('2000-01-01', periods=2)
result = [Index(y).day for y in cartesian_product([x, x])]
expected = [np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])]
assert_equal(result, expected)
class TestLocaleUtils(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestLocaleUtils, cls).setUpClass()
cls.locales = tm.get_locales()
if not cls.locales:
raise nose.SkipTest("No locales found")
tm._skip_if_windows()
@classmethod
def tearDownClass(cls):
super(TestLocaleUtils, cls).tearDownClass()
del cls.locales
def test_get_locales(self):
# all systems should have at least a single locale
assert len(tm.get_locales()) > 0
def test_get_locales_prefix(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test filtering locale prefixes")
first_locale = self.locales[0]
assert len(tm.get_locales(prefix=first_locale[:2])) > 0
def test_set_locale(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test setting another locale")
if LOCALE_OVERRIDE is not None:
lang, enc = LOCALE_OVERRIDE.split('.')
else:
lang, enc = 'it_CH', 'UTF-8'
enc = codecs.lookup(enc).name
new_locale = lang, enc
if not tm._can_set_locale(new_locale):
with tm.assertRaises(locale.Error):
with tm.set_locale(new_locale):
pass
else:
with tm.set_locale(new_locale) as normalized_locale:
new_lang, new_enc = normalized_locale.split('.')
new_enc = codecs.lookup(enc).name
normalized_locale = new_lang, new_enc
self.assertEqual(normalized_locale, new_locale)
current_locale = locale.getlocale()
self.assertEqual(current_locale, CURRENT_LOCALE)
class TestToNumeric(tm.TestCase):
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
with tm.assertRaises(ValueError):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
with tm.assertRaises(ValueError):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
def test_type_check(self):
# GH 11776
df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']})
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df, errors=errors)
def test_scalar(self):
self.assertEqual(pd.to_numeric(1), 1)
self.assertEqual(pd.to_numeric(1.1), 1.1)
self.assertEqual(pd.to_numeric('1'), 1)
self.assertEqual(pd.to_numeric('1.1'), 1.1)
with tm.assertRaises(ValueError):
to_numeric('XX', errors='raise')
self.assertEqual(to_numeric('XX', errors='ignore'), 'XX')
self.assertTrue(np.isnan(to_numeric('XX', errors='coerce')))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, | pd.Index(exp, name='xxx') | pandas.Index |
import os
import tempfile
import time
from typing import Any, Dict, List, Optional
import pandas as pd
from upgini import dataset
from upgini.http import ProviderTaskSummary, SearchTaskSummary, get_rest_client
from upgini.metadata import SYSTEM_RECORD_ID, ModelTaskType
class SearchTask:
summary: Optional[SearchTaskSummary]
def __init__(
self,
search_task_id: str,
dataset: Optional["dataset.Dataset"] = None,
return_scores: bool = False,
extract_features: bool = False,
accurate_model: bool = False,
initial_search_task_id: Optional[str] = None,
task_type: Optional[ModelTaskType] = None,
endpoint: Optional[str] = None,
api_key: Optional[str] = None,
):
self.search_task_id = search_task_id
self.initial_search_task_id = initial_search_task_id
self.dataset = dataset
self.return_scores = return_scores
self.extract_features = extract_features
self.accurate_model = accurate_model
self.task_type = task_type
self.summary = None
self.endpoint = endpoint
self.api_key = api_key
def poll_result(self, quiet: bool = False) -> "SearchTask":
completed_statuses = {"COMPLETED", "VALIDATION_COMPLETED"}
failed_statuses = {"FAILED", "VALIDATION_FAILED"}
submitted_statuses = {"SUBMITTED", "VALIDATION_SUBMITTED"}
if not quiet:
print(
f"Running {self.search_task_id} search request.\n"
"We'll email you once it's completed. Please wait a few minutes."
)
search_task_id = self.initial_search_task_id if self.initial_search_task_id is not None else self.search_task_id
try:
time.sleep(1)
self.summary = get_rest_client(self.endpoint, self.api_key).search_task_summary_v2(search_task_id)
while self.summary.status not in completed_statuses:
if not quiet:
print("\\", end="\r")
time.sleep(5)
self.summary = get_rest_client(self.endpoint, self.api_key).search_task_summary_v2(search_task_id)
if not quiet:
print("/", end="\r")
if self.summary.status in failed_statuses:
raise RuntimeError("Oh! Server did something wrong, please retry with new search request.")
if self.summary.status in submitted_statuses and len(self._get_provider_summaries(self.summary)) == 0:
raise RuntimeError(
"No datasets found to intersect with uploaded file using defined search keys. "
"Try with another set of keys or different time period."
)
time.sleep(5)
except KeyboardInterrupt:
print("Search interrupted. Stopping search request...")
get_rest_client(self.endpoint, self.api_key).stop_search_task_v2(search_task_id)
print("Search request stopped")
raise
print()
has_completed_provider_task = False
for provider_summary in self._get_provider_summaries(self.summary):
if provider_summary.status == "COMPLETED":
has_completed_provider_task = True
if not has_completed_provider_task:
raise RuntimeError(
"All search tasks in the request have failed: "
+ ",".join([self._error_message(x) for x in self._get_provider_summaries(self.summary)])
+ "."
)
return self
@staticmethod
def _get_provider_summaries(summary: SearchTaskSummary) -> List[ProviderTaskSummary]:
if summary.status in {
"VALIDATION_CREATED",
"VALIDATION_SUBMITTED",
"VALIDATION_COMPLETED",
"VALIDATION_FAILED",
}:
return summary.validation_important_providers
else:
return summary.initial_important_providers
@staticmethod
def _error_message(provider_summary: ProviderTaskSummary):
if provider_summary.error_message:
return provider_summary.error_message
else:
if provider_summary.status == "TIMED_OUT":
return "Search request timed out"
elif provider_summary.status == "EMPTY_INTERSECTION":
return "Datasets doesn't intersect with uploaded file"
else:
return "Internal error"
def validation(self, validation_dataset: "dataset.Dataset", extract_features: bool = False) -> "SearchTask":
return validation_dataset.validation(self.search_task_id, return_scores=True, extract_features=extract_features)
def _check_finished_initial_search(self) -> List[ProviderTaskSummary]:
if self.summary is None or len(self.summary.initial_important_providers) == 0:
raise RuntimeError("Initial search didn't start.")
return self.summary.initial_important_providers
def _check_finished_validation_search(self) -> List[ProviderTaskSummary]:
if self.summary is None or len(self.summary.validation_important_providers) == 0:
raise RuntimeError("Validation search didn't start.")
return self.summary.validation_important_providers
@staticmethod
def _has_metric(provider_summaries: List[ProviderTaskSummary], metric_code: str) -> bool:
for provider_summary in provider_summaries:
for code in provider_summary.metrics.keys():
if code == metric_code:
return True
return False
@staticmethod
def _metric_by_provider(provider_summaries: List[ProviderTaskSummary], metric_code: str) -> List[Dict[str, str]]:
metric_by_provider = []
for provider_summary in provider_summaries:
for code, value in provider_summary.metrics.items():
if code == metric_code:
metric_by_provider.append(
{
"provider_id": provider_summary.provider_id,
"value": value,
}
)
return metric_by_provider
@staticmethod
def _ads_search_task_id_by_provider_id(provider_summaries: List[ProviderTaskSummary], provider_id: str) -> str:
for provider_summary in provider_summaries:
if provider_summary.provider_id == provider_id:
return provider_summary.ads_search_task_id
raise RuntimeError(f"Provider {provider_id} not found.")
@staticmethod
def _search_task_id_by_provider_id(provider_summaries: List[ProviderTaskSummary], provider_id: str) -> str:
for provider_summary in provider_summaries:
if provider_summary.provider_id == provider_id:
return provider_summary.search_task_id
raise RuntimeError(f"Provider {provider_id} not found.")
@staticmethod
def _model_id_by_provider(provider_summaries: List[ProviderTaskSummary]) -> pd.DataFrame:
result = []
for provider_summary in provider_summaries:
result.append(
{
"provider_id": provider_summary.provider_id,
"model_id": provider_summary.ads_search_task_id,
}
)
return pd.DataFrame(result)
@staticmethod
def _max_by_metric(provider_summaries: List[ProviderTaskSummary], metric_code: str) -> Dict[str, Any]:
max_provider = None
max_metric = None
for x in SearchTask._metric_by_provider(provider_summaries, metric_code):
current_metric = float(x["value"])
if max_metric is None or current_metric > max_metric:
max_provider = x["provider_id"]
max_metric = current_metric
if max_metric is None:
raise RuntimeError(f"There is no {metric_code} available for search task.")
else:
return {"provider_id": max_provider, "value": max_metric}
def initial_max_auc(self) -> Optional[Dict[str, Any]]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "AUC"):
return self._max_by_metric(provider_summaries, "AUC")
else:
return None
def initial_max_accuracy(self) -> Optional[Dict[str, Any]]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "ACCURACY"):
return self._max_by_metric(provider_summaries, "ACCURACY")
else:
return None
def initial_max_rmse(self) -> Optional[Dict[str, Any]]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "RMSE"):
return self._max_by_metric(provider_summaries, "RMSE")
else:
return None
def initial_max_uplift(self) -> Optional[Dict[str, Any]]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "UPLIFT"):
return self._max_by_metric(provider_summaries, "UPLIFT")
else:
return None
def initial_max_hit_rate(self) -> Optional[Dict[str, Any]]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "HIT_RATE"):
return self._max_by_metric(provider_summaries, "HIT_RATE")
else:
return None
def _initial_min_hit_rate(self) -> float:
provider_summaries = self._check_finished_initial_search()
min_hit_rate = None
for x in self._metric_by_provider(provider_summaries, "HIT_RATE"):
current_value = float(x["value"])
if min_hit_rate is None or current_value < min_hit_rate:
min_hit_rate = current_value
if min_hit_rate is None:
raise RuntimeError("There is no hit rate available for search task.")
else:
return min_hit_rate
def initial_gini(self) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "GINI"):
return pd.DataFrame(self._metric_by_provider(provider_summaries, "GINI")).rename(
columns={"value": "gini"}, inplace=False
)
else:
return None
def initial_auc(self) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "AUC"):
return pd.DataFrame(self._metric_by_provider(provider_summaries, "AUC")).rename(
columns={"value": "roc-auc"}, inplace=False
)
else:
return None
def initial_accuracy(self) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "ACCURACY"):
return pd.DataFrame(self._metric_by_provider(provider_summaries, "ACCURACY")).rename(
columns={"value": "accuracy"}, inplace=False
)
else:
return None
def initial_rmse(self) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "RMSE"):
return pd.DataFrame(self._metric_by_provider(provider_summaries, "RMSE")).rename(
columns={"value": "rmse"}, inplace=False
)
else:
return None
def initial_uplift(self) -> Optional[pd.DataFrame]:
provider_summaries = self._check_finished_initial_search()
if self._has_metric(provider_summaries, "UPLIFT"):
return pd.DataFrame(self._metric_by_provider(provider_summaries, "UPLIFT")).rename(
columns={"value": "uplift"}, inplace=False
)
else:
return None
def initial_hit_rate(self) -> pd.DataFrame:
provider_summaries = self._check_finished_initial_search()
return pd.DataFrame(self._metric_by_provider(provider_summaries, "HIT_RATE")).rename(
columns={"value": "hit_rate"}, inplace=False
)
def initial_metadata(self) -> pd.DataFrame:
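        # Assemble one row per provider: model id, hit rate, plus whichever
        # quality metric (AUC, GINI, accuracy or RMSE) the provider reported.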
provider_summaries = self._check_finished_initial_search()
quality_df = None
auc_df = self.initial_auc()
gini_df = self.initial_gini()
accuracy_df = self.initial_accuracy()
rmse_df = self.initial_rmse()
if auc_df is not None:
quality_df = auc_df
elif gini_df is not None:
quality_df = gini_df
elif accuracy_df is not None:
quality_df = accuracy_df
elif rmse_df is not None:
quality_df = rmse_df
uplift_df = self.initial_uplift()
hit_rate_df = self.initial_hit_rate()
model_id_df = self._model_id_by_provider(provider_summaries)
result = pd.merge(model_id_df, hit_rate_df, on="provider_id")
if quality_df is not None:
result = | pd.merge(result, quality_df, on="provider_id") | pandas.merge |
# Copyright 2020-2021 AstroLab Software
# Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
from gatspy import periodic
import java
import copy
from astropy.time import Time
import requests
import dash
import dash_table
from dash.dependencies import Input, Output
import plotly.graph_objects as go
import plotly.express as px
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from apps.utils import convert_jd, readstamp, _data_stretch, convolve
from apps.utils import apparent_flux, dc_mag
from app import APIURL
from pyLIMA import event
from pyLIMA import telescopes
from pyLIMA import microlmodels, microltoolbox
from pyLIMA.microloutputs import create_the_fake_telescopes
from app import client, app, clientSSO
# colors_ = [
# '#1f77b4', # muted blue
# '#ff7f0e', # safety orange
# '#2ca02c', # cooked asparagus green
# '#d62728', # brick red
# '#9467bd', # muted purple
# '#8c564b', # chestnut brown
# '#e377c2', # raspberry yogurt pink
# '#7f7f7f', # middle gray
# '#bcbd22', # curry yellow-green
# '#17becf' # blue-teal
# ]
colors_ = [
"rgb(165,0,38)",
"rgb(215,48,39)",
"rgb(244,109,67)",
"rgb(253,174,97)",
"rgb(254,224,144)",
"rgb(224,243,248)",
"rgb(171,217,233)",
"rgb(116,173,209)",
"rgb(69,117,180)",
"rgb(49,54,149)"
]
all_radio_options = {
"Difference magnitude": ["Difference magnitude", "DC magnitude", "DC apparent flux"],
"DC magnitude": ["Difference magnitude", "DC magnitude", "DC apparent flux"],
"DC apparent flux": ["Difference magnitude", "DC magnitude", "DC apparent flux"]
}
layout_lightcurve = dict(
automargin=True,
margin=dict(l=50, r=30, b=0, t=0),
hovermode="closest",
hoverlabel={
'align': "left"
},
legend=dict(
font=dict(size=10),
orientation="h",
xanchor="right",
x=1,
y=1.2,
bgcolor='rgba(218, 223, 225, 0.3)'
),
xaxis={
'title': 'Observation date',
'automargin': True
},
yaxis={
'autorange': 'reversed',
'title': 'Magnitude',
'automargin': True
}
)
layout_lightcurve_preview = dict(
automargin=True,
margin=dict(l=50, r=30, b=0, t=0),
hovermode="closest",
hoverlabel={
'align': "left"
},
legend=dict(
font=dict(size=10),
orientation="h",
xanchor="right",
x=1,
y=1.2,
bgcolor='rgba(218, 223, 225, 0.3)'
),
xaxis={
'title': 'Observation date',
'automargin': True
},
yaxis={
'autorange': 'reversed',
'title': 'Magnitude',
'automargin': True
}
)
layout_phase = dict(
autosize=True,
automargin=True,
margin=dict(l=50, r=30, b=40, t=25),
hovermode="closest",
legend=dict(
font=dict(size=10),
orientation="h",
yanchor="bottom",
y=0.02,
xanchor="right",
x=1,
bgcolor='rgba(218, 223, 225, 0.3)'
),
xaxis={
'title': 'Phase'
},
yaxis={
'autorange': 'reversed',
'title': 'Apparent DC Magnitude'
},
title={
"text": "Phased data",
"y": 1.01,
"yanchor": "bottom"
}
)
layout_mulens = dict(
autosize=True,
automargin=True,
margin=dict(l=50, r=30, b=40, t=25),
hovermode="closest",
legend=dict(
font=dict(size=10),
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1,
bgcolor='rgba(218, 223, 225, 0.3)'
),
xaxis={
'title': 'Observation date'
},
yaxis={
'autorange': 'reversed',
'title': 'DC magnitude'
},
title={
"text": "pyLIMA Fit (PSPL model)",
"y": 1.01,
"yanchor": "bottom"
}
)
layout_scores = dict(
autosize=True,
automargin=True,
margin=dict(l=50, r=30, b=0, t=0),
hovermode="closest",
legend=dict(
font=dict(size=10),
orientation="h",
xanchor="right",
x=1,
y=1.2,
bgcolor='rgba(218, 223, 225, 0.3)'
),
hoverlabel={
'align': "left"
},
xaxis={
'title': 'Observation date',
'automargin': True
},
yaxis={
'title': 'Score',
'range': [0, 1]
}
)
layout_colors = dict(
autosize=True,
automargin=True,
margin=dict(l=50, r=30, b=0, t=0),
hovermode="closest",
legend=dict(
font=dict(size=10),
orientation="h",
xanchor="right",
x=1,
y=1.2,
bgcolor='rgba(218, 223, 225, 0.3)'
),
hoverlabel={
'align': "left"
},
xaxis={
'automargin': True
},
yaxis={
'title': 'Delta magnitude'
}
)
layout_colors_rate = dict(
autosize=True,
automargin=True,
margin=dict(l=50, r=30, b=0, t=0),
hovermode="closest",
legend=dict(
font=dict(size=10),
orientation="h",
xanchor="right",
x=1,
y=1.2,
bgcolor='rgba(218, 223, 225, 0.3)'
),
hoverlabel={
'align': "left"
},
xaxis={
'automargin': True
},
yaxis={
'title': 'Rate (mag/day)'
}
)
layout_sso_lightcurve = dict(
automargin=True,
margin=dict(l=50, r=30, b=0, t=0),
hovermode="closest",
hoverlabel={
'align': "left"
},
legend=dict(
font=dict(size=10),
orientation="h",
xanchor="right",
x=1,
y=1.2,
bgcolor='rgba(218, 223, 225, 0.3)'
),
xaxis={
'title': 'Observation date',
'automargin': True
},
yaxis={
'autorange': 'reversed',
'title': 'Magnitude',
'automargin': True
}
)
layout_sso_radec = dict(
automargin=True,
margin=dict(l=50, r=30, b=0, t=0),
hovermode="closest",
hoverlabel={
'align': "left"
},
legend=dict(
font=dict(size=10),
orientation="h",
xanchor="right",
x=1,
y=1.2,
bgcolor='rgba(218, 223, 225, 0.3)'
),
yaxis={
'title': 'Declination',
'automargin': True
},
xaxis={
'autorange': 'reversed',
'title': 'Right Ascension',
'automargin': True
}
)
def extract_scores(data: 'java.util.TreeMap') -> pd.DataFrame:
    """ Extract SN scores from the data
    Parameters
    ----------
    data: java.util.TreeMap
        Results from a HBase client query
    Returns
    ----------
    pdfs: pd.DataFrame
        DataFrame containing the Julian dates and the classifier scores
    """
values = ['i:jd', 'd:snn_snia_vs_nonia', 'd:snn_sn_vs_all', 'd:rfscore']
pdfs = pd.DataFrame.from_dict(data, orient='index')
if pdfs.empty:
return pdfs
return pdfs[values]
def plot_classbar(pdf, is_mobile=False):
grouped = pdf.groupby('v:classification').count()
alert_per_class = grouped['i:objectId'].to_dict()
# descending date values
top_labels = pdf['v:classification'].values[::-1]
customdata = pdf['i:jd'].apply(lambda x: convert_jd(float(x), to='iso')).values[::-1]
x_data = [[1] * len(top_labels)]
y_data = top_labels
colors = {
'Early SN Ia candidate': 'red',
'SN candidate': 'orange',
'Kilonova candidate': 'blue',
'Microlensing candidate': 'green',
'Solar System MPC': "rgb(254,224,144)",
'Solar System candidate': "rgb(171,217,233)",
'Ambiguous': 'rgb(116,196,118)',
'Unknown': '#7f7f7f'
}
colors = [colors_[-1] if j not in colors.keys() else colors[j] for j in top_labels]
fig = go.Figure()
is_seen = []
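    # Build a horizontal stacked bar with one unit-width segment per alert, colored
    # by its classification; each class appears only once in the legend.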
for i in range(0, len(x_data[0])):
for xd, yd, label in zip(x_data, y_data, top_labels):
if top_labels[i] in is_seen:
showlegend = False
else:
showlegend = True
is_seen.append(top_labels[i])
percent = np.round(alert_per_class[top_labels[i]] / len(pdf) * 100).astype(int)
if is_mobile:
name_legend = top_labels[i]
else:
name_legend = top_labels[i] + ': {}%'.format(percent)
fig.add_trace(
go.Bar(
x=[xd[i]], y=[yd],
orientation='h',
width=0.3,
showlegend=showlegend,
legendgroup=top_labels[i],
name=name_legend,
marker=dict(
color=colors[i],
),
customdata=[customdata[i]],
hovertemplate='<b>Date</b>: %{customdata}'
)
)
if is_mobile:
legend_shift = 0.0
else:
legend_shift = 0.2
fig.update_layout(
xaxis=dict(
showgrid=False,
showline=False,
showticklabels=False,
zeroline=False,
),
yaxis=dict(
showgrid=False,
showline=False,
showticklabels=False,
zeroline=False,
),
legend=dict(
bgcolor='rgba(255, 255, 255, 0)',
bordercolor='rgba(255, 255, 255, 0)',
orientation="h",
traceorder="reversed",
yanchor='bottom',
itemclick=False,
itemdoubleclick=False,
x=legend_shift
),
barmode='stack',
dragmode=False,
        paper_bgcolor='rgba(248, 248, 255, 0.0)',
        plot_bgcolor='rgba(248, 248, 255, 0.0)',
margin=dict(l=0, r=0, b=0, t=0)
)
if not is_mobile:
fig.update_layout(title_text='Individual alert classification')
fig.update_layout(title_y=0.15)
fig.update_layout(title_x=0.0)
fig.update_layout(title_font_size=12)
if is_mobile:
fig.update_layout(legend=dict(font=dict(size=10)))
return fig
@app.callback(
Output('lightcurve_cutouts', 'figure'),
[
Input('switch-mag-flux', 'value'),
Input('url', 'pathname'),
Input('object-data', 'children'),
Input('object-upper', 'children'),
Input('object-uppervalid', 'children')
])
def draw_lightcurve(switch: str, pathname: str, object_data, object_upper, object_uppervalid) -> dict:
    """ Draw object lightcurve with errorbars
    Parameters
    ----------
    switch: str
        Choose:
          - 'Difference magnitude' to display the difference magnitude
          - 'DC magnitude' to display the DC magnitude
          - 'DC apparent flux' to display the DC apparent flux
    pathname: str
        Pathname of the current webpage (should be /ZTF19...).
    Returns
    ----------
    figure: dict
    """
pdf_ = pd.read_json(object_data)
cols = [
'i:jd', 'i:magpsf', 'i:sigmapsf', 'i:fid',
'i:magnr', 'i:sigmagnr', 'i:magzpsci', 'i:isdiffpos', 'i:candid'
]
pdf = pdf_.loc[:, cols]
# type conversion
dates = pdf['i:jd'].apply(lambda x: convert_jd(float(x), to='iso'))
# shortcuts
mag = pdf['i:magpsf']
err = pdf['i:sigmapsf']
if switch == "Difference magnitude":
layout_lightcurve['yaxis']['title'] = 'Difference magnitude'
layout_lightcurve['yaxis']['autorange'] = 'reversed'
elif switch == "DC magnitude":
# inplace replacement
mag, err = np.transpose(
[
dc_mag(*args) for args in zip(
pdf['i:fid'].values,
mag.astype(float).values,
err.astype(float).values,
pdf['i:magnr'].astype(float).values,
pdf['i:sigmagnr'].astype(float).values,
pdf['i:magzpsci'].astype(float).values,
pdf['i:isdiffpos'].values
)
]
)
layout_lightcurve['yaxis']['title'] = 'Apparent DC magnitude'
layout_lightcurve['yaxis']['autorange'] = 'reversed'
elif switch == "DC apparent flux":
# inplace replacement
mag, err = np.transpose(
[
apparent_flux(*args) for args in zip(
pdf['i:fid'].astype(int).values,
mag.astype(float).values,
err.astype(float).values,
pdf['i:magnr'].astype(float).values,
pdf['i:sigmagnr'].astype(float).values,
pdf['i:magzpsci'].astype(float).values,
pdf['i:isdiffpos'].values
)
]
)
layout_lightcurve['yaxis']['title'] = 'Apparent DC flux'
layout_lightcurve['yaxis']['autorange'] = True
hovertemplate = r"""
<b>%{yaxis.title.text}</b>: %{y:.2f} ± %{error_y.array:.2f}<br>
<b>%{xaxis.title.text}</b>: %{x|%Y/%m/%d %H:%M:%S.%L}<br>
<b>mjd</b>: %{customdata}
<extra></extra>
"""
figure = {
'data': [
{
'x': dates[pdf['i:fid'] == 1],
'y': mag[pdf['i:fid'] == 1],
'error_y': {
'type': 'data',
'array': err[pdf['i:fid'] == 1],
'visible': True,
'color': '#1f77b4'
},
'mode': 'markers',
'name': 'g band',
'customdata': pdf['i:jd'].apply(lambda x: x - 2400000.5)[pdf['i:fid'] == 1],
'hovertemplate': hovertemplate,
'marker': {
'size': 12,
'color': '#1f77b4',
'symbol': 'o'}
},
{
'x': dates[pdf['i:fid'] == 2],
'y': mag[pdf['i:fid'] == 2],
'error_y': {
'type': 'data',
'array': err[pdf['i:fid'] == 2],
'visible': True,
'color': '#ff7f0e'
},
'mode': 'markers',
'name': 'r band',
'customdata': pdf['i:jd'].apply(lambda x: x - 2400000.5)[pdf['i:fid'] == 2],
'hovertemplate': hovertemplate,
'marker': {
'size': 12,
'color': '#ff7f0e',
'symbol': 'o'}
}
],
"layout": layout_lightcurve
}
if switch == "Difference magnitude":
pdf_upper = pd.read_json(object_upper)
# <b>candid</b>: %{customdata[0]}<br> not available in index tables...
hovertemplate_upper = r"""
<b>diffmaglim</b>: %{y:.2f}<br>
<b>%{xaxis.title.text}</b>: %{x|%Y/%m/%d %H:%M:%S.%L}<br>
<b>mjd</b>: %{customdata}
<extra></extra>
"""
if not pdf_upper.empty:
dates2 = pdf_upper['i:jd'].apply(lambda x: convert_jd(float(x), to='iso'))
figure['data'].append(
{
'x': dates2[pdf_upper['i:fid'] == 1],
'y': pdf_upper['i:diffmaglim'][pdf_upper['i:fid'] == 1],
'mode': 'markers',
'customdata': pdf_upper['i:jd'].apply(lambda x: x - 2400000.5)[pdf_upper['i:fid'] == 1],
'hovertemplate': hovertemplate_upper,
'marker': {
'color': '#1f77b4',
'symbol': 'triangle-down-open'
},
'showlegend': False
}
)
figure['data'].append(
{
'x': dates2[pdf_upper['i:fid'] == 2],
'y': pdf_upper['i:diffmaglim'][pdf_upper['i:fid'] == 2],
'mode': 'markers',
'customdata': pdf_upper['i:jd'].apply(lambda x: x - 2400000.5)[pdf_upper['i:fid'] == 2],
'hovertemplate': hovertemplate_upper,
'marker': {
'color': '#ff7f0e',
'symbol': 'triangle-down-open'
},
'showlegend': False
}
)
pdf_upperv = pd.read_json(object_uppervalid)
# <b>candid</b>: %{customdata[0]}<br> not available in index tables...
hovertemplate_upperv = r"""
<b>%{yaxis.title.text}</b>: %{y:.2f} ± %{error_y.array:.2f}<br>
<b>%{xaxis.title.text}</b>: %{x|%Y/%m/%d %H:%M:%S.%L}<br>
<b>mjd</b>: %{customdata}
<extra></extra>
"""
if not pdf_upperv.empty:
dates2 = pdf_upperv['i:jd'].apply(lambda x: convert_jd(float(x), to='iso'))
mask = np.array([False if i in pdf['i:jd'].values else True for i in pdf_upperv['i:jd'].values])
dates2 = dates2[mask]
pdf_upperv = pdf_upperv[mask]
figure['data'].append(
{
'x': dates2[pdf_upperv['i:fid'] == 1],
'y': pdf_upperv['i:magpsf'][pdf_upperv['i:fid'] == 1],
'error_y': {
'type': 'data',
'array': pdf_upperv['i:sigmapsf'][pdf_upperv['i:fid'] == 1],
'visible': True,
'color': '#1f77b4'
},
'mode': 'markers',
'customdata': pdf_upperv['i:jd'].apply(lambda x: x - 2400000.5)[pdf_upperv['i:fid'] == 1],
'hovertemplate': hovertemplate_upperv,
'marker': {
'color': '#1f77b4',
'symbol': 'triangle-up'
},
'showlegend': False
}
)
figure['data'].append(
{
'x': dates2[pdf_upperv['i:fid'] == 2],
'y': pdf_upperv['i:magpsf'][pdf_upperv['i:fid'] == 2],
'error_y': {
'type': 'data',
'array': pdf_upperv['i:sigmapsf'][pdf_upperv['i:fid'] == 2],
'visible': True,
'color': '#ff7f0e'
},
'mode': 'markers',
'customdata': pdf_upperv['i:jd'].apply(lambda x: x - 2400000.5)[pdf_upperv['i:fid'] == 2],
'hovertemplate': hovertemplate_upperv,
'marker': {
'color': '#ff7f0e',
'symbol': 'triangle-up'
},
'showlegend': False
}
)
return figure
@app.callback(
Output('lightcurve_scores', 'figure'),
[
Input('url', 'pathname'),
Input('object-data', 'children'),
Input('object-upper', 'children'),
Input('object-uppervalid', 'children')
])
def draw_lightcurve_sn(pathname: str, object_data, object_upper, object_uppervalid) -> dict:
""" Draw object lightcurve with errorbars (SM view - DC mag fixed)
Parameters
----------
pathname: str
Pathname of the current webpage (should be /ZTF19...).
Returns
----------
figure: dict
"""
pdf_ = pd.read_json(object_data)
cols = [
'i:jd', 'i:magpsf', 'i:sigmapsf', 'i:fid',
'i:magnr', 'i:sigmagnr', 'i:magzpsci', 'i:isdiffpos', 'i:candid'
]
pdf = pdf_.loc[:, cols]
# type conversion
dates = pdf['i:jd'].apply(lambda x: convert_jd(float(x), to='iso'))
# shortcuts
mag = pdf['i:magpsf']
err = pdf['i:sigmapsf']
# inplace replacement
mag, err = np.transpose(
[
dc_mag(*args) for args in zip(
pdf['i:fid'].values,
mag.astype(float).values,
err.astype(float).values,
pdf['i:magnr'].astype(float).values,
pdf['i:sigmagnr'].astype(float).values,
pdf['i:magzpsci'].astype(float).values,
pdf['i:isdiffpos'].values
)
]
)
layout_lightcurve['yaxis']['title'] = 'Apparent DC magnitude'
layout_lightcurve['yaxis']['autorange'] = 'reversed'
hovertemplate = r"""
<b>%{yaxis.title.text}</b>: %{y:.2f} ± %{error_y.array:.2f}<br>
<b>%{xaxis.title.text}</b>: %{x|%Y/%m/%d %H:%M:%S.%L}<br>
<b>mjd</b>: %{customdata}
<extra></extra>
"""
figure = {
'data': [
{
'x': dates[pdf['i:fid'] == 1],
'y': mag[pdf['i:fid'] == 1],
'error_y': {
'type': 'data',
'array': err[pdf['i:fid'] == 1],
'visible': True,
'color': '#1f77b4'
},
'mode': 'markers',
'name': 'g band',
'customdata': pdf['i:jd'].apply(lambda x: x - 2400000.5)[pdf['i:fid'] == 1],
'hovertemplate': hovertemplate,
'marker': {
'size': 12,
'color': '#1f77b4',
'symbol': 'o'}
},
{
'x': dates[pdf['i:fid'] == 2],
'y': mag[pdf['i:fid'] == 2],
'error_y': {
'type': 'data',
'array': err[pdf['i:fid'] == 2],
'visible': True,
'color': '#ff7f0e'
},
'mode': 'markers',
'name': 'r band',
'customdata': pdf['i:jd'].apply(lambda x: x - 2400000.5)[pdf['i:fid'] == 2],
'hovertemplate': hovertemplate,
'marker': {
'size': 12,
'color': '#ff7f0e',
'symbol': 'o'}
}
],
"layout": layout_lightcurve
}
return figure
def draw_lightcurve_preview(name) -> dict:
    """ Draw object lightcurve with errorbars (preview service - difference magnitude)
    Parameters
    ----------
    name: str
        ZTF object ID (e.g. ZTF19...)
    Returns
    ----------
    figure: dict
    """
cols = [
'i:jd', 'i:magpsf', 'i:sigmapsf', 'i:fid',
'i:magnr', 'i:sigmagnr', 'i:magzpsci', 'i:isdiffpos', 'i:candid'
]
r = requests.post(
'{}/api/v1/objects'.format(APIURL),
json={
'objectId': name,
'withupperlim': 'True',
'columns': ",".join(cols),
'output-format': 'json'
}
)
pdf_ = pd.read_json(r.content)
pdf = pdf_.loc[:, cols]
# Mask upper-limits (but keep measurements with bad quality)
mag_ = pdf['i:magpsf']
mask = ~np.isnan(mag_)
pdf = pdf[mask]
# type conversion
dates = pdf['i:jd'].apply(lambda x: convert_jd(float(x), to='iso'))
# shortcuts
mag = pdf['i:magpsf']
err = pdf['i:sigmapsf']
layout_lightcurve_preview['yaxis']['title'] = 'Difference magnitude'
layout_lightcurve_preview['yaxis']['autorange'] = 'reversed'
layout_lightcurve_preview['paper_bgcolor'] = 'rgba(0,0,0,0.0)'
layout_lightcurve_preview['plot_bgcolor'] = 'rgba(0,0,0,0.2)'
hovertemplate = r"""
<b>%{yaxis.title.text}</b>: %{y:.2f} ± %{error_y.array:.2f}<br>
<b>%{xaxis.title.text}</b>: %{x|%Y/%m/%d %H:%M:%S.%L}<br>
<b>mjd</b>: %{customdata}
<extra></extra>
"""
figure = {
'data': [
{
'x': dates[pdf['i:fid'] == 1],
'y': mag[pdf['i:fid'] == 1],
'error_y': {
'type': 'data',
'array': err[pdf['i:fid'] == 1],
'visible': True,
'color': '#1f77b4'
},
'mode': 'markers',
'name': 'g band',
'customdata': pdf['i:jd'].apply(lambda x: x - 2400000.5)[pdf['i:fid'] == 1],
'hovertemplate': hovertemplate,
'marker': {
'size': 12,
'color': '#1f77b4',
'symbol': 'o'}
},
{
'x': dates[pdf['i:fid'] == 2],
'y': mag[pdf['i:fid'] == 2],
'error_y': {
'type': 'data',
'array': err[pdf['i:fid'] == 2],
'visible': True,
'color': '#ff7f0e'
},
'mode': 'markers',
'name': 'r band',
'customdata': pdf['i:jd'].apply(lambda x: x - 2400000.5)[pdf['i:fid'] == 2],
'hovertemplate': hovertemplate,
'marker': {
'size': 12,
'color': '#ff7f0e',
'symbol': 'o'}
}
],
"layout": layout_lightcurve_preview
}
return figure
@app.callback(
Output('scores', 'figure'),
[
Input('object-data', 'children'),
])
def draw_scores(object_data) -> dict:
    """ Draw scores from SNN module
    Parameters
    ----------
    object_data: str
        JSON-encoded pandas DataFrame with the alert data
    Returns
    ----------
    figure: dict
    TODO: memoise me
    """
pdf = pd.read_json(object_data)
# type conversion
dates = pdf['i:jd'].apply(lambda x: convert_jd(float(x), to='iso'))
hovertemplate = """
<b>%{customdata[0]}</b>: %{y:.2f}<br>
<b>%{xaxis.title.text}</b>: %{x|%Y/%m/%d %H:%M:%S.%L}<br>
<b>mjd</b>: %{customdata[1]}
<extra></extra>
"""
figure = {
'data': [
{
'x': dates,
'y': [0.5] * len(dates),
'mode': 'lines',
'showlegend': False,
'hoverinfo': 'skip',
'line': {
'color': 'black',
'width': 2.5,
'dash': 'dash'
}
},
{
'x': dates,
'y': pdf['d:snn_snia_vs_nonia'],
'mode': 'markers',
'name': 'SN Ia score',
'customdata': list(
zip(
['SN Ia score'] * len(pdf),
pdf['i:jd'].apply(lambda x: float(x) - 2400000.5),
)
),
'hovertemplate': hovertemplate,
'marker': {
'size': 10,
'color': '#2ca02c',
'symbol': 'circle'}
},
{
'x': dates,
'y': pdf['d:snn_sn_vs_all'],
'mode': 'markers',
'name': 'SNe score',
'customdata': list(
zip(
['SNe score'] * len(pdf),
pdf['i:jd'].apply(lambda x: float(x) - 2400000.5),
)
),
'hovertemplate': hovertemplate,
'marker': {
'size': 10,
'color': '#d62728',
'symbol': 'square'}
},
{
'x': dates,
'y': pdf['d:rfscore'],
'mode': 'markers',
'name': 'Early SN Ia score',
'customdata': list(
zip(
['Early SN Ia score'] * len(pdf),
pdf['i:jd'].apply(lambda x: float(x) - 2400000.5),
)
),
'hovertemplate': hovertemplate,
'marker': {
'size': 10,
'color': '#9467bd',
'symbol': 'diamond'}
}
],
"layout": layout_scores
}
return figure
@app.callback(
Output('colors', 'figure'),
[
Input('object-data', 'children'),
])
def draw_color(object_data) -> dict:
    """ Draw color evolution
    Parameters
    ----------
    object_data: str
        JSON-encoded pandas DataFrame with the alert data
    Returns
    ----------
    figure: dict
    TODO: memoise me
    """
pdf = pd.read_json(object_data)
# type conversion
dates = pdf['i:jd'].apply(lambda x: convert_jd(float(x), to='iso'))
hovertemplate = """
<b>%{customdata[0]}</b>: %{y:.3f}<br>
<b>mjd</b>: %{customdata[1]}
<extra></extra>
"""
m1 = pdf['i:fid'] == 1
m2 = pdf['i:fid'] == 2
figure = {
'data': [
{
'x': dates,
'y': pdf['v:g-r'],
'mode': 'markers',
'name': 'delta g-r (mag)',
'customdata': list(
zip(
['delta g-r'] * len(pdf['i:jd']),
pdf['i:jd'].apply(lambda x: float(x) - 2400000.5),
)
),
'hovertemplate': hovertemplate,
'marker': {
'size': 10,
'color': '#2ca02c',
'symbol': 'circle'
}
},
{
'x': dates[m1],
'y': pdf['v:dg'][m1],
'mode': 'markers',
'name': 'delta g (mag)',
'customdata': list(
zip(
['delta g'] * len(pdf['i:jd'][m1]),
pdf['i:jd'].apply(lambda x: float(x) - 2400000.5)[m1],
)
),
'hovertemplate': hovertemplate,
'marker': {
'size': 10,
'color': '#d62728',
'symbol': 'square'
}
},
{
'x': dates[m2],
'y': pdf['v:dr'][m2],
'mode': 'markers',
'name': 'delta r (mag)',
'customdata': list(
zip(
['delta r'] * len(pdf['i:jd'][m2]),
pdf['i:jd'].apply(lambda x: float(x) - 2400000.5)[m2],
)
),
'hovertemplate': hovertemplate,
'marker': {
'size': 10,
'color': '#9467bd',
'symbol': 'diamond'
}
}
],
"layout": layout_colors
}
return figure
@app.callback(
Output('colors_rate', 'figure'),
[
Input('object-data', 'children'),
])
def draw_color_rate(object_data) -> dict:
    """ Draw color rate
    Parameters
    ----------
    object_data: str
        JSON-encoded pandas DataFrame with the alert data
    Returns
    ----------
    figure: dict
    TODO: memoise me
    """
pdf = pd.read_json(object_data)
# type conversion
dates = pdf['i:jd'].apply(lambda x: convert_jd(float(x), to='iso'))
hovertemplate_rate = """
<b>%{customdata[0]} in mag/day</b>: %{y:.3f}<br>
<b>mjd</b>: %{customdata[1]}
<extra></extra>
"""
m1 = pdf['i:fid'] == 1
m2 = pdf['i:fid'] == 2
figure = {
'data': [
{
'x': dates,
'y': pdf['v:rate(g-r)'],
'mode': 'markers',
'name': 'rate g-r (mag/day)',
'customdata': list(
zip(
['rate(delta g)'] * len(pdf['i:jd']),
pdf['i:jd'].apply(lambda x: float(x) - 2400000.5),
)
),
'hovertemplate': hovertemplate_rate,
'marker': {
'size': 10,
'color': '#2ca02c',
'symbol': 'circle'
}
},
{
'x': dates[m1],
'y': pdf['v:rate(dg)'][m1],
'mode': 'markers',
'name': 'rate g (mag/day)',
'customdata': list(
zip(
['rate(delta g)'] * len(pdf['i:jd'][m1]),
pdf['i:jd'].apply(lambda x: float(x) - 2400000.5)[m1],
)
),
'hovertemplate': hovertemplate_rate,
'marker': {
'size': 10,
'color': '#d62728',
'symbol': 'square'
}
},
{
'x': dates[m2],
'y': pdf['v:rate(dr)'][m2],
'mode': 'markers',
'name': 'rate r (mag/day)',
'customdata': list(
zip(
['rate(delta r)'] * len(pdf['i:jd'][m2]),
pdf['i:jd'].apply(lambda x: float(x) - 2400000.5)[m2],
)
),
'hovertemplate': hovertemplate_rate,
'marker': {
'size': 10,
'color': '#9467bd',
'symbol': 'diamond'
}
},
],
"layout": layout_colors_rate
}
return figure
def extract_cutout(object_data, time0, kind):
""" Extract cutout data from the alert
Parameters
----------
object_data: json
Jsonified pandas DataFrame
time0: str
ISO time of the cutout to extract
kind: str
science, template, or difference
Returns
----------
data: np.array
2D array containing cutout data
"""
values = [
'i:jd',
'b:cutout{}_stampData'.format(kind.capitalize()),
]
pdf_ = pd.read_json(object_data)
pdfs = pdf_.loc[:, values]
pdfs = pdfs.sort_values('i:jd', ascending=False)
if time0 is None:
position = 0
else:
# Round to avoid numerical precision issues
jds = pdfs['i:jd'].apply(lambda x: np.round(x, 3)).values
jd0 = np.round(Time(time0, format='iso').jd, 3)
position = np.where(jds == jd0)[0][0]
# Grab the cutout data
cutout = readstamp(
client.repository().get(
pdfs['b:cutout{}_stampData'.format(kind.capitalize())].values[position]
)
)
return cutout
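# Example (sketch): grab the most recent science stamp of an object whose alert data
# has already been loaded as JSON (time0=None selects the latest alert):
#   stamp = extract_cutout(object_data, None, kind='science')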
@app.callback(
Output("stamps", "children"),
[
Input('lightcurve_cutouts', 'clickData'),
Input('object-data', 'children'),
])
def draw_cutouts(clickData, object_data):
""" Draw cutouts data based on lightcurve data
"""
if clickData is not None:
jd0 = clickData['points'][0]['x']
else:
jd0 = None
figs = []
for kind in ['science', 'template', 'difference']:
try:
data = extract_cutout(object_data, jd0, kind=kind)
figs.append(draw_cutout(data, kind))
except OSError:
data = dcc.Markdown("Load fail, refresh the page")
figs.append(data)
return figs
@app.callback(
Output("stamps_mobile", "children"),
[
Input('object-data', 'children'),
Input('is-mobile', 'children')
])
def draw_cutouts_mobile(object_data, is_mobile):
""" Draw cutouts data based on lightcurve data
"""
figs = []
for kind in ['science', 'template', 'difference']:
try:
data = extract_cutout(object_data, None, kind=kind)
figs.append(draw_cutout(data, kind, is_mobile=is_mobile))
except OSError:
data = dcc.Markdown("Load fail, refresh the page")
figs.append(data)
return figs
def draw_cutouts_quickview(name):
""" Draw Science cutout data for the preview service
"""
figs = []
for kind in ['science']:
try:
# transfer only necessary columns
cols = [
'i:jd',
'b:cutout{}_stampData'.format(kind.capitalize()),
]
# Transfer cutout name data
r = requests.post(
'{}/api/v1/objects'.format(APIURL),
json={
'objectId': name,
'columns': ','.join(cols)
}
)
object_data = r.content
data = extract_cutout(object_data, None, kind=kind)
figs.append(draw_cutout(data, kind, is_mobile=True))
except OSError:
data = dcc.Markdown("Load fail, refresh the page")
figs.append(data)
return figs
def create_circular_mask(h, w, center=None, radius=None):
    """ Create a boolean circular mask of shape (h, w)
    Pixels within `radius` of `center` are True, all others False.
    """
if center is None: # use the middle of the image
center = (int(w/2), int(h/2))
if radius is None: # use the smallest distance between the center and image walls
radius = min(center[0], center[1], w-center[0], h-center[1])
Y, X = np.ogrid[:h, :w]
dist_from_center = np.sqrt((X - center[0])**2 + (Y-center[1])**2)
mask = dist_from_center <= radius
return mask
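# Example (sketch): blank out everything outside the largest inscribed circle
#   mask = create_circular_mask(63, 63)
#   data[~mask] = np.nan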
def sigmoid(img: list) -> list:
""" Sigmoid function used for img_normalizer
Parameters
-----------
img: float array
a float array representing a non-normalized image
Returns
-----------
out: float array
"""
# Compute mean and std of the image
img_mean, img_std = img.mean(), img.std()
    # standardise the image: zero mean, unit standard deviation
img_normalize = (img - img_mean) / img_std
# image inversion
inv_norm = -img_normalize
# compute exponential of inv img
exp_norm = np.exp(inv_norm)
# perform sigmoid calculation and return it
return 1 / (1 + exp_norm)
def sigmoid_normalizer(img: list, vmin: float, vmax: float) -> list:
""" Image normalisation between vmin and vmax using Sigmoid function
Parameters
-----------
img: float array
a float array representing a non-normalized image
Returns
-----------
out: float array where data are bounded between vmin and vmax
"""
return (vmax - vmin) * sigmoid(img) + vmin
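# Example (sketch): map an arbitrary cutout onto [0, 1] before display
#   normalized = sigmoid_normalizer(np.random.rand(63, 63), vmin=0, vmax=1)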
def legacy_normalizer(data: list, stretch='asinh', pmin=0.5, pmax=99.5) -> list:
""" Old cutout normalizer which use the central pixel
Parameters
-----------
data: float array
a float array representing a non-normalized image
Returns
-----------
    out: float array where data are bounded between vmin and vmax
"""
size = len(data)
vmax = data[int(size / 2), int(size / 2)]
vmin = np.min(data) + 0.2 * np.median(np.abs(data - np.median(data)))
return _data_stretch(data, vmin=vmin, vmax=vmax, pmin=pmin, pmax=pmax, stretch=stretch)
def draw_cutout(data, title, lower_bound=0, upper_bound=1, is_mobile=False):
""" Draw a cutout data
"""
# Update graph data for stamps
data = np.nan_to_num(data)
data = sigmoid_normalizer(data, lower_bound, upper_bound)
data = data[::-1]
data = convolve(data, smooth=1, kernel='gauss')
if is_mobile:
mask = create_circular_mask(len(data), len(data[0]), center=None, radius=None)
data[~mask] = np.nan
if is_mobile:
zsmooth = 'fast'
else:
zsmooth = False
fig = go.Figure(
data=go.Heatmap(
z=data, showscale=False, hoverinfo='skip', colorscale='Greys_r', zsmooth=zsmooth
)
)
axis_template = dict(
autorange=True,
showgrid=False, zeroline=False,
linecolor='black', showticklabels=False,
ticks='')
fig.update_layout(
title='',
margin=dict(t=0, r=0, b=0, l=0),
xaxis=axis_template,
yaxis=axis_template,
showlegend=True,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
if not is_mobile:
fig.update_layout(width=150, height=150)
style = {'display': 'inline-block', 'height': '10pc', 'width': '10pc'}
else:
style = {'display': 'inline-block', 'height': '5pc', 'width': '5pc'}
graph = dcc.Graph(
id='{}-stamps'.format(title),
figure=fig,
style=style,
config={'displayModeBar': False}
)
return graph
@app.callback(
Output('variable_plot', 'children'),
[
Input('nterms_base', 'value'),
Input('nterms_band', 'value'),
Input('manual_period', 'value'),
Input('submit_variable', 'n_clicks'),
Input('object-data', 'children')
])
def plot_variable_star(nterms_base, nterms_band, manual_period, n_clicks, object_data):
""" Fit for the period of a star using gatspy
See https://zenodo.org/record/47887
See https://ui.adsabs.harvard.edu/abs/2015ApJ...812...18V/abstract
TODO: clean me
"""
if type(nterms_base) not in [int]:
return {'data': [], "layout": layout_phase}
if type(nterms_band) not in [int]:
return {'data': [], "layout": layout_phase}
if manual_period is not None and type(manual_period) not in [int, float]:
return {'data': [], "layout": layout_phase}
if n_clicks is not None:
pdf_ = pd.read_json(object_data)
cols = [
'i:jd', 'i:magpsf', 'i:sigmapsf', 'i:fid',
'i:magnr', 'i:sigmagnr', 'i:magzpsci', 'i:isdiffpos', 'i:objectId'
]
pdf = pdf_.loc[:, cols]
pdf['i:fid'] = pdf['i:fid'].astype(str)
pdf = pdf.sort_values('i:jd', ascending=False)
mag_dc, err_dc = np.transpose(
[
dc_mag(*args) for args in zip(
pdf['i:fid'].astype(int).values,
pdf['i:magpsf'].astype(float).values,
pdf['i:sigmapsf'].astype(float).values,
pdf['i:magnr'].astype(float).values,
pdf['i:sigmagnr'].astype(float).values,
pdf['i:magzpsci'].astype(float).values,
pdf['i:isdiffpos'].values
)
]
)
jd = pdf['i:jd']
fit_period = False if manual_period is not None else True
model = periodic.LombScargleMultiband(
Nterms_base=int(nterms_base),
Nterms_band=int(nterms_band),
fit_period=fit_period
)
# Not sure about that...
model.optimizer.period_range = (0.1, 1.2)
model.optimizer.quiet = True
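        # The optimizer only searches periods between 0.1 and 1.2 days; longer-period
        # variables will not be recovered unless a manual period is provided.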
model.fit(
jd.astype(float),
mag_dc,
err_dc,
pdf['i:fid'].astype(int)
)
if fit_period:
period = model.best_period
else:
period = manual_period
phase = jd.astype(float).values % period
tfit = np.linspace(0, period, 100)
layout_phase_ = copy.deepcopy(layout_phase)
layout_phase_['title']['text'] = 'Period: {} days - score: {:.2f}'.format(period, model.score(period))
if '1' in np.unique(pdf['i:fid'].values):
plot_filt1 = {
'x': phase[pdf['i:fid'] == '1'],
'y': mag_dc[pdf['i:fid'] == '1'],
'error_y': {
'type': 'data',
'array': err_dc[pdf['i:fid'] == '1'],
'visible': True,
'color': '#1f77b4'
},
'mode': 'markers',
'name': 'g band',
'text': phase[pdf['i:fid'] == '1'],
'marker': {
'size': 12,
'color': '#1f77b4',
'symbol': 'o'}
}
fit_filt1 = {
'x': tfit,
'y': model.predict(tfit, period=period, filts=1),
'mode': 'lines',
'name': 'fit g band',
'showlegend': False,
'line': {
'color': '#1f77b4',
}
}
else:
plot_filt1 = {}
fit_filt1 = {}
if '2' in np.unique(pdf['i:fid'].values):
plot_filt2 = {
'x': phase[pdf['i:fid'] == '2'],
'y': mag_dc[pdf['i:fid'] == '2'],
'error_y': {
'type': 'data',
'array': err_dc[pdf['i:fid'] == '2'],
'visible': True,
'color': '#ff7f0e'
},
'mode': 'markers',
'name': 'r band',
'text': phase[pdf['i:fid'] == '2'],
'marker': {
'size': 12,
'color': '#ff7f0e',
'symbol': 'o'}
}
fit_filt2 = {
'x': tfit,
'y': model.predict(tfit, period=period, filts=2),
'mode': 'lines',
'name': 'fit r band',
'showlegend': False,
'line': {
'color': '#ff7f0e',
}
}
else:
plot_filt2 = {}
fit_filt2 = {}
figure = {
'data': [
plot_filt1,
fit_filt1,
plot_filt2,
fit_filt2
],
"layout": layout_phase_
}
graph = dcc.Graph(
figure=figure,
style={
'width': '100%',
'height': '25pc'
},
config={'displayModeBar': False}
)
card = dbc.Card(
dbc.CardBody(graph),
className="mt-3"
)
return card
# quite referentially opaque...
return ""
@app.callback(
[
Output('mulens_plot', 'children'),
Output('mulens_params', 'children'),
],
[
Input('submit_mulens', 'n_clicks'),
Input('object-data', 'children')
])
def plot_mulens(n_clicks, object_data):
""" Fit for microlensing event
TODO: implement a fit using pyLIMA
"""
if n_clicks is not None:
pdf_ = pd.read_json(object_data)
cols = [
'i:jd', 'i:magpsf', 'i:sigmapsf', 'i:fid', 'i:ra', 'i:dec',
'i:magnr', 'i:sigmagnr', 'i:magzpsci', 'i:isdiffpos', 'i:objectId'
]
pdf = pdf_.loc[:, cols]
pdf['i:fid'] = pdf['i:fid'].astype(str)
pdf = pdf.sort_values('i:jd', ascending=False)
mag_dc, err_dc = np.transpose(
[
dc_mag(*args) for args in zip(
pdf['i:fid'].astype(int).values,
pdf['i:magpsf'].astype(float).values,
pdf['i:sigmapsf'].astype(float).values,
pdf['i:magnr'].astype(float).values,
pdf['i:sigmagnr'].astype(float).values,
pdf['i:magzpsci'].astype(float).values,
pdf['i:isdiffpos'].values
)
]
)
current_event = event.Event()
current_event.name = pdf['i:objectId'].values[0]
current_event.ra = pdf['i:ra'].values[0]
current_event.dec = pdf['i:dec'].values[0]
filts = {'1': 'g', '2': 'r'}
for fid in np.unique(pdf['i:fid'].values):
mask = pdf['i:fid'].values == fid
telescope = telescopes.Telescope(
name='ztf_{}'.format(filts[fid]),
camera_filter=format(filts[fid]),
light_curve_magnitude=np.transpose(
[
pdf['i:jd'].values[mask],
mag_dc[mask],
err_dc[mask]
]
),
light_curve_magnitude_dictionnary={
'time': 0,
'mag': 1,
'err_mag': 2
}
)
current_event.telescopes.append(telescope)
        # The simplest model: point-source point-lens (PSPL)
mulens_model = microlmodels.create_model('PSPL', current_event)
current_event.fit(mulens_model, 'DE')
# 4 parameters
dof = len(pdf) - 4 - 1
results = current_event.fits[0]
normalised_lightcurves = microltoolbox.align_the_data_to_the_reference_telescope(results, 0, results.fit_results)
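        # Align all telescopes/filters to the reference telescope (index 0) so the
        # data can be over-plotted against a single model light curve.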
# Model
create_the_fake_telescopes(results, results.fit_results)
telescope_ = results.event.fake_telescopes[0]
flux_model = mulens_model.compute_the_microlensing_model(telescope_, results.model.compute_pyLIMA_parameters(results.fit_results))[0]
time = telescope_.lightcurve_flux[:, 0]
magnitude = microltoolbox.flux_to_magnitude(flux_model)
if '1' in np.unique(pdf['i:fid'].values):
plot_filt1 = {
'x': [convert_jd(t, to='iso') for t in normalised_lightcurves[0][:, 0]],
'y': normalised_lightcurves[0][:, 1],
'error_y': {
'type': 'data',
'array': normalised_lightcurves[0][:, 2],
'visible': True,
'color': '#1f77b4'
},
'mode': 'markers',
'name': 'g band',
'text': [convert_jd(t, to='iso') for t in normalised_lightcurves[0][:, 0]],
'marker': {
'size': 12,
'color': '#1f77b4',
'symbol': 'o'}
}
else:
plot_filt1 = {}
if '2' in np.unique(pdf['i:fid'].values):
# only filter r
if len(np.unique(pdf['i:fid'].values)) == 1:
index = 0
else:
index = 1
plot_filt2 = {
'x': [convert_jd(t, to='iso') for t in normalised_lightcurves[index][:, 0]],
'y': normalised_lightcurves[index][:, 1],
'error_y': {
'type': 'data',
'array': normalised_lightcurves[index][:, 2],
'visible': True,
'color': '#ff7f0e'
},
'mode': 'markers',
'name': 'r band',
'text': [convert_jd(t, to='iso') for t in normalised_lightcurves[index][:, 0]],
'marker': {
'size': 12,
'color': '#ff7f0e',
'symbol': 'o'}
}
else:
plot_filt2 = {}
fit_filt = {
'x': [convert_jd(float(t), to='iso') for t in time],
'y': magnitude,
'mode': 'lines',
'name': 'fit',
'showlegend': False,
'line': {
'color': '#7f7f7f',
}
}
figure = {
'data': [
fit_filt,
plot_filt1,
plot_filt2
],
"layout": layout_mulens
}
if sum([len(i) for i in figure['data']]) > 0:
graph = dcc.Graph(
figure=figure,
style={
'width': '100%',
'height': '25pc'
},
config={'displayModeBar': False}
)
else:
graph = ""
# fitted parameters
names = results.model.model_dictionnary
params = results.fit_results
err = np.diag(np.sqrt(results.fit_covariance))
mulens_params = """
```python
# Fitted parameters
t0: {} +/- {} (jd)
tE: {} +/- {} (days)
u0: {} +/- {}
chi2/dof: {}
```
---
""".format(
params[names['to']],
err[names['to']],
params[names['tE']],
err[names['tE']],
params[names['uo']],
err[names['uo']],
params[-1] / dof
)
card = dbc.Card(
dbc.CardBody(graph),
className="mt-3"
)
return card, mulens_params
mulens_params = """
```python
# Fitted parameters
t0: None
tE: None
u0: None
chi2: None
```
---
"""
return "", mulens_params
@app.callback(
Output('aladin-lite-div', 'run'), Input('object-data', 'children'))
def integrate_aladin_lite(object_data):
""" Integrate aladin light in the 2nd Tab of the dashboard.
the default parameters are:
* PanSTARRS colors
* FoV = 0.02 deg
* SIMBAD catalig overlayed.
Callbacks
----------
Input: takes the alert ID
Output: Display a sky image around the alert position from aladin.
Parameters
----------
alert_id: str
ID of the alert
"""
pdf_ = pd.read_json(object_data)
cols = ['i:jd', 'i:ra', 'i:dec']
pdf = pdf_.loc[:, cols]
pdf = pdf.sort_values('i:jd', ascending=False)
# Coordinate of the current alert
ra0 = pdf['i:ra'].values[0]
dec0 = pdf['i:dec'].values[0]
# Javascript. Note the use {{}} for dictionary
img = """
var aladin = A.aladin('#aladin-lite-div',
{{
survey: 'P/PanSTARRS/DR1/color/z/zg/g',
fov: 0.025,
target: '{} {}',
reticleColor: '#ff89ff',
reticleSize: 32
}});
var cat = 'https://axel.u-strasbg.fr/HiPSCatService/Simbad';
var hips = A.catalogHiPS(cat, {{onClick: 'showTable', name: 'Simbad'}});
aladin.addCatalog(hips);
""".format(ra0, dec0)
# img cannot be executed directly because of formatting
# We split line-by-line and remove comments
img_to_show = [i for i in img.split('\n') if '// ' not in i]
return " ".join(img_to_show)
@app.callback(
Output('sso_lightcurve', 'children'),
[
Input('url', 'pathname'),
Input('object-sso', 'children')
])
def draw_sso_lightcurve(pathname: str, object_sso) -> dict:
""" Draw SSO object lightcurve with errorbars
Parameters
----------
pathname: str
Pathname of the current webpage (should be /ZTF19...).
Returns
----------
figure: dict
"""
    pdf = pd.read_json(object_sso)
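    # Minimal sketch of the SSO lightcurve figure; it assumes the SSO payload exposes
    # the 'i:jd', 'i:fid', 'i:magpsf' and 'i:sigmapsf' columns, as in the other
    # lightcurve callbacks above.
    if pdf.empty:
        return html.Div()
    dates = pdf['i:jd'].apply(lambda x: convert_jd(float(x), to='iso'))
    figure = {
        'data': [
            {
                'x': dates[pdf['i:fid'] == fid],
                'y': pdf['i:magpsf'][pdf['i:fid'] == fid],
                'error_y': {
                    'type': 'data',
                    'array': pdf['i:sigmapsf'][pdf['i:fid'] == fid],
                    'visible': True,
                    'color': color
                },
                'mode': 'markers',
                'name': name,
                'marker': {'size': 12, 'color': color, 'symbol': 'o'}
            } for fid, color, name in [(1, '#1f77b4', 'g band'), (2, '#ff7f0e', 'r band')]
        ],
        "layout": layout_sso_lightcurve
    }
    return dcc.Graph(
        figure=figure,
        style={'width': '100%', 'height': '25pc'},
        config={'displayModeBar': False}
    )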
import numpy as np
import pandas as pd
import re
#import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn import model_selection
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
import pickle
#Analysing Data
data = pd.read_csv("Scraped-Data/dataset_clean.csv", encoding ="ISO-8859-1")
df = pd.DataFrame(data)
df_1 = pd.get_dummies(df.Target)
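# One-hot encode the symptoms (Target); after concatenating with the disease column
# (Source) and grouping by disease, this yields a disease-by-symptom indicator matrix.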
df_s = df['Source']
df_pivoted = pd.concat([df_s,df_1], axis=1)
df_pivoted.drop_duplicates(keep='first',inplace=True)
df_pivoted[:5]
cols = df_pivoted.columns
cols = cols[1:]
df_pivoted = df_pivoted.groupby('Source').sum()
df_pivoted = df_pivoted.reset_index()
df_pivoted[:5]
df_pivoted.to_csv("Scraped-Data/df_pivoted.csv")
x = df_pivoted[cols]
y = df_pivoted['Source']
#print(len(df_pivoted))
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)
mnb = MultinomialNB()
mnb = mnb.fit(x_train, y_train)
#print(mnb.score(x_test, y_test))
mnb_tot = MultinomialNB()
mnb_tot = mnb_tot.fit(x, y)
print(mnb_tot.score(x, y))
disease_pred = mnb_tot.predict(x)
disease_real = y.values
for i in range(0, len(disease_real)):
if disease_pred[i]!=disease_real[i]:
print ('Pred: {0} Actual:{1}'.format(disease_pred[i], disease_real[i]))
print ("DecisionTree")
dt = DecisionTreeClassifier()
clf_dt=dt.fit(x,y)
print ("Acurracy: ", clf_dt.score(x,y))
data = pd.read_csv("Manual-Data/Training.csv")
"""
This file was used to manually but quickly label all the source csv files.
It shows a schema, asks if you actually want to use this schema (you can discard it if it is not usable).
It then asks per column if you want to use/discard the column, and what the new column name should be.
All csv files are then stored in a folder.
"""
import os
from schema_matching import *
import sys
import pandas as pd
from subprocess import call
from os.path import isfile
from io import StringIO
class Dataset_Labeler():
"""
Manually label a dataset.
    We track the used classes and ask for confirmation before adding a new one;
    we do this because mistakes can happen, and that's okay.
"""
def __init__(self, path_to_batcher="../batchtool/cases", storage="all_csv_labeled/", path_to_input="/input/"):
self.path_to_batcher = path_to_batcher
self.storage_folder = storage
self.path_to_input = path_to_input
self.used_cases_filename = "used_cases"
self.classes_filename = "classes"
self.sr = Schema_Reader()
try:
self.classes = read_file(storage + "system/" + self.classes_filename)
except:
self.classes = ['city', 'postcode', 'kvk_number', 'address', 'company_name',
'email', 'telephone_nr', 'domain_name', 'bik', 'legal_type', 'sbi_code', 'house_number', 'addition']
try:
self.used_cases = read_file(storage + "system/" + self.used_cases_filename)
except:
self.used_cases = []
def label_dataset(self):
"""
Loop through the folders and get the cases
"""
for foldername in os.listdir(self.path_to_batcher):
if foldername not in self.used_cases:
path_to_batch_input = self.path_to_batcher + "/" + foldername + self.path_to_input
try:
self.handle_schema_folder(path_to_batch_input, foldername)
except FileNotFoundError:
print("Input not found for: ")
print(foldername)
def handle_schema_folder(self, path_to_batch_input, batch_name):
"""
Manually label a case and store it
"""
for filename in os.listdir(path_to_batch_input):
if filename[-4::].lower() == ".csv":
schema_path = path_to_batch_input + filename
try:
headers, columns = self.get_schema(schema_path)
use_schema = self.show_schema(self.storage_folder + "system/tmp.csv")
if use_schema:
headers, columns = self.modify_schema(headers, columns)
self.store_schema(headers, columns, batch_name)
except UnicodeDecodeError:
pass
self.used_cases.append(batch_name)
self.used_cases = list(set(self.used_cases))
store_file(self.storage_folder + "system/" + self.used_cases_filename, self.used_cases)
store_file(self.storage_folder + "system/" + self.classes_filename, self.classes)
def modify_schema(self, headers, columns):
"""
Classify every column of the schema, and return the new headers with their columns.
        Check if the class you entered has already been used, otherwise add it.
"""
new_headers = []
new_columns = []
for i in range(0, len(headers)):
column = columns[i]
header = headers[i]
print_list(reversed(column))
print("Header: " + str(header))
print("length: " + str(len(column)))
print(self.classes)
while True:
text = input("What will the new header name be?\n")
if text in self.classes:
new_headers.append(text)
new_columns.append(column)
break
elif text == 's':
break
else:
text2 = input("Class not in classes, was it a mistake or do you want to add it? (a)\n")
if text2 == 'a':
self.classes.append(text)
new_headers.append(text)
new_columns.append(column)
break
return new_headers, new_columns
def show_schema(self, path):
"""
Show and validate schema
"""
command = "tad " + path
call(command, shell=True)
text = ""
while True:
text = str(input("Do you want to use this schema? (y/n)\n"))
if text == 'y' or text == 'n':
break
print("Wrong input, try again")
if text == 'y':
return True
elif text == 'n':
return False
def get_schema(self, schema_path):
"""
Read the input schema
"""
headers, columns = self.sr.get_duplicate_columns(schema_path)
col_dict = self.convert_to_dict(columns)
df = pd.DataFrame(col_dict)
csv = df.to_csv(header=headers, index=False)
# Temporarily store the schema for viewing purposes
store_file(self.storage_folder + "system/tmp.csv", csv)
return headers, columns
def convert_to_dict(self, columns):
"""
Convert the list of columns to a dictionary so pandas can store it
"""
result = {}
max_length = 0
for i in range(0, len(columns)):
result[i] = columns[i]
if len(columns[i]) > max_length:
max_length = len(columns[i])
# fill the list with empty entries so we can use the pandas dataframe
for res in result:
col = result[res]
addition = max_length - len(col)
col += [""] * addition
return result
def store_schema(self, headers, columns, batch_name):
"""
        Store the classified schema and check in the list that the batch name hasn't already been used.
"""
col_dict = self.convert_to_dict(columns)
        df = pd.DataFrame(col_dict)
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
# GH 14827
df = pd.DataFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = pd.DataFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
# Test to_numeric over one column
df_copy = df.copy()
df_copy['a'] = df_copy['a'].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
# Test to_numeric over multiple columns
df_copy = df.copy()
df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
def test_numeric_lists_and_arrays(self):
# Test to_numeric with embedded lists and arrays
df = pd.DataFrame(dict(
a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 1.6, 0.1],
))
tm.assert_frame_equal(df, expected)
df = pd.DataFrame(dict(
a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 0.1],
))
tm.assert_frame_equal(df, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
@pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"])
def test_type_check(self, errors):
# see gh-11776
df = pd.DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
kwargs = dict(errors=errors) if errors is not None else dict()
error_ctx = pytest.raises(TypeError, match="1-d array")
with error_ctx:
to_numeric(df, **kwargs)
def test_scalar(self):
assert pd.to_numeric(1) == 1
assert pd.to_numeric(1.1) == 1.1
assert pd.to_numeric('1') == 1
assert pd.to_numeric('1.1') == 1.1
with pytest.raises(ValueError):
to_numeric('XX', errors='raise')
assert to_numeric('XX', errors='ignore') == 'XX'
assert np.isnan(to_numeric('XX', errors='coerce'))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetime_like(self, tz_naive_fixture):
idx = pd.date_range("20130101", periods=3,
tz=tz_naive_fixture, name="xxx")
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name="xxx"))
res = pd.to_numeric(pd.Series(idx, name="xxx"))
tm.assert_series_equal(res, pd.Series(idx.asi8, name="xxx"))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_timedelta(self):
idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
# TODO: enable when we can support native PeriodDtype
# res = pd.to_numeric(pd.Series(idx, name='xxx'))
# tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
def test_non_hashable(self):
# Test for Bug #13324
s = pd.Series([[10.0, 2], 1.0, 'apple'])
res = pd.to_numeric(s, errors='coerce')
tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan]))
res = pd.to_numeric(s, errors='ignore')
tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple']))
with pytest.raises(TypeError, match="Invalid object type"):
pd.to_numeric(s)
@pytest.mark.parametrize("data", [
["1", 2, 3],
[1, 2, 3],
np.array(["1970-01-02", "1970-01-03",
"1970-01-04"], dtype="datetime64[D]")
])
def test_downcast_basic(self, data):
# see gh-13352
invalid_downcast = "unsigned-integer"
msg = "invalid downcasting method provided"
with pytest.raises(ValueError, match=msg):
pd.to_numeric(data, downcast=invalid_downcast)
expected = np.array([1, 2, 3], dtype=np.int64)
# Basic function tests.
res = pd.to_numeric(data)
tm.assert_numpy_array_equal(res, expected)
res = pd.to_numeric(data, downcast=None)
tm.assert_numpy_array_equal(res, expected)
# Basic dtype support.
smallest_uint_dtype = np.dtype(np.typecodes["UnsignedInteger"][0])
# Support below np.float32 is rare and far between.
float_32_char = np.dtype(np.float32).char
smallest_float_dtype = float_32_char
expected = np.array([1, 2, 3], dtype=smallest_uint_dtype)
res = pd.to_numeric(data, downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_float_dtype)
res = pd.to_numeric(data, downcast="float")
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("signed_downcast", ["integer", "signed"])
@pytest.mark.parametrize("data", [
["1", 2, 3],
[1, 2, 3],
np.array(["1970-01-02", "1970-01-03",
"1970-01-04"], dtype="datetime64[D]")
])
def test_signed_downcast(self, data, signed_downcast):
# see gh-13352
smallest_int_dtype = np.dtype(np.typecodes["Integer"][0])
expected = np.array([1, 2, 3], dtype=smallest_int_dtype)
res = pd.to_numeric(data, downcast=signed_downcast)
tm.assert_numpy_array_equal(res, expected)
def test_ignore_downcast_invalid_data(self):
# If we can't successfully cast the given
# data to a numeric dtype, do not bother
# with the downcast parameter.
data = ["foo", 2, 3]
expected = np.array(data, dtype=object)
res = pd.to_numeric(data, errors="ignore",
downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
def test_ignore_downcast_neg_to_unsigned(self):
# Cannot cast to an unsigned integer
# because we have a negative number.
data = ["-1", 2, 3]
expected = np.array([-1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data, downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("downcast", ["integer", "signed", "unsigned"])
@pytest.mark.parametrize("data,expected", [
(["1.1", 2, 3],
np.array([1.1, 2, 3], dtype=np.float64)),
([10000.0, 20000, 3000, 40000.36, 50000, 50000.00],
np.array([10000.0, 20000, 3000,
40000.36, 50000, 50000.00], dtype=np.float64))
])
def test_ignore_downcast_cannot_convert_float(
self, data, expected, downcast):
# Cannot cast to an integer (signed or unsigned)
# because we have a float number.
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("downcast,expected_dtype", [
("integer", np.int16),
("signed", np.int16),
("unsigned", np.uint16)
])
def test_downcast_not8bit(self, downcast, expected_dtype):
# the smallest integer dtype need not be np.(u)int8
data = ["256", 257, 258]
expected = np.array([256, 257, 258], dtype=expected_dtype)
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("dtype,downcast,min_max", [
("int8", "integer", [iinfo(np.int8).min,
iinfo(np.int8).max]),
("int16", "integer", [iinfo(np.int16).min,
iinfo(np.int16).max]),
('int32', "integer", [iinfo(np.int32).min,
iinfo(np.int32).max]),
('int64', "integer", [iinfo(np.int64).min,
iinfo(np.int64).max]),
('uint8', "unsigned", [iinfo(np.uint8).min,
iinfo(np.uint8).max]),
('uint16', "unsigned", [iinfo(np.uint16).min,
iinfo(np.uint16).max]),
('uint32', "unsigned", [iinfo(np.uint32).min,
iinfo(np.uint32).max]),
('uint64', "unsigned", [iinfo(np.uint64).min,
iinfo(np.uint64).max]),
('int16', "integer", [iinfo(np.int8).min,
iinfo(np.int8).max + 1]),
('int32', "integer", [iinfo(np.int16).min,
iinfo(np.int16).max + 1]),
('int64', "integer", [iinfo(np.int32).min,
iinfo(np.int32).max + 1]),
('int16', "integer", [iinfo(np.int8).min - 1,
iinfo(np.int16).max]),
('int32', "integer", [iinfo(np.int16).min - 1,
iinfo(np.int32).max]),
('int64', "integer", [iinfo(np.int32).min - 1,
iinfo(np.int64).max]),
('uint16', "unsigned", [iinfo(np.uint8).min,
iinfo(np.uint8).max + 1]),
('uint32', "unsigned", [iinfo(np.uint16).min,
iinfo(np.uint16).max + 1]),
('uint64', "unsigned", [iinfo(np.uint32).min,
iinfo(np.uint32).max + 1])
])
def test_downcast_limits(self, dtype, downcast, min_max):
# see gh-14404: test the limits of each downcast.
series = pd.to_numeric(pd.Series(min_max), downcast=downcast)
assert series.dtype == dtype
def test_coerce_uint64_conflict(self):
# see gh-17007 and gh-17125
#
# Still returns float despite the uint64-nan conflict,
# which would normally force the casting to object.
df = pd.DataFrame({"a": [200, 300, "", "NaN", 30000000000000000000]})
expected = pd.Series([200, 300, np.nan, np.nan,
30000000000000000000], dtype=float, name="a")
result = to_numeric(df["a"], errors="coerce")
tm.assert_series_equal(result, expected)
s = pd.Series(["12345678901234567890", "1234567890", "ITEM"])
expected = pd.Series([12345678901234567890,
1234567890, np.nan], dtype=float)
        result = to_numeric(s, errors="coerce")
        tm.assert_series_equal(result, expected)
#%%
import os
import requests
import json
import pandas as pd
import cx_Oracle
import configparser
from datetime import datetime
from bs4 import BeautifulSoup
# Function to write SQL batch errors to a log file
def log_error(batch, process):
if not os.path.exists('./log'):
os.makedirs('./log')
if len(batch) > 0:
f = open(f'./log/{process}.txt', 'a')
for error in batch:
f.write(f'{datetime.now()} error, {error.message}, "at row offset, {error.offset}\n')
f.close()
print(f'Log file written with {len(batch)} errors to ./log/{process}.txt')
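# Example usage (sketch): after cur.executemany(..., batcherrors=True) on the Oracle
# cursor created below, batch errors could be logged with
#   log_error(cur.getbatcherrors(), 'station_load')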
# Connect to Oracle Autonomous Data Warehouse using Wallet and local config file for user/pw storage
cx_Oracle.init_oracle_client(lib_dir=".venv/instantclient_19_10")
config = configparser.ConfigParser()
config.read('./auth/config.ini')
username = config.get('oracle', 'username')
password = config.get('oracle', 'password')
connection = cx_Oracle.connect(username, password, '<PASSWORD>')
cur = connection.cursor()
# Get Citibike station information
cb_url = 'https://gbfs.citibikenyc.com/gbfs/en/station_information.json'
cb_r = requests.get(cb_url)
cb_raw_data = json.loads(cb_r.text)['data']['stations']
# Convert Citibike JSON data into dataframe
station_dict = {index: station for index, station in enumerate(cb_raw_data)}
station_df = pd.DataFrame.from_dict(station_dict, orient='index')
# Clean and reorganize the station dataframe
station_df = station_df[['station_id', 'name', 'lat', 'lon']]
station_df['station_id'] = pd.to_numeric(station_df['station_id'], errors='coerce')
# Load Google Map API key
gg_api = config.get('google', 'api')
gg_url = 'https://maps.googleapis.com/maps/api/geocode/json'
# Create a list of lat/long pair
coord = (station_df['lat'].astype(str) + ',' + station_df['lon'].astype(str)).tolist()
zipcodes = []
# This variable is used to filter the API result from Google for zip code, neighborhood, borough, city, county, and state, respectively
filter_results = 'postal_code|neighborhood|sublocality|locality|administrative_area_level_2|administrative_area_level_1'
city_df = pd.DataFrame(columns=['zipcode', 'neighborhood', 'borough', 'city', 'county', 'state'])
# Loop goes through each lat/long pair to find the zip code, and its respective location information
for row, pair in enumerate(coord):
# Google Map reverse geocode API URL
    gg_r = requests.get(f'{gg_url}?latlng={pair}&key={gg_api}&result_type={filter_results}')
geocode = json.loads(gg_r.text)
# These are flag variables created to check if data is found in the Google API request
postalcode = False
neighborhood = False
borough = False
city = False
county = False
state = False
all_flag = False
# A temp dataframe is used to store the relevant information
# If the zip code is not in the city_df, then it will be appended with the temp_df
    temp_df = pd.DataFrame(columns=['zipcode', 'neighborhood', 'borough', 'city', 'county', 'state'])
#python imports
import os
import gc
import string
import random
import time
import pickle
import shutil
from datetime import datetime
#internal imports
from modules.Signal import Signal
from modules.Database import Database
from modules.Predictor import Classifier, ComplexBuilder
from modules.utils import calculateDistanceP, chunks, cleanPath, minMaxNorm, extractMeanByBounds, extractMetricByShiftBounds
import joblib
from joblib import Parallel, delayed, dump, load
import pandas as pd
import numpy as np
from collections import OrderedDict
from itertools import combinations
from multiprocessing import Pool, Value
from joblib import wrap_non_picklable_objects
#plotting
import matplotlib.pyplot as plt
import seaborn as sns
#sklearn imports
from sklearn.metrics import classification_report, homogeneity_score, v_measure_score, completeness_score
from sklearn.model_selection import ParameterGrid
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import RadiusNeighborsRegressor, KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale, minmax_scale, robust_scale
from scipy.stats import ttest_ind, f_oneway
#dimensional reduction
import umap
__VERSION__ = "0.4.48"
filePath = os.path.dirname(os.path.realpath(__file__))
pathToTmp = os.path.join(filePath,"tmp")
alignModels = { "LinearRegression": LinearRegression,
"RadiusNeighborsRegressor" : RadiusNeighborsRegressor,
"KNeighborsRegressor":KNeighborsRegressor
}
alignModelsParams = {
"LinearRegression": {},
"RadiusNeighborsRegressor" : {"weights":"distance","radius":30} ,
"KNeighborsRegressor":{"weights":"distance","n_neighbors":10}
}
STACKING_CLASSIFIER_GRID = {
'rf__max_depth': [70,None,30],#30,,
#'rf__max_features': ['auto'],
# 'rf__min_samples_leaf': [2, 3, 5],
'rf__min_samples_split': [2,4],#[2, 3, 4],
#'rf__n_estimators': [200],
"SVM__C" : [1, 10,1000],
"SVM__kernel": ['rbf','poly'],
'SVM__gamma': [0.01,10,100]
}
OPTICS_PARAM_GRID = {
"min_samples":[2,3,5,8],
"max_eps": [np.inf,2,1,0.9,0.8],
"xi": np.linspace(0,0.3,num=30),
"cluster_method" : ["xi"]
}
AGGLO_PARAM_GRID = {
"n_clusters":[None,115,110,105,100,90,95],
"distance_threshold":[None,0.5,0.4,0.2,0.1,0.05,0.01],
"linkage":["complete","single","average"]
}
AFF_PRO_PARAM = {"damping":np.linspace(0.5,1,num=50)}
HDBSCAN_PROPS = {
"min_cluster_size":[2,3,4,6],
"min_samples":[2,3,4,5]
}
#{"min_cluster_size":[2,3,4,6],"min_samples":[2,3,4,5,8,10]}
CLUSTER_PARAMS = {
"OPTICS":OPTICS_PARAM_GRID,
"AGGLOMERATIVE_CLUSTERING":AGGLO_PARAM_GRID,
"AFFINITY_PROPAGATION":AFF_PRO_PARAM,
"HDBSCAN":HDBSCAN_PROPS
}
svm_param_grid = {
'C': [1, 10, 100, 1000],
'kernel': ['linear','rbf','poly'],
'gamma': [0.01,0.1,1,2,3,4,5]
}
RF_GRID_SEARCH = {
'max_depth': [70,None,30,50,10],#30,,,50,5
'max_features': ['auto'],
'min_samples_leaf': [2,5,3,15], # 5, 15
'min_samples_split': [2 ,3,10],
'n_estimators': [300, 500, 600]
}
entriesInChunks = dict()
class ComplexFinder(object):
def __init__(self,
addImpurity = 0.0,
alignMethod = "RadiusNeighborsRegressor",#"RadiusNeighborsRegressor",#"KNeighborsRegressor",#"LinearRegression", # RadiusNeighborsRegressor
alignRuns = False,
alignWindow = 3,
allowSingleFractionQuant = False,
analysisMode = "label-free", #[label-free,SILAC,SILAC-TMT]
analysisName = None,
binaryDatabase = False,
classifierClass = "random_forest",
classifierTestSize = 0.25,
classiferGridSearch = RF_GRID_SEARCH,#STACKING_CLASSIFIER_GRID,#
compTabFormat = False,
considerOnlyInteractionsPresentInAllRuns = 2,
correlationWindowSize = 5,
databaseFilter = {'Organism': ["Human"]},#{'Organism': ["Human"]},#{"Confidence" : [1,2,3,4]} - for hu.map2.0,# {} for HUMAN_COMPLEX_PORTAL
databaseIDColumn = "subunits(UniProt IDs)",
databaseFileName = "20190823_CORUM.txt",#"humap2.txt
databaseHasComplexAnnotations = True,
databaseEntrySplitString = ";",
decoySizeFactor = 1.2,
grouping = {"WT": ["D3_WT_03.txt"]},
hdbscanDefaultKwargs = {"min_cluster_size":4,"min_samples":1},
indexIsID = False,
idColumn = "Uniprot ID",
interactionProbabCutoff = 0.7,
justFitAndMatchPeaks = False,
keepOnlySignalsValidInAllConditions = False,
kFold = 3,
maxPeaksPerSignal = 15,
maxPeakCenterDifference = 1.8,
metrices = ["apex","pearson","euclidean","cosine","max_location","rollingCorrelation"], #"umap-dist"
metricesForPrediction = None,#["pearson","euclidean","apex"],
metricQuantileCutoff = 0.001,
minDistanceBetweenTwoPeaks = 3,
minimumPPsPerFeature = 6,
minPeakHeightOfMax = 0.05,
n_jobs = 12,
noDatabaseForPredictions = False,
normValueDict = {},
noDistanceCalculationAndPrediction = False,
peakModel = "LorentzianModel",#"GaussianModel",#"SkewedGaussianModel",#"LorentzianModel",
plotSignalProfiles = False,
plotComplexProfiles = False,
precision = 0.5,
r2Thresh = 0.85,
removeSingleDataPointPeaks = True,
restartAnalysis = False,
retrainClassifier = False,
recalculateDistance = False,
rollingWinType = None,
runName = None,
scaleRawDataBeforeDimensionalReduction = True,
smoothSignal = True,
smoothWindow = 2,
takeRondomSampleFromData =False,
topNCorrFeaturesForUMAPAlignment = 200,
TMTPoolMethod = "sum",
transformQuantDataBy = None,
useRawDataForDimensionalReduction = False,
useFWHMForQuant = True,
umapDefaultKwargs = {"min_dist":0.001,"n_neighbors":5,"n_components":2,"random_state":120},
quantFiles = [],
usePeakCentricFeatures = False
):
"""
Init ComplexFinder Class
Parameters
----------
* alignMethod = "RadiusNeighborsRegressor",
* alignRuns = False,
Alignment of runs is based on signal profiles that were found to have
            a single modelled peak. A reference run is assigned by correlation analysis
            and chosen based on the maximum R2 value. Then fraction shifts per signal
            profile are calculated (must be within the window given by *alignWindow*).
The fraction residuals are then modelled using the method provided in
*alignMethod*. Model peak centers are then adjusted based on the regression results.
Of note, the alignment is performed after peak-modelling and before distance calculations.
* alignWindow = 3,
            Number of fractions +/- the single-peak profile that are accepted for the run alignment.
* analysisMode = "label-free", #[label-free,SILAC,SILAC-TMT]
* analysisName = None,
* binaryDatabase = False,
* classifierClass = "random_forest",
* classifierTestSize = 0.25,
Fraction of the created database containing positive and negative protein-protein
interactions that will be used for testing (for example ROC curve analysis) and classification report.
* classiferGridSearch = RF_GRID_SEARCH.
Dict with keywords matching parameters/settings of estimator (SVM, random forest)
and list of values forming the grid used to find the best estimator settings (evaluated
by k-fold cross validation). Runtime is effected by number of parameter settings as well as k-fold.
* compTabFormat = False
            True indicates that the data are in the compTab data format which was recently introduced.
            In contrast to standard txt files generated by, for example, MaxQuant, it contains multiple
            headers. More information can be found here: https://www3.cmbi.umcn.nl/cedar/browse/comptab
            ComplexFinder will try to identify the samples and fractions and create separated txt files.
* considerOnlyInteractionsPresentInAllRuns = 2,
            Can be either a bool to filter for protein-protein interactions that are present
            in all runs, or an integer. If an integer is provided, the interactions are filtered based on
            the number of runs in which they were quantified. For example, with four runs a value of 4 indicates
            that the interaction must have been predicted in all runs.
* correlationWindowSize = 5,
Number of fractions used for rolling pearson correlation
* databaseFilter = {'Organism': ["Human"]},
Filter dict used to find relevant complexes from database. By default,
            the CORUM database is filtered based on the column 'Organism' using 'Human' as the search string.
If no filtering is required, pass an empty dict {}.
* databaseIDColumn = "subunits(UniProt IDs)",
* databaseFileName = "20190823_CORUM.txt",
* databaseHasComplexAnnotations = True,
Indicates if the provided database does contain complex annotations. If you have a database with
only pairwise interactions, this setting should be *False*. Clusters are identified by dimensional
reduction and density based clustering (HDBSCAN). In order to alter UMAP and HDBSCAN settings use the
            keywords *hdbscanDefaultKwargs* and *umapDefaultKwargs*.
* databaseEntrySplitString = ";",
String by which complex members are separated in the provided database. CORUM = ";", Embl ComplexMap = "|"
* decoySizeFactor = 1.2,
            Size factor for creating the decoy database from a positive protein connectivity database such as CORUM.
* grouping = {"WT": ["D3_WT_04.txt","D3_WT_02.txt"],"KO":["D3_KO_01.txt","D3_KO_02.txt"]},
None or dict. Indicates which samples (file) belong to one group. Let's assume 4 files with the name
'KO_01.txt', 'KO_02.txt', 'WT_01.txt' and 'WT_02.txt' are being analysed.
The grouping dict should like this : {"KO":[KO_01.txt','KO_02.txt'],"WT":['WT_01.txt','WT_02.txt']}
in order to combine them for statistical testing (e.g. t-test of log2 transformed peak-AUCs).
Note that when analysis multiple runs (e.g. grouping present) then calling ComplexFinder().run(X) - X must be a
path to a folder containing the files.
When using compTabFormat = True. Provide the sample name as <compTabFileName>:<SampleName>.
* hdbscanDefaultKwargs = {"min_cluster_size":4,"min_samples":1},
* indexIsID = False,
* idColumn = "Uniprot ID",
* interactionProbabCutoff = 0.7
Cutoff for estimator probability. Interactions with probabilities below threshold will be removed.
* keepOnlySignalsValidInAllConditions = False
If True, removes all Signals that were not found to be valid in all files (experiments).
* kFold = 3
Cross validation of classifier optimization.
* justFitAndMatchPeaks = False
If true, the pipeline stops after peak detection/model fitting and matching of peaks (if more than one file is supplied.)
* maxPeaksPerSignal = 15
            Number of peaks allowed for one signal profile.
* maxPeakCenterDifference = 1.8
        * metrices = ["apex","pearson","euclidean","p_pearson","max_location","umap-dist","rollingCorrelation"], Metrics to assess the distance between two profiles. Can be a list of strings and/or dicts. In the case of a dict, it must contain the keywords 'fn' and 'name', where 'fn' provides a callable function that takes two arrays as input and returns a single floating point number.
* metricesForPrediction = None
        * metricQuantileCutoff = 0.001
* minDistanceBetweenTwoPeaks = 3
Distance in fractions (int) between two peaks. Setting this to a smaller number results in more peaks.
* n_jobs = 12,
            Number of workers to model peaks, to calculate distance pairs and to train and use the classifier.
* noDatabaseForPredictions = False,
If you want to use ComplexFinder without any database. Set this to *True*.
* normValueDict = {},
* noDistanceCalculationAndPrediction = False,
Set to *True* to use ComplexFinder without distance calculation and database prediction.
        * peakModel = "LorentzianModel",
Indicates which model should be used to model signal profiles. In principle all models from lmfit can be used.
            However, the initial parameters are only optimized for GaussianModel and LorentzianModel.
            This might affect runtimes dramatically.
* plotSignalProfiles = False,
            If True, each profile is plotted against the fraction along with the fitted models.
            If you are concerned about time, you might set this to False at the cost of losing a visual assessment of the fit quality.
* plotComplexProfiles = False,
* precision = 0.5
Precision to use to filter protein-protein interactions.
If None, the filtering will be performed based on the parameter *interactionProbabCutoff*.
* r2Thresh = 0.85
R2 threshold to accept a model fit. Models below the threshold will be ignored.
* removeSingleDataPointPeaks = True,
* restartAnalysis = False, bool.
            Set True if you want to restart the analysis from scratch. If the tmp folder exists, items and dirs will be deleted first.
* retrainClassifier = False,
Even if the trainedClassifier.sav file is found, the classifier is loaded and the training is skipped.
If you change the classifierGridSearch, you should set this to True.
This will ensure that the classifier training is never skipped.
* recalculateDistance = False,
* rollingWinType = None,
If None, all points are evenly weighted. Can be any string of scipy.signal window function.
(https://docs.scipy.org/doc/scipy/reference/signal.windows.html#module-scipy.signal.windows)
* runName = None,
        * <del>savePeakModels = True</del> *deprecated. always True and will be removed in the next version*.
* scaleRawDataBeforeDimensionalReduction = True,
If raw data should be used (*useRawDataForDimensionalReduction*)
            enable this if you want to scale them. Scaling is performed so that the values of each row are scaled between zero and one.
* smoothSignal = True
Enable/disable smoothing. Defaults to True. A moving average of at least 3 adjacent datapoints is calculated using
            pandas' rolling function. Affects the analysis time as well as the maximal number of peaks detected.
* smoothWindow = 2,
* topNCorrFeaturesForUMAPAlignment = 200,
Number of profiles used for UMAP Alignment. Only used if useRawDataForDimensionalReduction = True or noDistanceCalculationAndPrediction = True. The Features
will be identified by calculating the Pearson correlation coefficient.
        * useRawDataForDimensionalReduction = False, Setting this to True will force the pipeline to use the raw values for dimensional reduction. Distance calculations are not automatically turned off; the output is generated but not used.
* useFWHMForQuant = True
            If quantFiles is specified, the FWHM is used for peak-centric quantification. By default the mean over at least the peak-center fraction +/- 1 will
            be considered (i.e. 3 fractions). However, you can allow single-fraction quantification for narrow peaks by setting 'allowSingleFractionQuant' to True.
* umapDefaultKwargs = {"min_dist":0.0000001,"n_neighbors":3,"n_components":2},
            If you want to perform an aligned UMAP consider altering the parameters alignment_window_size and alignment_regularisation. Find more information here
(https://umap-learn.readthedocs.io/en/latest/aligned_umap_basic_usage.html#aligning-varying-parameters)
* quantFiles = dict
        * Quantification files. dict with the name of the co-fraction file as key and the path to the quantification file as value.
Assuming your grouping is something like: {"WT":["WT_01.txt","WT_02.txt"]}. Then the quantification files must
contain a key for each file: something like {"WT_01.txt":"myCoolProject/quant/WT01_quant.txt","WT_02.txt":"myCoolProject/quant/WT02_quant.txt"}.
Assuming the folder myCoolProject/ exists where the main file is.
If analysing a TMT-SILAC experiment it is required to provide TMT labelings for heavy and light peaks separately, the
provided dict should look something like this:
{
"HEAVY_WT_01.txt":"myCoolProject/quant/WT01_quant_heavy.txt",
"LIGHT_WT_01.txt":"myCoolProject/quant/WT01_quant_light.txt"
}
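        Example
        -------
        A minimal usage sketch (file names, the folder path and the grouping entries are
        placeholders; when a grouping with several files is given, run() expects the path
        to the folder containing the txt files):

            ComplexFinder(
                analysisName = None,
                grouping = {"WT" : ["WT_01.txt","WT_02.txt"]},
                databaseFileName = "20190823_CORUM.txt"
                ).run("path/to/txtFileFolder")
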
Returns
-------
None
"""
self.params = {
"addImpurity" : addImpurity,
"indexIsID" : indexIsID,
"idColumn" : idColumn,
"n_jobs" : n_jobs,
"kFold" : kFold,
"analysisName" : analysisName,
"restartAnalysis" : restartAnalysis,
"metrices" : metrices,
"peakModel" : peakModel,
"smoothWindow" : smoothWindow,
"classifierClass" : classifierClass,
"retrainClassifier" : retrainClassifier,
"interactionProbabCutoff":interactionProbabCutoff,
"maxPeaksPerSignal" : maxPeaksPerSignal,
"maxPeakCenterDifference" : maxPeakCenterDifference,
"classiferGridSearch" : classiferGridSearch,
"plotSignalProfiles" : plotSignalProfiles,
"savePeakModels" : True, #must be true to process pipeline, depracted, remove from class arguments.
"removeSingleDataPointPeaks" : removeSingleDataPointPeaks,
"grouping" : grouping,
"analysisMode" : analysisMode,
"normValueDict" : normValueDict,
"databaseFilter" : databaseFilter,
"databaseIDColumn" : databaseIDColumn,
"databaseFileName" : databaseFileName,
"databaseHasComplexAnnotations" : databaseHasComplexAnnotations,
"r2Thresh" : r2Thresh,
"smoothSignal" : smoothSignal,
"umapDefaultKwargs" : umapDefaultKwargs,
"hdbscanDefaultKwargs" : hdbscanDefaultKwargs,
"noDatabaseForPredictions" : noDatabaseForPredictions,
"alignRuns" : alignRuns,
"alignMethod" : alignMethod,
"runName" : runName,
"useRawDataForDimensionalReduction" : useRawDataForDimensionalReduction,
"scaleRawDataBeforeDimensionalReduction" : scaleRawDataBeforeDimensionalReduction,
"metricQuantileCutoff": metricQuantileCutoff,
"recalculateDistance" : recalculateDistance,
"metricesForPrediction" : metricesForPrediction,
"minDistanceBetweenTwoPeaks" : minDistanceBetweenTwoPeaks,
"minimumPPsPerFeature" : minimumPPsPerFeature,
"plotComplexProfiles" : plotComplexProfiles,
"decoySizeFactor" : decoySizeFactor,
"classifierTestSize" : classifierTestSize,
"considerOnlyInteractionsPresentInAllRuns" : considerOnlyInteractionsPresentInAllRuns,
"precision" : precision,
"quantFiles" : quantFiles,
"compTabFormat" : compTabFormat,
"correlationWindowSize" : correlationWindowSize,
"takeRondomSampleFromData" : takeRondomSampleFromData,
"minPeakHeightOfMax" : minPeakHeightOfMax,
"justFitAndMatchPeaks" : justFitAndMatchPeaks,
"keepOnlySignalsValidInAllConditions" : keepOnlySignalsValidInAllConditions,
"noDistanceCalculationAndPrediction" : noDistanceCalculationAndPrediction,
"topNCorrFeaturesForUMAPAlignment" : topNCorrFeaturesForUMAPAlignment,
"databaseEntrySplitString": databaseEntrySplitString,
"version" : __VERSION__,
"usePeakCentricFeatures" : usePeakCentricFeatures,
"allowSingleFractionQuant" : allowSingleFractionQuant,
"useFWHMForQuant" : useFWHMForQuant,
"TMTPoolMethod" : TMTPoolMethod,
"transformQuantDataBy" : transformQuantDataBy
}
print("\n" + str(self.params))
self._checkParameterInput()
def _addMetricesToDB(self,analysisName):
"""
Adds distance metrices to the database entries
that were found in the co-elution profiles.
Parameters
----------
Returns
-------
None
"""
if self.params["noDistanceCalculationAndPrediction"]:
print("Info :: Skipping matching metrices to DB.")
return
if "signalDiff" in self.params["metrices"]:
self.params["metrices"] = [x for x in self.params["metrices"] if x != "signalDiff"] + ["{}-diff".format(x) for x in np.arange(self.Xs[analysisName].columns.size)]
metricColumns = self.params["metrices"]
if not self.params["noDatabaseForPredictions"]:
self.DB.matchMetrices(self.params["pathToTmp"][analysisName],entriesInChunks[analysisName],metricColumns,analysisName,forceRematch=self.params["recalculateDistance"])
def _addMetricToStats(self,metricName, value):
"""
Adds a metric to the stats data frame.
        Does not check if the metric is already present; if present,
        it will just be overwritten.
Parameters
----------
metricName str
Name of metric to add
value str
Value of metric
Returns
-------
None
"""
if metricName in self.stats.columns:
self.stats.loc[self.currentAnalysisName,metricName] = value
def _addModelToSignals(self,signalModels):
"""
Adds fitted models to Signals. If not a valid
model was found, then the signal profile is removed.
Parameters
----------
signalModels - list
List of modelfits (dict)
Returns
-------
None
"""
for fitModel in signalModels:
modelID = fitModel["id"]
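            #a fit dict containing only the 'id' key means no valid model was found; remove this signal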
if len(fitModel) == 1:
del self.Signals[self.currentAnalysisName][modelID]
if modelID in self.Signals[self.currentAnalysisName]:
for k,v in fitModel.items():
if k != 'id':
setattr(self.Signals[self.currentAnalysisName][modelID],k,v)
self.Signals[self.currentAnalysisName][modelID].saveResults()
def _attachQuantificationDetails(self, combinedPeakModels = None):
"""
"""
if self.params["analysisMode"] == "label-free":
if len(self.params["quantFiles"]) != 0:
                print("Warning :: Quant files have been specified but the analysis mode is label-free. Please define SILAC, TMT or SILAC-TMT.")
            print("Info :: Label-free mode selected. No additional quantification performed.")
return
if len(self.params["quantFiles"]) > 0:
files = np.array(list(self.params["grouping"].values())).flatten()
print(files)
print(self.params["quantFiles"].keys())
if len(self.params["quantFiles"]) != files.size and self.params["analysisMode"] != "SILAC-TMT":
print("Warning :: Different number of quantFiles and groupings provided.")
if self.params["analysisMode"] != "SILAC-TMT":
initFilesFound = [k for k in self.params["quantFiles"].keys() if k in files]
else:
print(self.params["quantFiles"])
for k in self.params["quantFiles"].keys():
print(k.split("HEAVY_",maxsplit=1))
initFilesFound = [k for k in self.params["quantFiles"].keys() if k.split("HEAVY_",maxsplit=1)[-1] in files or k.split("LIGHT_",maxsplit=1)[-1] in files]
            print("Info :: For the following files corresponding co-elution profile data was detected")
print(initFilesFound)
print("Warning :: other files will be ignored.")
# elif self.params["analysisMode"] == "SILAC-TMT":
# if not all(f.startswith("HEAVY") or f.startswith("LIGHT") for f in self.params["quantFiles"].keys()):
# print("Warning :: If using a SILAC-TMT experiment, please provide 'HEAVY' and 'LIGHT' before the file in the dict 'quantFile' such as 'HEAVY_WT_01.txt':<path to quant file> as well as 'LIGHT_WT_01.txt':<path to quant file>")
        print("Info :: Combining peaks ..")
if combinedPeakModels is None:
            ## load combined peak results
txtOutput = os.path.join(self.params["pathToComb"],"CombinedPeakModelResults.txt")
if os.path.exists(txtOutput):
combinedPeakModels = pd.read_csv(txtOutput,sep="\t")
else:
                print("Warning :: Combined peak model results not found. Deleted? Skipping peak centric quantification.")
return
print("Info :: Starting peak centric quantification. In total {} peaks were found".format(combinedPeakModels.index.size))
print("Info :: Loading quantification files.")
if not all(os.path.exists(pathToQuantFile) for pathToQuantFile in self.params["quantFiles"].values()):
print("Warning :: Not all quant files found!")
if self.params["analysisMode"] != "SILAC-TMT":
print(self.params["quantFiles"].values())
path = list(self.params["quantFiles"].values())
print(os.path.abspath(path[0]))
quantFilesLoaded = [(k,pd.read_csv(v,sep="\t",index_col = 0),False) for k,v in self.params["quantFiles"].items() if os.path.exists(v) and k in initFilesFound]
else:
quantFilesLoaded = [(k.split("HEAVY_",maxsplit=1)[-1] if "HEAVY" in k else k.split("LIGHT_",maxsplit=1)[-1],pd.read_csv(v,sep="\t",index_col = 0),"LIGHT" in k) for k,v in self.params["quantFiles"].items() if os.path.exists(v) and k in initFilesFound]
if len(quantFilesLoaded) == 0:
print("Warning :: No quant files found. Skipping peak-centric quantification.")
return
if self.params["analysisMode"] == "SILAC":
print("Info :: Peak centric quantification using SILAC :: Assuming one SILAC ratio per fraction .")
elif self.params["analysisMode"] == "TMT":
print("Info :: Peak centric quantification using TMT :: Assuming the following order:")
            print("Ignoring column headers, just uses the column index as follows:")
            print("Fraction 1 - TMT reporter 1, Fraction 1 - TMT reporter 2, ..., Fraction 2 - TMT reporter 1, Fraction 2 - TMT reporter 2, ...")
extractedQuantFiles = []
for k,quantFile,isLightQuantData in quantFilesLoaded:
print("Info :: Quantification of ", k)
centerColumnName = "Center_{}".format(k)
fwhmColumnName = "fwhm_{}".format(k)
quantFileName = "Q({})".format(k)
combinedPeakModelsFiltered = combinedPeakModels.dropna(subset=[centerColumnName])
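            #quantification window derived from the fitted peak: center +/- FWHM/1.7 (slightly wider than the half width at half maximum)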
lowerBound = combinedPeakModelsFiltered[centerColumnName] - combinedPeakModelsFiltered[fwhmColumnName]/1.7
upperBound = combinedPeakModelsFiltered[centerColumnName] + combinedPeakModelsFiltered[fwhmColumnName]/1.7
peakBounds = np.concatenate([lowerBound.values.reshape(-1,1),upperBound.values.reshape(-1,1)],axis=1)
peakBounds[:,1] += 1 #add one extra to use bounds as a range in python
#check bounds
peakBounds[peakBounds[:,0] < 0, 0] = 0
peakBounds[peakBounds[:,1] >= quantFile.columns.size, 1] = quantFile.columns.size - 1
#transform bounds to ints
peakBounds = np.around(peakBounds,0).astype(np.int64)
quantData = quantFile.loc[combinedPeakModelsFiltered["Key"].values].values
if self.params["analysisMode"] == "SILAC":
print("Info :: Peak centric quantification using SILAC :: extracting mean from file {}.".format(k))
out = extractMeanByBounds(
NPeakModels = combinedPeakModelsFiltered.index.size,
peakBounds = peakBounds,
quantData = quantData
)
quantColumnNames = ["SILAC({})_Mean".format(quantFileName),"SILAC({})_Error".format(quantFileName)]
print(out)
print(quantColumnNames)
dfResult = pd.DataFrame(out,index=combinedPeakModelsFiltered.index, columns = quantColumnNames)
dfResult = dfResult.join(pd.DataFrame(peakBounds,index=combinedPeakModelsFiltered.index, columns = ["SILAC({})_LowerBound".format(quantFileName),"SILAC({})_UpperBound".format(quantFileName)]))
extractedQuantFiles.append(dfResult)
elif self.params["analysisMode"] == "TMT":
print("Info :: Peak centric quantification using TMT :: extracting sum from TMT reporters using file {}".format(self.params["quantFiles"][k]))
                print("Info :: Detecting reporter channels..")
nFractions = self.Xs[k].shape[1]
nTMTs = quantData.shape[1] / nFractions
print("Info :: {} reporter channels detected and {} fractions.".format(nTMTs,nFractions))
if nTMTs != int(nTMTs):
                    print("Warning :: Could not detect the number of TMT reporter channels. Please check that the columns in quantFiles amount to nTMT x nFractions columns")
continue
nTMTs = int(nTMTs)
out = extractMetricByShiftBounds(
NPeakModels = combinedPeakModels.index.size,
peakBounds = peakBounds,
quantData = quantData,
shift = nTMTs,
nFractions = nFractions
)
quantColumnNames = []
dfResult = pd.DataFrame(out,index=combinedPeakModels.index, columns = quantColumnNames)
extractedQuantFiles.append(dfResult)
elif self.params["analysisMode"] == "SILAC-TMT":
print("Info :: Extracting quantification details from SILAC-TMT data.")
                print("Info :: Detecting reporter channels..")
nFractions = self.Xs[k].shape[1]
nTMTs = quantData.shape[1] / nFractions
print("Info :: {} reporter channels detected and {} fractions.".format(nTMTs,nFractions))
if nTMTs != int(nTMTs):
                    print("Warning :: Could not detect the number of TMT reporter channels. Please check that the columns in quantFiles amount to nTMT x nFractions columns")
continue
nTMTs = int(nTMTs)
# print(peakBounds)
# print(combinedPeakModels["Key"])
# print(isLightQuantData)
quantData[quantData == 0.0] = np.nan
out = extractMetricByShiftBounds(
NPeakModels = combinedPeakModels.index.size,
peakBounds = peakBounds,
quantData = quantData,
shift = nTMTs,
nFractions = nFractions
)
#print(out)
if isLightQuantData:
quantColumnNames = ["L_({})_tmt_intensity_{}".format(k,n) for n in range(nTMTs)]
else:
quantColumnNames = ["H_({})_tmt_intensity_{}".format(k,n) for n in range(nTMTs)]
# print(a)
dfResult = pd.DataFrame(out,index=combinedPeakModels.index, columns = quantColumnNames)
extractedQuantFiles.append(dfResult)
combinedPeakModels = combinedPeakModels.join(extractedQuantFiles)
txtOutput = os.path.join(self.params["pathToComb"],"CombinedPeakModelResultsQuant.txt")
combinedPeakModels.to_csv(txtOutput,sep="\t")
def _checkParameterInput(self):
"""
Checks the input.
Parameters
----------
Returns
-------
None
Raises
-------
ValueErrors if datatype if given parameters do not match.
"""
#check anaylsis mode
validModes = ["label-free","SILAC","SILAC-TMT","TMT"]
if self.params["analysisMode"] not in validModes:
            raise ValueError("Parameter analysisMode is not valid. Must be one of: {}".format(validModes))
elif self.params["analysisMode"] != "label-free" and len(self.params["quantFiles"]) == 0:
            raise ValueError("'quantFiles' must contain at least one entry if the analysis mode is not set to 'label-free'.")
if not isinstance(self.params["maxPeaksPerSignal"],int):
            raise ValueError("maxPeaksPerSignal must be an integer. Current setting: {}".format(self.params["maxPeaksPerSignal"]))
elif self.params["maxPeaksPerSignal"] <= 2:
            raise ValueError("maxPeaksPerSignal must be greater than 2")
elif self.params["maxPeaksPerSignal"] > 20:
print("Warning :: maxPeaksPerSignal is set to above 20, this may take quite long to model.")
#r2 validation
if not isinstance(self.params["r2Thresh"],float):
            raise ValueError("Parameter r2Thresh must be a float.")
elif self.params["r2Thresh"] < 0.5:
print("Warning :: threshold for r2 is set below 0.5. This might result in fits of poor quality")
elif self.params["r2Thresh"] > 0.95:
print("Warning :: threshold for r2 is above 0.95. Relatively few features might pass this limit.")
elif self.params["r2Thresh"] > 0.99:
raise ValueError("Threshold for r2 was above 0.99. Please set a lower value.")
#minPeakHeightOfMax
        if not isinstance(self.params["minPeakHeightOfMax"],float) or self.params["minPeakHeightOfMax"] >= 1 or self.params["minPeakHeightOfMax"] < 0:
            raise ValueError("Parameter 'minPeakHeightOfMax' must be a float smaller than 1.0 and greater than or equal to 0.0.")
#k-fold
if not isinstance(self.params["kFold"],int):
            raise ValueError("Parameter kFold must be an integer.")
elif self.params["kFold"] < 2:
raise ValueError("Parameter kFold must be at least 2.")
if self.params["alignMethod"] not in alignModels:
            raise ValueError("Parameter alignMethod must be one of {}".format(list(alignModels.keys())))
if not isinstance(self.params["metricQuantileCutoff"],float) or self.params["metricQuantileCutoff"] <= 0 or self.params["metricQuantileCutoff"] >= 1:
raise ValueError("Parameter metricQuantileCutoff must be a float greater than 0 and smaller than 1.")
#add database checks
if self.params["metricesForPrediction"] is not None:
if not isinstance(self.params["metricesForPrediction"],list):
raise TypeError("metricesForPrediction must be a list.")
else:
if not all(x in self.params["metrices"] for x in self.params["metricesForPrediction"]):
raise ValueError("All metrices given in 'metricesForPrediction' must be present in 'metrices'.")
else:
self.params["metricesForPrediction"] = self.params["metrices"]
def _chunkPrediction(self,pathToChunk,classifier,nMetrices,probCutoff):
"""
Predicts for each chunk the proability for positive interactions.
Parameters
----------
pathToChunk : str
classifier : classfierClass
Trained classifier.
nMetrices : int
            Number of metrics used (since chunks are plain numpy arrays, no column headers are loaded).
probCutoff : float
Probability cutoff.
Returns
-------
Numpy array. Chunks with appended probability.
"""
X = np.load(pathToChunk,allow_pickle=True)
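        #drop self-interactions (E1 == E2) before predicting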
boolSelfIntIdx = X[:,0] != X[:,1]
X = X[boolSelfIntIdx]
classProba = classifier.predict(X[:,[n+3 for n in range(nMetrices)]])
#boolPredIdx = classProba >= probCutoff
#boolIdx = np.sum(boolPredIdx,axis=1) > 0
        predX = np.append(X[:,2].reshape(-1,1),classProba.reshape(X.shape[0],-1),axis=1) #keep the E1E2 key next to the predicted probabilities
np.save(
file = pathToChunk,
arr = predX)
return predX
def _load(self, X):
"""
        Initializes the data.
Parameters
----------
X pd.DataFrame
Returns
-------
None
Raises
-------
ValueError if X is not a pandas data frame.
"""
if isinstance(X, pd.DataFrame):
self.X = X
if not self.params["indexIsID"]:
print("Info :: Checking for duplicates")
dupRemoved = self.X.drop_duplicates(subset=[self.params["idColumn"]])
if dupRemoved.index.size < self.X.index.size:
print("Warning :: Duplicates detected.")
print("File contained duplicate ids which will be removed: {}".format(self.X.index.size-dupRemoved.index.size))
self.X = dupRemoved
self.X = self.X.set_index(self.params["idColumn"])
self.X = self.X.astype(np.float32)
else:
                self.X = self.X.loc[self.X.index.drop_duplicates()] #remove duplicates
self.X = self.X.astype(np.float32) #set dtype to 32 to save memory
if self.params["takeRondomSampleFromData"] != False and self.params["takeRondomSampleFromData"] > 50:
self.X = self.X.sample(self.params["takeRondomSampleFromData"])
print("Random samples taken from data. New data size {}".format(self.X.index.size))
self.params["rawData"][self.currentAnalysisName] = self.X.copy()
else:
raise ValueError("X must be a pandas data frame")
def _loadReferenceDB(self):
"""
Load reference database.
filterDB (dict) is passed to the pandas pd.DataFrame.isin function.
Parameters
----------
Returns
-------
None
"""
if self.params["noDistanceCalculationAndPrediction"]:
            print("noDistanceCalculationAndPrediction was enabled. No database loaded.")
return
if self.params["noDatabaseForPredictions"]:
            print("Info :: Parameter noDatabaseForPredictions was set to True. No database loaded.")
return
print("Info :: Load positive set from data base")
if not hasattr(self,"DB"):
self.DB = Database(nJobs = self.params["n_jobs"], splitString=self.params["databaseEntrySplitString"])
pathToDatabase = os.path.join(self.params["pathToComb"], "InteractionDatabase.txt")
if os.path.exists(pathToDatabase):
dbSize = self.DB.loadDatabaseFromFile(pathToDatabase)
print("Info :: Database found and loaded. Contains {} positive interactions.".format(dbSize))
# self._addMetricToStats("nPositiveInteractions",dbSize)
else:
self.DB.pariwiseProteinInteractions(
self.params["databaseIDColumn"],
dbID = self.params["databaseFileName"],
filterDb=self.params["databaseFilter"])
entryList = []
for analysisName in self.params["analysisName"]:
entryList.extend([entryID for entryID,Signal in self.Signals[analysisName].items() if Signal.valid])
entryList = np.unique(np.array(entryList).flatten())
print("Info :: Features used for filtering: {}".format(len(entryList)))
dbSize = self.DB.filterDBByEntryList(entryList)
#add decoy to db
if dbSize == 0:
                raise ValueError("Warning :: No hits found in database. Check the databaseFilter keyword.")
elif dbSize < 150:
                raise ValueError("Warning :: Less than 150 pairwise interactions found.")
elif dbSize < 200:
#raise ValueError("Filtered positive database contains less than 200 interactions..")
print("Warning :: Filtered positive database contains less than 200 interactions.. {}".format(dbSize))
print("Warning :: Please check carefully, if the classifier has enough predictive power.")
self.DB.addDecoy(sizeFraction=self.params["decoySizeFactor"])
self.DB.df.to_csv(pathToDatabase,sep="\t")
print("Info :: Database saved to {}".format(pathToDatabase))
    def _checkGroups(self):
        "Checks grouping. For comparison of multiple co-elution data sets."
if isinstance(self.params["grouping"],dict):
if len(self.params["grouping"]) == 0:
raise ValueError("Example for grouping : {'KO':['KO_01.txt','KO_02.txt'], 'WT':['WT_01.txt','WT_02.txt'] } Aborting.. ")
else:
combinedSamples = sum(self.params["grouping"].values(), [])
if all(x in combinedSamples for x in self.params["analysisName"]):
print("Grouping checked..\nAll columnSuffixes found in grouping.")
                    print("If you are using the compTab format, the grouping has to be named as '<compTabFileName>:<sample name>'")
else:
raise ValueError("Could not find all grouping names in loaded dataframe.. Aborting ..")
def _findPeaks(self, n_jobs=3):
"""
Initiates for each feature in the data a Signal instance.
Peak detection and modelling is then performed.
Results are saved to hard drive for each run.
        Numerous parameters affect signal modelling (smoothing, maxPeaks, r2Thresh, ...).
        Creates self.Signals (OrderedDict): key = analysisName, value = a dict that maps
        entry IDs to instances of the Signal class.
Parameters
----------
n_jobs int. Number of worker processes.
Returns
-------
None
"""
if self.allSamplesFound:
print("Info :: Signals loaded and found. Proceeding ...")
return
pathToSignal = os.path.join(self.params["pathToComb"],"signals.lzma")
if os.path.exists(pathToSignal):
self.Signals = load(pathToSignal)
print("\nLoading pickled signal intensity")
if all(analysisName in self.Signals for analysisName in self.params["analysisName"]):
print("Info :: All samples found in loaded Signals..")
self.allSamplesFound = True
return
if not hasattr(self , "Signals"):
self.Signals = OrderedDict()
self.Signals[self.currentAnalysisName] = dict()
peakModel = self.params['peakModel']
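        #create one Signal instance per feature (row of the co-elution table); model fitting is parallelized below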
for entryID, signal in self.X.iterrows():
self.Signals[self.currentAnalysisName][entryID] = Signal(signal.values,
ID = entryID,
peakModel = peakModel,
smoothSignal = self.params["smoothSignal"],
savePlots = self.params["plotSignalProfiles"],
savePeakModels = self.params["savePeakModels"],
maxPeaks = self.params["maxPeaksPerSignal"],
metrices = self.params["metrices"],
pathToTmp = self.params["pathToTmp"][self.currentAnalysisName],
normalizationValue = self.params["normValueDict"][entryID] if entryID in self.params["normValueDict"] else None,
removeSingleDataPointPeaks = self.params["removeSingleDataPointPeaks"],
analysisName = self.currentAnalysisName,
r2Thresh = self.params["r2Thresh"],
smoothRollingWindow = self.params["smoothWindow"],
minDistanceBetweenTwoPeaks = self.params["minDistanceBetweenTwoPeaks"],
minPeakHeightOfMax = self.params["minPeakHeightOfMax"])
t1 = time.time()
print("\n\nStarting Signal modelling .. (n_jobs = {})".format(n_jobs))
fittedModels = Parallel(n_jobs=n_jobs, verbose=1)(delayed(Signal.fitModel)() for Signal in self.Signals[self.currentAnalysisName].values())
self._addModelToSignals(fittedModels)
self._saveSignalFitStatistics()
        print("Peak fitting done. Time: {} secs".format(round((time.time()-t1))))
        print("Each feature's fitted model is stored as pdf and txt in the modelPlots folder (if savePeakModels and plotSignalProfiles were set to True).")
def _saveSignals(self):
""
if hasattr(self,"Signals") :
pathToSignal = os.path.join(self.params["pathToComb"],"signals.lzma")
dump(self.Signals.copy(),pathToSignal)
self.Xs = OrderedDict()
for analysisName in self.params["analysisName"]:
pathToFile = os.path.join(self.params["pathToTmp"][analysisName],"validProcessedSignals({}).txt".format(analysisName))
signals = self.Signals[analysisName]
validSignalData = dict([(k,v.Y) for k,v in signals.items() if v.valid and v.validModel])
fitDataSignal = dict([(k,v.fitSignal.flatten()) for k,v in signals.items() if v.valid and v.validModel and v.fitSignal is not None])
dfProcessedSignal = pd.DataFrame().from_dict(validSignalData,orient="index")
dfFit = pd.DataFrame().from_dict(fitDataSignal, orient="index")
if self.params["removeSingleDataPointPeaks"]:
numberofPeaks = dict([(k,v.removedDataPoints) for k,v in signals.items() if v.valid and v.validModel and v.fitSignal is not None])
nRemovedData = pd.DataFrame().from_dict(numberofPeaks,orient="index")
nRemovedData.columns = ["#removedDataPoints"]
dfFit = dfFit.join(nRemovedData)
#print(self.params["rawData"][analysisName].index)
df = dfProcessedSignal.join(self.params["rawData"][analysisName],rsuffix="_raw",lsuffix="_processed")
df = df.join(dfFit,rsuffix = "_fit")
df.to_csv(pathToFile,sep="\t")
self.Xs[analysisName] = dfProcessedSignal
X = self.Xs[analysisName].reset_index()
np.save(os.path.join(self.params["pathToTmp"][analysisName],"source.npy"),X.values)
for analysisName in self.params["analysisName"]:
#clean invalid signals
if self.params["keepOnlySignalsValidInAllConditions"]:
toDelete = [k for k,v in self.Signals[analysisName].items() if not all(k in self.Signals[analysisName] and self.Signals[analysisName][k].valid for analysisName in self.params["analysisName"])]
else:
toDelete = [k for k,v in self.Signals[analysisName].items() if not v.valid]
#delete Signals that do match criteria
for k in toDelete:
del self.Signals[analysisName][k]
def _calculateDistance(self):
"""
Calculates Distance between protein protein pairs based
on their signal profile.
Parameters
----------
Returns
-------
None
"""
if self.params["noDistanceCalculationAndPrediction"]:
print("noDistanceCalculationAndPrediction was enabled. Skipping Distance Calculations.")
return
global entriesInChunks
print("\nStarting Distance Calculation ...")
t1 = time.time()
chunks = self.signalChunks[self.currentAnalysisName]
#return
entrieChunkPath = os.path.join(self.params["pathToComb"], "entriesInChunk.pkl")
if not self.params["recalculateDistance"] and all(os.path.exists(x.replace(".pkl",".npy")) for x in chunks) and os.path.exists(entrieChunkPath):
print("All chunks found for distance calculation.")
if not self.entriesInChunkLoaded:
with open(os.path.join(self.params["pathToComb"], "entriesInChunk.pkl"),"rb") as f:
entriesInChunks = pickle.load(f)
self.entriesInChunkLoaded = True
else:
chunkItems = Parallel(n_jobs=self.params["n_jobs"], verbose=10)(delayed(calculateDistanceP)(c) for c in chunks)
entriesInChunks[self.currentAnalysisName] = {}
for k,v in chunkItems:
for E1E2 in v:
entriesInChunks[self.currentAnalysisName][E1E2] = k
with open(os.path.join(self.params["pathToComb"], "entriesInChunk.pkl"),"wb") as f:
pickle.dump(entriesInChunks,f)
print("Distance computing/checking: {} secs\n".format(round(time.time()-t1)))
def _createSignalChunks(self,chunkSize = 30):
"""
Creates signal chunks at given chunk size.
Parameter
---------
        chunkSize - int. default 30. Number of signals in a single chunk.
Returns
-------
list of paths to the saved chunks.
"""
pathToSignalChunk = os.path.join(self.params["pathToComb"],"signalChunkNames.lzma")
if os.path.exists(pathToSignalChunk) and not self.params["recalculateDistance"]:
self.signalChunks = load(pathToSignalChunk)
print("Info :: Signal chunks loaded and found. Checking if all runs are present.")
if all(analysisName in self.signalChunks for analysisName in self.params["analysisName"]):
print("Info :: Checked... all samples found.")
return
else:
print("Info :: Not all samples found. Creating new signal chunks..")
if not hasattr(self,"signalChunks"):
self.signalChunks = dict()
else:
self.signalChunks.clear()
for analysisName in self.params["analysisName"]:
print("Info :: {} signal chunk creation started.\nThis may take some minutes.." .format(analysisName))
if "umap-dist" in self.params["metrices"]:
#umap dist calculations
                print("Info :: Calculating UMAP.")
embed = umap.UMAP(min_dist=0.0000000000001, n_neighbors=5, metric = "correlation", random_state=56).fit_transform(minMaxNorm(self.Xs[analysisName].values,axis=1))
embed = pd.DataFrame(embed,index=self.Xs[analysisName].index)
#save embedding
embed.to_csv(os.path.join(self.params["pathToTmp"][analysisName],"chunks","embeddings.txt"),sep="\t")
signals = list(self.Signals[analysisName].values())
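            #each signal keeps a reference to the signals that follow it, so every pair (E1,E2) is computed exactly once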
for n,Signal in enumerate(self.Signals[analysisName].values()):
setattr(Signal,"otherSignals", signals[n:])
c = []
for n,chunk in enumerate(chunks(signals,chunkSize)):
pathToChunk = os.path.join(self.params["pathToTmp"][analysisName],"chunks",str(n)+".pkl")
#if not os.path.exists(pathToChunk) and not self.params["recalculateDistance"]:
chunkItems = [
{
"ID" : str(signal.ID),
"chunkName" : str(n),
"Y" : np.array(signal.Y),
"ownPeaks" : signal.getPeaksAndsIDs(),
"otherSignalPeaks" : [s.getPeaksAndsIDs() for s in signal.otherSignals],
"E2" : [str(s.ID) for s in signal.otherSignals],
"metrices" : self.params["metrices"],
"pathToTmp" : self.params["pathToTmp"][analysisName],
"correlationWindowSize" : self.params["correlationWindowSize"],
"embedding" : embed.loc[signal.ID].values if "umap-dist" in self.params["metrices"] else [],
} for signal in chunk]
with open(pathToChunk,"wb") as f:
pickle.dump(chunkItems,f)
c.append(pathToChunk)
self.signalChunks[analysisName] = [p for p in c if os.path.exists(p)] #
        #saves signal chunks.
dump(self.signalChunks,pathToSignalChunk)
def _collectRSquaredAndFitDetails(self):
"""
Data are collected from txt files in the modelPlots folder.
"""
if not self.params["savePeakModels"]:
            print("!! Warning !! This parameter is deprecated and from now on always True.")
self.params["savePeakModels"] = True
pathToPlotFolder = os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"result","modelPlots")
resultFolder = os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"result")
fittedPeaksPath = os.path.join(resultFolder,"fittedPeaks_{}.txt".format(self.currentAnalysisName))
nPeaksPath = os.path.join(resultFolder,"nPeaks.txt")
if os.path.exists(fittedPeaksPath) and os.path.exists(nPeaksPath):
            print("Warning :: FittedPeaks detected. If you changed the data, you have to set the parameter 'restartAnalysis' to True to include the changes.")
return
if not os.path.exists(resultFolder):
os.mkdir(resultFolder)
#number of peaks
collectNumbPeaks = []
data = [{"Key":signal.ID,
"ID" : n,
"R2":signal.Rsquared,
"#Peaks":len(signal.modelledPeaks),
"Center":peakParam["mu"],
"Amplitude":peakParam["A"],
"Sigma":peakParam["sigma"],
"fwhm":peakParam["fwhm"],
"height" : peakParam["height"],
"AUC" : peakParam["AUC"],
"relAUC" : peakParam["relAUC"],
"validModel":signal.validModel,
"validData":signal.validData,
"Y": ",".join([str(round(x,3)) for x in peakParam["Y"]])} for signal in self.Signals[self.currentAnalysisName].values() if signal.valid for n,peakParam in enumerate(signal.modelledPeaks)]
df = pd.DataFrame().from_dict(data)
df.to_csv(fittedPeaksPath,sep="\t",index=None)
# # find peak properties..
# df = pd.DataFrame(columns=["Key","ID","Amplitude","Center","Sigma","fwhm","height","auc"])
# for file in os.listdir(pathToPlotFolder):
# if file.endswith(".txt"):
# try:
# dfApp = pd.read_csv(os.path.join(pathToPlotFolder,file), sep="\t")
# df = df.append(dfApp)
# collectNumbPeaks.append({"Key":dfApp["Key"].iloc[0],"N":len(dfApp.index)})
# except:
# continue
#pd.DataFrame(collectNumbPeaks).to_csv(nPeaksPath,sep="\t", index = None)
def _trainPredictor(self, addImpurity = 0.3, apexTraining = False):
"""
Trains the predictor based on positive interactions
in the database.
Parameters
----------
Returns
-------
None
"""
#metricColumns = [col for col in self.DB.df.columns if any(x in col for x in self.params["metrices"])]
if self.params["noDatabaseForPredictions"] or self.params["noDistanceCalculationAndPrediction"]:
print("Predictor training skipped (noDatabaseForPredictions = True or noDistanceCalculationAndPrediction = True). Distance metrices/Raw signals are used for dimensional reduction.")
return
folderToResults = [os.path.join(self.params["pathToTmp"][analysisName],"result") for analysisName in self.params["analysisName"]]
classifierFileName = os.path.join(self.params["pathToComb"],'trainedClassifier_{}.sav'.format(self.params["classifierClass"]))
if not self.params["retrainClassifier"] and os.path.exists(classifierFileName): #enumerate(
print("Info :: Prediction was done already... loading file")
self.classifier = joblib.load(classifierFileName)
return
metricColumnsForPrediction = self.params["metrices"]
totalColumns = metricColumnsForPrediction + ['Class',"E1E2"]
data = [self.DB.dfMetrices[analysisName][totalColumns].dropna(subset=metricColumnsForPrediction) for analysisName in self.params["analysisName"]]
data = pd.concat(data, ignore_index=True)
dataForTraining = data[["E1E2","Class"] + metricColumnsForPrediction]
dataForTraining["Class"] = dataForTraining["Class"].astype(np.float64)
print("Info :: Merging database metrices.")
print("Test size for classifier: {}".format(self.params["classifierTestSize"]))
if apexTraining and "apex" in totalColumns:
print("Info :: Performing apex based pooling.")
dataForTraining = dataForTraining.sort_values("apex").drop_duplicates("E1E2")
else:
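            #pool duplicated E1E2 pairs (e.g. from multiple runs) by taking the per-column minimum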
dataForTraining = dataForTraining.groupby(dataForTraining['E1E2']).aggregate("min")
dataForTraining['Class'] = dataForTraining['Class'].astype(np.int64)
dataForTraining = dataForTraining.reset_index()
print("Info :: Using a total of {} features for classifier training.".format(dataForTraining.index.size))
if addImpurity > 0:
nRows = dataForTraining.index.size
rowIdx = np.random.choice(nRows,int(nRows * addImpurity),replace=False)#np.random.randint(0,nRows,size=int(nRows * addImpurity))
print(dataForTraining.loc[rowIdx,'Class'] ^ 1)
dataForTraining.loc[rowIdx,'Class'] = dataForTraining.loc[rowIdx,'Class'] ^ 1
print("Warning :: Stop! Using impurity for the training data is not advisable other than for testing. You should probably not do this?")
Y = dataForTraining['Class'].values
X = dataForTraining.loc[:,metricColumnsForPrediction].values
self.classifier = Classifier(
classifierClass = self.params["classifierClass"],
n_jobs=self.params['n_jobs'],
gridSearch = self.params["classiferGridSearch"],
testSize = self.params["classifierTestSize"])
probabilites, meanAuc, stdAuc, oobScore, optParams, Y_test, Y_pred = self.classifier.fit(X,Y,kFold=self.params["kFold"],pathToResults=self.params["pathToComb"], metricColumns = metricColumnsForPrediction)
dataForTraining["PredictionClass"] = probabilites
#save prediction summary
pathToFImport = os.path.join(self.params["pathToComb"],"PredictorSummary{}_{}.txt".format(self.params["metrices"],self.params["addImpurity"]))
#create and save classification report
classReport = classification_report(
Y_test,
Y_pred,
digits=3,
output_dict=True)
classReport = OrderedDict([(k,v) for k,v in classReport.items() if k != 'accuracy'])
pd.DataFrame().from_dict(classReport, orient="index").to_csv(pathToFImport, sep="\t", index=True)
#save database prediction
dataForTraining.to_csv(os.path.join(self.params["pathToComb"],"DBpred({}).txt".format(self.params["addImpurity"])),sep="\t", index=False)
self._plotFeatureImportance(self.params["pathToComb"])
joblib.dump(self.classifier, classifierFileName)
self._addMetricToStats("Metrices",str(metricColumnsForPrediction))
self._addMetricToStats("OOB_Score",oobScore)
self._addMetricToStats("ROC_Curve_AUC","{}+-{}".format(meanAuc,stdAuc))
self._addMetricToStats("ClassifierParams",optParams)
print("DB prediction saved - DBpred.txt :: Classifier pickled and saved 'trainedClassifier.sav'")
def _loadPairsForPrediction(self):
""
#load chunks that were saved
chunks = [f for f in os.listdir(os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"chunks")) if f.endswith(".npy") and f != "source.npy"]
print("\nInfo :: Prediction/Dimensional reduction started...")
for chunk in chunks:
X = np.load(os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"chunks",chunk),allow_pickle=True)
yield (X,len(chunks))
def _predictInteractions(self):
""
if self.params["noDatabaseForPredictions"] or self.params["noDistanceCalculationAndPrediction"]:
print("Info :: Skipping predictions. (noDatabaseForPredictions = True or noDistanceCalculationAndPrediction = True)")
return
paramDict = {"NumberInteractions" : 0, "positiveInteractors" : 0, "decoyInteractors" : 0, "novelInteractions" : 0, "interComplexInteractions" : 0}
probCutoffs = dict([(cutoff,paramDict.copy()) for cutoff in np.linspace(0.0,0.99,num=30)])
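        #collect interaction counts over a grid of 30 probability cutoffs to derive precision/recall/F-measures below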
print("Info :: Starting prediction ..")
folderToOutput = os.path.join(self.params["pathToTmp"][self.currentAnalysisName],"result")
pathToPrediction = os.path.join(folderToOutput,"predictedInteractions{}_{}_{}.txt".format(self.params["metricesForPrediction"],self.params["classifierClass"],self.params["addImpurity"]))
if False and not self.params["retrainClassifier"] and os.path.exists(pathToPrediction):
predInts = pd.read_csv(pathToPrediction, sep="\t")
self.stats.loc[self.currentAnalysisName,"nInteractions ({})".format(self.params["interactionProbabCutoff"])] = predInts.index.size
return predInts
# del self.Signals
#gc.collect()
#create prob columns of k fold
pColumns = ["Prob_{}".format(n) for n in range(len(self.classifier.predictors))]
dfColumns = ["E1","E2","E1E2","apexPeakDist"] + [x if not isinstance(x,dict) else x["name"] for x in self.params["metrices"]] + pColumns + ["In DB"]
if not os.path.exists(folderToOutput):
os.mkdir(folderToOutput)
predInteractions = None
        metricIdx = [n + 4 if "apex" in self.params["metrices"] else n + 3 for n in range(len(self.params["metrices"]))] #in order to extract from distances, apex creates an extra column (apex_dist)
for n,(X,nChunks) in enumerate(self._loadPairsForPrediction()):
boolSelfIntIdx = X[:,0] == X[:,1]
if n % 5 == 0:
percDone = round(n/nChunks*100,1)
print(percDone,r"%")
X = X[boolSelfIntIdx == False]
#first two rows E1 E2, and E1E2, apexPeakDist remove before predict
if X.shape[0] > 0:
classProba = self.classifier.predict(X[:,metricIdx])
else:
continue
if classProba is None:
continue
predX = np.append(X,classProba.reshape(X.shape[0],-1),axis=1)
interactionClass = self.DB.getInteractionClassByE1E2(X[:,2],X[:,0],X[:,1])
for cutoff in probCutoffs.keys():
boolPredIdx = classProba >= cutoff
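                #with k-fold trained predictors, an interaction has to pass the cutoff in every fold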
if len(boolPredIdx.shape) > 1:
boolIdx = np.sum(boolPredIdx,axis=1) == self.params["kFold"]
else:
boolIdx = boolPredIdx
counts = interactionClass.loc[boolIdx].value_counts()
                nInteractions = np.sum(boolIdx) #renamed from 'n' to avoid shadowing the chunk counter of the outer loop
                probCutoffs[cutoff]["NumberInteractions"] += nInteractions
probCutoffs[cutoff]["positiveInteractors"] += counts["pos"] if "pos" in counts.index else 0
probCutoffs[cutoff]["decoyInteractors"] += counts["decoy"] if "decoy" in counts.index else 0
probCutoffs[cutoff]["novelInteractions"] += counts["unknown/novel"] if "unknown/novel" in counts.index else 0
probCutoffs[cutoff]["interComplexInteractions"] += counts["inter"] if "inter" in counts.index else 0
boolPredIdx = classProba >= self.params["interactionProbabCutoff"]
if len(boolPredIdx.shape) > 1:
boolIdx = np.sum(boolPredIdx,axis=1) == self.params["kFold"]
else:
boolIdx = boolPredIdx
predX = np.append(predX,interactionClass.values.reshape(predX.shape[0],1),axis=1)
if predInteractions is None:
predInteractions = predX[boolIdx,:]
else:
predInteractions = np.append(predInteractions,predX[boolIdx,:], axis=0)
probData = pd.DataFrame().from_dict(probCutoffs, orient="index")
probData["FalseNegatives"] = probData["positiveInteractors"].iloc[0] - probData["positiveInteractors"]
probData["precision"] = (probData["positiveInteractors"]) / (probData["positiveInteractors"] + probData["interComplexInteractions"] + probData["decoyInteractors"])
probData["recall"] = (probData["positiveInteractors"]) / (probData["positiveInteractors"] + probData["FalseNegatives"])
probData["F1-measure"] = 2 * ((probData["precision"] * probData["recall"]) / (probData["precision"] + probData["recall"]))
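        #general F-beta: (1 + beta^2) * precision * recall / (beta^2 * precision + recall); beta > 1 weights recall higher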
probData["F-measure(b=2)"] = (1+2**2) * ((probData["precision"] * probData["recall"]) / (((2**2) * probData["precision"]) + probData["recall"]))
probData["F-measure(b=0.5)"] = (1+0.5**2)* ((probData["precision"] * probData["recall"]) / (((0.5**2) * probData["precision"]) + probData["recall"]))
#self.params["interactionProbabCutoff"] = float(probData.idxmax().loc["F1-measure"])
        print("Info :: Using the interaction probability cut-off: {}".format(self.params["interactionProbabCutoff"]))
# boolPredIdx = classProba >= self.params["interactionProbabCutoff"]
# if len(boolPredIdx.shape) > 1:
# boolIdx = np.sum(boolPredIdx,axis=1) == self.params["kFold"]
# else:
# boolIdx = boolPredIdx
probData.to_csv(os.path.join(folderToOutput,"classiferPerformanceMetrics_{}_addImp{}.txt".format(self.params["classifierClass"],self.params["addImpurity"])),sep="\t")
# print("Interactions > cutoff :", predInteractions.shape[0])
# print("Info :: Finding interactions in DB")
# boolDbMatch = np.isin(predInteractions[:,2],self.DB.df["E1E2"].values, assume_unique=True)
# print("Info :: Appending matches.")
# predInteractions = np.append(predInteractions,boolDbMatch.reshape(predInteractions.shape[0],1),axis=1)
d = pd.DataFrame(predInteractions, columns = dfColumns)
print("Info :: Number of interactions detected: {} at cut-off {}".format(d.index.size,self.params["interactionProbabCutoff"]))
boolDbMatch = d["In DB"] == "pos"
print("Info :: Annotate complexes to pred. interactions.")
d["ComplexID"], d["ComplexName"] = zip(*[self._attachComplexID(_bool,E1E2) for E1E2, _bool in zip(predInteractions[:,2], boolDbMatch)])
d = self._attachPeakIDtoEntries(d)
# boolIdx = d[pColumns[0]] > self.params["interactionProbabCutoff"]
# d = d.loc[boolIdx]
origSize = d.index.size
        print("Info :: Filtering for features that occur at least {} times in the predicted interactions".format(self.params["minimumPPsPerFeature"]))
if self.params["usePeakCentricFeatures"]:
eColumns = ["E1p","E2p"]
else:
eColumns = ["E1","E2"]
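        #count how often each feature (protein or peak id) occurs among the predicted interactions and keep only features reaching minimumPPsPerFeature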
Es = pd.Series(d[eColumns].values.flatten())
EsCounted = Es.value_counts()
boolIdx = EsCounted >= self.params["minimumPPsPerFeature"]
duplicatedPPs = EsCounted.index[boolIdx]
d = d.loc[d[eColumns].isin(duplicatedPPs).all(axis="columns")]
print("Removed interactions {}".format(origSize-d.index.size))
d.to_csv(pathToPrediction, sep="\t", index=False)
self.stats.loc[self.currentAnalysisName,"nInteractions ({})".format(self.params["interactionProbabCutoff"])] = d.index.size
self.stats.loc[self.currentAnalysisName,"Classifier"] = self.params["classifierClass"]
return d
def _attachComplexID(self,_bool,E1E2):
""
if not _bool:
return ("","")
else:
df = self.DB.df[self.DB.df["E1E2"] == E1E2]
return (';'.join([str(x) for x in df["ComplexID"].tolist()]),
';'.join([str(x) for x in df["complexName"].tolist()]))
def _plotChunkSummary(self, data, fileName, folderToOutput):
"util fn"
data[self.params["metrices"]] = self.classifier._scaleFeatures(data[self.params["metrices"]].values)
fig, ax = plt.subplots()
XX = data.melt(id_vars = [x for x in data.columns if x not in self.params["metrices"]],value_vars=self.params["metrices"])
sns.boxplot(data = XX, ax=ax, y = "value", x = "variable", hue = "Class")
plt.savefig(os.path.join(folderToOutput,"{}.pdf".format(fileName)))
plt.close()
def _plotFeatureImportance(self,folderToOutput,*args,**kwargs):
"""
Creates a bar chart showing the estimated feature importances
Parameters
----------
folderToOutput : string
Path to folder to save the pdf. Will be created if it does not exist.
*args
Variable length argument list passed to matplotlib.bar.
**kwargs
Arbitrary keyword arguments passed to matplotlib.bar.
Returns
-------
None
"""
fImp = self.classifier.getFeatureImportance()
self._makeFolder(folderToOutput)
if fImp is not None:
#save as txt file
pd.DataFrame(fImp, columns= self.params["metrices"]).to_csv(os.path.join(folderToOutput,"featureImportance{}.txt".format(self.params["metrices"])), sep="\t")
#plot feature importance
fig, ax = plt.subplots()
xPos = np.arange(len(self.params["metrices"]))
ax.bar(x = xPos, height = np.mean(fImp,axis=0), *args,**kwargs)
ax.errorbar(x = xPos, y = np.mean(fImp,axis=0), yerr = np.std(fImp,axis=0))
ax.set_xticks(xPos)
ax.set_xticklabels(self.params["metrices"], rotation = 45)
plt.savefig(os.path.join(folderToOutput,"featureImportance.pdf"))
plt.close()
def _randomStr(self,n):
"""
Returns a random string (lower and upper case) of size n
Parameters
----------
n : int
Length of string
Returns
-------
random string of length n
"""
letters = string.ascii_lowercase + string.ascii_uppercase
return "".join(random.choice(letters) for i in range(n))
def _scoreComplexes(self, complexDf, complexMemberIds = "subunits(UniProt IDs)", beta=2.5):
""
entryPositiveComplex = [self.DB.assignComplexToProtein(str(e),complexMemberIds,"ComplexID") for e in complexDf.index]
complexDf.loc[:,"ComplexID"] = entryPositiveComplex
matchingResults = pd.DataFrame(columns = ["Entry","Cluster Labels","Complex ID", "NumberOfInteractionsInDB"])
clearedEntries = pd.Series([x.split("_")[0] for x in complexDf.index], index=complexDf.index)
for c,d in self.DB.indentifiedComplexes.items():
boolMatch = clearedEntries.isin(d["members"])
clusters = complexDf.loc[boolMatch,"Cluster Labels"].values.flatten()
nEntriesMatch = np.sum(boolMatch)
if nEntriesMatch > 1:
groundTruth = [c] * nEntriesMatch
matchingResults = matchingResults.append(pd.DataFrame().from_dict({"Entry":complexDf.index[boolMatch].values,
"Cluster Labels" : clusters,
"Complex ID": groundTruth,
"NumberOfInteractionsInDB" : [d["n"]] * nEntriesMatch}) ,ignore_index=True)
if not matchingResults.empty:
score = v_measure_score(matchingResults["Complex ID"],matchingResults["Cluster Labels"],beta = beta)
else:
score = np.nan
return complexDf , score, matchingResults
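# Hedged illustration (not part of the pipeline): how the v-measure used above behaves.
# The score compares ground-truth complex IDs against cluster labels; beta > 1 weights
# completeness more than homogeneity. The labels below are made up.
#
#   from sklearn.metrics import v_measure_score
#   truth  = ["C1", "C1", "C2", "C2"]   # database complex IDs
#   labels = [0, 0, 1, 2]               # cluster labels, complex C2 is split in two
#   v_measure_score(truth, labels, beta=2.5)   # roughly 0.74; a perfect match would give 1.0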
def _clusterInteractions(self, predInts, clusterMethod = "HDBSCAN", plotEmbedding = True, groupFiles = [], combineProbs = True, groupName = ""):
"""
Performs dimensional reduction and clustering of prediction distance matrix over a defined parameter grid.
Parameter
predInts - ndarray.
clusterMethod - string. Any string of ["HDBSCAN",]
plotEmbedding - bool. If true, embedding is plotted and save to pdf and txt file.
returns
None
"""
embedd = None
bestDf = None
topCorrFeatures = None
splitLabels = False
recordScore = OrderedDict()
saveEmbeddings = []
maxScore = np.inf
metricColumns = [x if not isinstance(x,dict) else x["name"] for x in self.params["metricesForPrediction"]]
cb = ComplexBuilder(method=clusterMethod)
print("\nPredict complexes")
if predInts is None:
print("No database provided. UMAP and clustering will be performed using defaultKwargs. (noDatabaseForPredictions = True)")
pathToFolder = self._makeFolder(self.params["pathToComb"],"complexIdentification_{}".format(self.params["addImpurity"]))
if not self.params["databaseHasComplexAnnotations"] and not self.params["noDatabaseForPredictions"] and predInts is not None:
print("Database does not contain complex annotations. Therefore standard UMAP settings are HDBSCAN settings are used for complex identification.")
cb.set_params(self.params["hdbscanDefaultKwargs"])
clusterLabels, intLabels, matrix , reachability, core_distances, embedd = cb.fit(predInts,
metricColumns = metricColumns,
scaler = self.classifier._scaleFeatures,
umapKwargs= self.params["umapDefaultKwargs"])
elif self.params["noDistanceCalculationAndPrediction"] or self.params["noDatabaseForPredictions"]:
print("Info :: No database given for complex scoring. UMAP and HDBSCAN are performed to identify complexes.")
alignedEmbeddings = OrderedDict()
if len(self.Xs) > 1:
#correlate with each other
firstKey = list(self.Xs.keys())[0]
corrDfs = [self.Xs[firstKey].corrwith(df,axis=1,drop=True) for k,df in self.Xs.items() if k != firstKey]
mergedDf = pd.concat(corrDfs,join="inner",axis=1).mean(axis=1).sort_values(ascending=False)
topCorrFeatures = mergedDf.head(self.params["topNCorrFeaturesForUMAPAlignment"]).index
dataSets = [minMaxNorm(X.values,axis=1) for X in self.Xs.values()]
relations = []
for k,v in self.Xs.items():
if k != firstKey:
relationDict = dict([(self.Xs[prevKey].index.get_loc(idx),v.index.get_loc(idx)) for idx in topCorrFeatures])
relations.append(relationDict)
prevKey = k
print("Info :: Computing aligned UMAP using top correlated features.")
aligned_mapper = umap.aligned_umap.AlignedUMAP(**self.params["umapDefaultKwargs"]).fit(dataSets, relations=relations)
for n,umapE in enumerate(aligned_mapper.embeddings_):
key = list(self.Xs.keys())[n]
df = pd.DataFrame(umapE, index=self.Xs[key].index, columns = ["E({})_0".format(key),"E({})_1".format(key)])
alignedEmbeddings[key] = df.copy()
for analysisName in self.params["analysisName"]:
if self.params["useRawDataForDimensionalReduction"]:
print("Info :: Using raw intensity data for dimensional reduction. Not calculated distances")
if self.params["scaleRawDataBeforeDimensionalReduction"]:
X = self.Xs[analysisName]
predInts = pd.DataFrame(minMaxNorm(X.values,axis=1), index=X.index, columns = ["scaled_({})_{}".format(analysisName,colName) for colName in X.columns]).dropna()
else:
predInts = self.Xs[analysisName]
cb.set_params(self.params["hdbscanDefaultKwargs"])
clusterLabels, intLabels, matrix , reachability, core_distances, embedd, pooledDistances = cb.fit(predInts,
preCompEmbedding = alignedEmbeddings[analysisName] if analysisName in alignedEmbeddings else None,
metricColumns = self.X.columns,
scaler = None,
umapKwargs = self.params["umapDefaultKwargs"],
generateSquareMatrix = False,
)
df = pd.DataFrame().from_dict({"Entry":intLabels,"Cluster Labels":clusterLabels,"reachability":reachability,"core_distances":core_distances})
df = df.sort_values(by="Cluster Labels")
df = df.set_index("Entry")
predInts.to_csv(os.path.join(pathToFolder,"predInts_{}.txt".format(analysisName)))
else:
predInts = self._loadAndFilterDistanceMatrix()
predInts[metricColumns] = minMaxNorm(predInts[metricColumns].values,axis=0)
cb.set_params(self.params["hdbscanDefaultKwargs"])
clusterLabels, intLabels, matrix , reachability, core_distances, embedd, pooledDistances = cb.fit(predInts,
metricColumns = metricColumns,
scaler = None,
poolMethod= "min",
umapKwargs = self.params["umapDefaultKwargs"],
generateSquareMatrix = True,
)
df = pd.DataFrame().from_dict({"Entry":intLabels,"Cluster Labels({})".format(analysisName):clusterLabels,"reachability":reachability,"core_distances":core_distances})
df = df.sort_values(by="Cluster Labels")
df = df.set_index("Entry")
if pooledDistances is not None:
pooledDistances.to_csv(os.path.join(pathToFolder,"PooledDistance_{}.txt".format(self.currentAnalysisName)),sep="\t")
squaredDf = pd.DataFrame(matrix,columns=intLabels,index=intLabels).loc[df.index,df.index]
squaredDf.to_csv(os.path.join(pathToFolder,"SquaredSorted_{}.txt".format(self.currentAnalysisName)),sep="\t")
noNoiseIndex = df.index[df["Cluster Labels({})".format(analysisName)] > 0]
squaredDf.loc[noNoiseIndex,noNoiseIndex].to_csv(os.path.join(pathToFolder,"NoNoiseSquaredSorted_{}.txt".format(self.currentAnalysisName)),sep="\t")
splitLabels = True
if embedd is not None and plotEmbedding:
#save embedding
dfEmbed = pd.DataFrame(embedd, columns = ["UMAP_{}_0{}".format(analysisName,n) for n in range(embedd.shape[1])])
dfEmbed["clusterLabels({})".format(analysisName)] = clusterLabels
dfEmbed["labels({})".format(analysisName)] = intLabels
if splitLabels:
dfEmbed["sLabels"] = dfEmbed["labels"].str.split("_",expand=True).values[:,0]
dfEmbed = dfEmbed.set_index("sLabels")
else:
dfEmbed = dfEmbed.set_index("labels({})".format(analysisName))
if self.params["scaleRawDataBeforeDimensionalReduction"] and self.params["useRawDataForDimensionalReduction"]:
dfEmbed = dfEmbed.join([self.Xs[self.currentAnalysisName],predInts],lsuffix="_",rsuffix="__")
else:
dfEmbed = dfEmbed.join(self.Xs[self.currentAnalysisName])
if topCorrFeatures is not None:
dfEmbed["FeatureForUMAPAlign"] = dfEmbed.index.isin(topCorrFeatures)
saveEmbeddings.append(dfEmbed)
dfEmbed.to_csv(os.path.join(pathToFolder,"UMAP_Embedding_{}.txt".format(analysisName)),sep="\t")
#plot embedding.
fig, ax = plt.subplots()
ax.scatter(embedd[:,0],embedd[:,1],s=12, c=clusterLabels, cmap='Spectral')
plt.savefig(os.path.join(pathToFolder,"E({}).pdf".format(analysisName)))
plt.close()
pd.concat(saveEmbeddings,axis=1).to_csv(os.path.join(pathToFolder,"concatEmbeddings.txt"),sep="\t")
else:
embedd = None
if len(groupFiles) > 0:
groupMetricColumns = ["Prob_0_({})".format(analysisName) for analysisName in groupFiles]
# print(groupMetricColumns)
usePeaks = self.params["usePeakCentricFeatures"]
print("Using peaks for clustering.")
print(groupMetricColumns)
if usePeaks:
# if len(groupFiles) > 0:
eColumns = ["E1p_({})".format(groupFiles[0]),"E2p_({})".format(groupFiles[0])]
predInts = predInts[groupMetricColumns + eColumns + ["E1E2"]]
else:
predInts = predInts[groupMetricColumns + ["E1","E2","E1E2"]]
eColumns = ["E1","E2"]
#
predInts.dropna(subset=groupMetricColumns,inplace=True,thresh=1)
for n, params in enumerate(list(ParameterGrid(CLUSTER_PARAMS[clusterMethod]))):
try:
cb.set_params(params)
if clusterMethod == "HDBSCAN":
clusterLabels, intLabels, matrix , reachability, core_distances, embedd, pooledDistances = cb.fit(predInts,
metricColumns = groupMetricColumns,#,#[colName for colName in predInts.columns if "Prob_" in colName],
scaler = None,#self.classifier._scaleFeatures, #
inv = True, # after pooling by poolMethod, invert (1-X)
poolMethod="max",
preCompEmbedding = None,
entryColumns = eColumns
)
else:
clusterLabels, intLabels, matrix , reachability, core_distances = cb.fit(predInts,
metricColumns = [colName for colName in predInts.columns if "Prob_" in colName],
scaler = self.classifier._scaleFeatures)
# clusterLabels, intLabels, matrix , reachability, core_distances = cb.fit(predInts, metricColumns = probColumn, scaler = None, inv=True, poolMethod="mean")
except Exception as e:
print(e)
print("\nWarning :: There was an error performing clustering and dimensional reduction, using the params:\n" + str(params))
continue
df = pd.DataFrame().from_dict({"Entry":intLabels,"Cluster Labels":clusterLabels,"reachability":reachability,"core_distances":core_distances})
df = df.sort_values(by=["Cluster Labels"])
if usePeaks:
df["E"] = df["Entry"].str.split("_",expand=True)[0]
df = df.set_index("E")
else:
df = df.set_index("Entry")
# clusteredComplexes = df[df["Cluster Labels"] != -1]
df, score, matchingResults = self._scoreComplexes(df)
# df = df.join(assignedIDs[["ComplexID"]])
if True:#maxScore > score: # write out all
df.to_csv(os.path.join( pathToFolder,"Complexes:{}_{}_{}.txt".format(groupName,n,score)),sep="\t")
matchingResults.to_csv(os.path.join( pathToFolder,"ComplexPerEntry(ScoreCalc):{}_{}_{}.txt".format(groupName,n,score)),sep="\t")
print("Info :: Current best params ... ")
# squaredDf = pd.DataFrame(matrix,columns=df.index,index=df.index).loc[df.index,df.index]
# squaredDf.to_csv(os.path.join(pathToFolder,"SquaredSorted{}_{}.txt".format(groupName,n)),sep="\t")
# if usePeaks:
# noNoiseIndex = df["Entry"].loc[df["Cluster Labels"] > 0]
# else:
# noNoiseIndex = df.index[df["Cluster Labels"] > 0]
# squaredDf.loc[noNoiseIndex,noNoiseIndex].to_csv(os.path.join(pathToFolder,"NoNoiseSquaredSorted_{}_{}.txt".format(groupName,n)),sep="\t")
maxScore = score
bestDf = df
self._plotComplexProfiles(bestDf, pathToFolder, str(n))
if embedd is not None and plotEmbedding:
#save embedding
umapColumnNames = ["UMAP_{}".format(n) for n in range(embedd.shape[1])]
dfEmbed = pd.DataFrame(embedd, columns = umapColumnNames)
embedd = dfEmbed[umapColumnNames]
dfEmbed["clusterLabels"] = clusterLabels
if usePeaks:
dfEmbed["Ep"] = intLabels
dfEmbed["Entry"] = [x.split("_")[0] for x in intLabels]
else:
dfEmbed["Entry"] = intLabels
dfEmbed = dfEmbed.set_index("Entry")
dfEmbed.loc[dfEmbed.index,"ComplexID"] = df["ComplexID"].loc[dfEmbed.index]
rawDataMerge = [self.Xs[analysisName] for analysisName in groupFiles]
if n == 0:
for sampleN,fileName in enumerate(groupFiles):
rawDataMerge[sampleN].columns = ["{}_({}):F{}".format(colName,fileName,sampleN) for colName in rawDataMerge[sampleN].columns]
dfEmbed = dfEmbed.join(other = rawDataMerge)
try:
dfEmbed.to_csv(os.path.join(pathToFolder,"UMAP_Embeding_{}_{}.txt".format(n,groupName)),sep="\t")
except:
print("Saving umap embedding failed.")
#plot embedding.
fig, ax = plt.subplots()
ax.scatter(embedd["UMAP_0"].values, embedd["UMAP_1"].values,s=50, c=clusterLabels, cmap='Spectral')
plt.savefig(os.path.join(pathToFolder,"UMAP_Embedding_{}_n{}.pdf".format(groupName,n)))
plt.close()
recordScore[n] = {"score":score,"params":params}
def _loadAndFilterDistanceMatrix(self):
"""
Output to disk: 'highQualityInteractions(..).txt
However they are just the ones that show the lowest distance metrices.
Parameters
----------
Returns
-------
None
"""
metricColumns = [x if not isinstance(x,dict) else x["name"] for x in self.params["metrices"]]
dfColumns = ["E1","E2","E1E2","apexPeakDist"] + metricColumns
q = None
df = pd.DataFrame(columns = dfColumns)
filteredExisting = False
pathToFile = os.path.join(self.params["pathToComb"],"highQualityInteractions({}).txt".format(self.currentAnalysisName))
for X,nChunks in self._loadPairsForPrediction():
boolSelfIntIdx = X[:,0] == X[:,1]
X = X[boolSelfIntIdx == False]
if q is None:
df = df.append(pd.DataFrame(X, columns = dfColumns), ignore_index=True)
else:
if not filteredExisting:
#first reduce existing df
mask = df[metricColumns] < q#X[:,[n+4 for n in range(len(self.params["metrices"]))]] < q
df = df.loc[np.any(mask,axis=1)] #filtered
filteredExisting = True
toAttach = pd.DataFrame(X, columns = dfColumns)
mask = toAttach[metricColumns] < q
toAttach = toAttach.loc[np.any(mask,axis=1)]
df = df.append(toAttach, ignore_index=True)
if df.index.size > 50000 and q is None:
q = np.quantile(df[metricColumns].astype(float).values, q = 1-self.params["metricQuantileCutoff"], axis = 0)
print("Info :: {} total pairwise protein-protein pairs at any distance below 10% quantile.".format(df.index.size))
df = self._attachPeakIDtoEntries(df)
df.to_csv(pathToFile, sep="\t")
print("Info :: Saving low distance interactions in result folder.")
return df
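# Hedged sketch of the quantile filter used above (illustrative, made-up numbers): once enough pairs
# are collected, a per-metric cutoff q is fixed and a row is kept only if it falls below q in at least
# one metric. Column names and the 0.10 quantile are assumptions (the pipeline uses 1 - metricQuantileCutoff).
#
#   import numpy as np
#   import pandas as pd
#   d = pd.DataFrame(np.random.rand(1000, 3), columns=["m1", "m2", "m3"])
#   q = np.quantile(d.values, q=0.10, axis=0)       # per-metric cutoff
#   kept = d.loc[np.any(d.values < q, axis=1)]      # keep rows below the cutoff in any metric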
def _plotComplexProfiles(self,complexDf,outputFolder,name):
"""
Creates line charts as pdf for each profile.
Chart has two axes, one shows realy values and the bottom one
is scaled by normalizing the highest value to one and the lowest to zero.
Enabled/Disabled by the parameter "plotComplexProfiles".
Parameters
----------
complexDf : pd.DataFrame
asd
outputFolder : string
Path to folder, will be created if it does not exist.
name : string
Name of complex.
Returns
-------
None
"""
if self.params["plotComplexProfiles"]:
toProfiles = self._makeFolder(outputFolder,"complexProfiles")
pathToFolder = self._makeFolder(toProfiles,str(name))
x = np.arange(0,len(self.X.columns))
for c in complexDf["Cluster Labels"].unique():
if c != -1:
fig, ax = plt.subplots(nrows=2,ncols=1)
entries = complexDf.loc[complexDf["Cluster Labels"] == c,:].index
lineColors = sns.color_palette("Blues",desat=0.8,n_colors=entries.size)
for n,e in enumerate(entries):
uniprotID = e.split("_")[0]
if uniprotID in self.Signals[self.currentAnalysisName]:
y = self.Signals[self.currentAnalysisName][uniprotID].Y
normY = y / np.nanmax(y)
ax[0].plot(x,y,linestyle="-",linewidth=1, label=e, color = lineColors[n])
ax[1].plot(x,normY,linestyle="-",linewidth=1, label=e, color = lineColors[n])
plt.legend(prop={'size': 5})
plt.savefig(os.path.join(pathToFolder,"{}_n{}.pdf".format(c,len(entries))))
plt.close()
def _attachPeakIDtoEntries(self,predInts):
""
if not "apexPeakDist" in predInts.columns:
return predInts
peakIds = [peakID.split("_") for peakID in predInts["apexPeakDist"]]
predInts["E1p"], predInts["E2p"] = zip(*[("{}_{}".format(E1,peakIds[n][0]),"{}_{}".format(E2,peakIds[n][1])) for n,(E1,E2) in enumerate(zip(predInts["E1"],predInts["E2"]))])
return predInts
def _makeFolder(self,*args):
""
pathToFolder = os.path.join(*args)
if not os.path.exists(pathToFolder):
os.mkdir(pathToFolder)
return pathToFolder
def _createTxtFile(self,pathToFile,headers):
""
with open(pathToFile,"w+") as f:
f.write("\t".join(headers))
def _makeTmpFolder(self, n = 0):
"""
Creates temporary fodler.
Parameters
----------
n : int
Returns
-------
pathToTmp : str
ansolute path to tmp/anlysis name folder.
"""
if self.params["analysisName"] is None:
analysisName = self._randomStr(50)
elif isinstance(self.params["analysisName"],list) and n < len(self.params["analysisName"]):
analysisName = self.params["analysisName"][n]
else:
analysisName = str(self.params["analysisName"])
#check if results folder exists.
pathToTmp = os.path.join(".","results")
if not os.path.exists(pathToTmp):
os.mkdir(pathToTmp)
self.currentAnalysisName = analysisName
date = datetime.today().strftime('%Y-%m-%d')
self.params["Date of anaylsis"] = date
runName = self.params["runName"] if self.params["runName"] is not None else self._randomStr(3)
self.params["pathToComb"] = self._makeFolder(pathToTmp,"{}_n({})runs".format(runName,len(self.params["analysisName"])))
print("Info :: Folder created in which combined results will be saved: " + self.params["pathToComb"])
pathToTmpFolder = os.path.join(self.params["pathToComb"],analysisName)
if os.path.exists(pathToTmpFolder):
print("Info :: Path to results folder exsists")
if self.params["restartAnalysis"]:
print("Warning :: Argument restartAnalysis was set to True .. cleaning folder.")
#to do - shift to extra fn
for root, dirs, files in os.walk(pathToTmpFolder):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
else:
print("Info :: Will take files from there, if they exist")
return pathToTmpFolder
try:
self._makeFolder(pathToTmpFolder)
print("Info :: Result folder created -- ",analysisName)
self._makeFolder(pathToTmpFolder,"chunks")
print("Info :: Chunks folder created/checked")
self._makeFolder(pathToTmpFolder,"result")
print("Info :: Result folder created/checked")
self._makeFolder(pathToTmpFolder,"result","alignments")
print("Info :: Alignment folder created/checked")
self._makeFolder(pathToTmpFolder,"result","modelPlots")
print("Info :: Result/modelPlots folder created/checked. In this folder, all model plots will be saved here, if savePlots equals true, otherwise empty.")
# self._createTxtFile(pathToFile = os.path.join(pathToTmpFolder,"runTimes.txt"),headers = ["Date","Time","Step","Comment"])
return pathToTmpFolder
except OSError as e:
print(e)
raise OSError("Could not create result folder due to OS Error")
def _handleComptabFormat(self,X,filesToLoad):
"""
Extracts different samples from comptab format.
Parameters
----------
X : str
Path to the folder where the comptab files are located
filesToLoad:
list of txt/tsv files present in the folder
Returns
-------
detectedDataFrames : list of pd.DataFrame
list of identified data frames from the comptab file
fileNames : list of str
Internal names <comptabfileName>:<sampleName>
"""
detectedDataFrames = []
fileNames = []
for fileName in filesToLoad:
comptFile = pd.read_csv(os.path.join(X,fileName), sep="\t", header=[0,1], index_col=0)
columnsToKeep = [colNameTuple for colNameTuple in comptFile.columns if "unique peptides" not in colNameTuple and "coverage" not in colNameTuple and "protein length" not in colNameTuple]
comptFile = comptFile[columnsToKeep]
#find unique sample names given in the first header
samples = np.unique([colNameTuple[0] for colNameTuple in comptFile.columns])
for sampleName in samples:
sampleColumns = [colNameTuple for colNameTuple in comptFile.columns if colNameTuple[0] == sampleName]
dataFrame = pd.DataFrame(comptFile[sampleColumns].values,
columns = [colNameTuple[1] for colNameTuple in comptFile.columns if colNameTuple[0] == sampleName])
dataFrame["Uniprot ID"] = comptFile.index
detectedDataFrames.append(dataFrame)
fileNames.append("{}:{}".format(fileName,sampleName))
return detectedDataFrames, fileNames
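# Hedged illustration of the comptab layout assumed above: a tab-separated file with a two-row header
# (sample name on top, column name below), read with header=[0,1]. The file name and column tuples
# below are made up.
#
#   compt = pd.read_csv("example_comptab.tsv", sep="\t", header=[0, 1], index_col=0)
#   list(compt.columns[:2])   # e.g. [("sample1", "fraction_01"), ("sample1", "fraction_02")]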
def _mergeDistancesForGroups(self):
""
def run(self,X, maxValueToOne = False):
"""
Runs the ComplexFinder Script.
Parameters
----------
X : str, list, pd.DataFrame
    Input data. Either a path to a folder containing txt/tsv files, a list of pandas data frames, or a single data frame.
Returns
-------
None
"""
self.allSamplesFound = False
self.entriesInChunkLoaded = False
global entriesInChunks
if isinstance(X,list) and all(isinstance(x,pd.DataFrame) for x in X):
if self.params["compTabFormat"]:
raise TypeError("If 'compTabFormat' is True. X must be a path to a folder. Either set compTabFormat to False or provide a path.")
print("Multiple dataset detected - each one will be analysed separetely")
if self.params["analysisName"] is None or not isinstance(self.params["analysisName"],list) or len(self.params["analysisName"]) != len(X):
self.params["analysisName"] = [self._randomStr(10) for n in range(len(X))] #create random analysisNames
print("Info :: 'anylsisName' did not match X shape. Created random strings per dataframe.")
elif isinstance(X,str):
if os.path.exists(X):
loadFiles = [f for f in os.listdir(X) if f.endswith(".txt") or f.endswith(".tsv")]
if self.params["compTabFormat"]:
Xs, loadFiles = self._handleComptabFormat(X,loadFiles)
else:
Xs = [pd.read_csv(os.path.join(X,fileName), sep="\t") for fileName in loadFiles]
#filterId = pd.read_csv(os.path.join("filter","SPY.txt"),index_col=None)
#Xs = [X.loc[X["Protein.Group"].isin(filterId["MouseMito"].values)] for X in Xs]
self.params["analysisName"] = loadFiles
if maxValueToOne:
maxValues = pd.concat([x.max(axis=1) for x in Xs], axis=1).max(axis=1)
normValueDict = dict([(Xs[0][self.params["idColumn"]].values[n],maxValue) for n,maxValue in enumerate(maxValues.values)])
self.params["normValueDict"] = normValueDict
else:
raise ValueError("Provided path {} does not exist.".format(X))
elif isinstance(X,pd.DataFrame):
Xs = [X]
self.params["analysisName"] = [self._randomStr(10)]
else:
ValueError("X must be either a string, a list of pandas data frames or pandas data frame itself.")
self.params["pathToTmp"] = {}
statColumns = ["nInteractions ({})".format(self.params["interactionProbabCutoff"]),"nPositiveInteractions","OOB_Score","ROC_Curve_AUC","Metrices","Classifier","ClassifierParams"]
self.stats = pd.DataFrame(index = self.params["analysisName"],columns = statColumns)
self.params["rawData"] = {}
self.params["runTimes"] = {}
self.params["runTimes"]["StartTime"] = time.time()
for n,X in enumerate(Xs):
pathToTmpFolder = self._makeTmpFolder(n)
self.params["pathToTmp"][self.currentAnalysisName] = pathToTmpFolder
if n == 0:
pathToParams = os.path.join(self.params["pathToComb"],"params.json")
pd.DataFrame().from_dict(self.params,orient="index").sort_index().to_json(pathToParams,indent = 4, orient="columns")
print("Info :: Parameters saved to output folder.")
if os.path.exists(os.path.join(self.params["pathToComb"],"runTimes.txt")):
if not self.params["restartAnalysis"] and not self.params["recalculateDistance"] and not self.params["retrainClassifier"]:
print("Warning :: Analysis done. Aborting (detected by finding the file 'runTimes.txt'")
return
print("------------------------")
print("--"+self.currentAnalysisName+"--")
print("--------Started---------")
print("--Signal Processing &--")
print("------Peak Fitting------")
print("------------------------")
if pathToTmpFolder is not None:
#loading data
self._load(X)
#self._checkGroups()
self._findPeaks(self.params["n_jobs"])
self._collectRSquaredAndFitDetails()
self._saveSignals()
combinedPeakModel = self._combinePeakResults()
self._attachQuantificationDetails(combinedPeakModel)
endSignalTime = time.time()
self.params["runTimes"]["SignalFitting&Comparision"] = time.time() - self.params["runTimes"]["StartTime"]
if not self.params["justFitAndMatchPeaks"]:
print("Info :: Peak modeling done. Starting with distance calculations and predictions (if enabled)..")
self._createSignalChunks()
for n,X in enumerate(Xs):
if n < len(self.params["analysisName"]): #happnes if others than txt file are present
self.currentAnalysisName = self.params["analysisName"][n]
print(self.currentAnalysisName," :: Starting distance calculations.")
self._calculateDistance()
self._mergeDistancesForGroups()
self.params["runTimes"]["Distance Calculation"] = time.time() - endSignalTime
distEndTime = time.time()
self._loadReferenceDB()
for analysisName in self.params["analysisName"]:
self._addMetricesToDB(analysisName)
dataPrepEndTime = time.time()
self.params["runTimes"]["Database Preparation"] = dataPrepEndTime - distEndTime
self._trainPredictor(self.params["addImpurity"])
for analysisName in self.params["analysisName"]:
self.currentAnalysisName = analysisName
self._predictInteractions()
#
#save statistics
self.stats.to_csv(os.path.join(self.params["pathToComb"],"statistics.txt"),sep="\t")
#combine interactions
if not self.params["noDistanceCalculationAndPrediction"]:
if not self.params["noDatabaseForPredictions"]:
combinedInteractions = self._combineInteractionsAndClusters()
else:
print("Warning/Info :: noDistancenCalculationAndPrediction is True, skipping combineInteraction step.")
endTrainingTime = time.time()
self.params["runTimes"]["Classifier Training & Prediction"] = endTrainingTime - dataPrepEndTime
if not self.params["noDistanceCalculationAndPrediction"] and len(self.params["grouping"]) > 0 and not self.params["noDatabaseForPredictions"]:
for groupName,groupFileNames in self.params["grouping"].items():
if isinstance(groupFileNames,str):
groupFileNames = [groupFileNames]
self._clusterInteractions(combinedInteractions,groupFiles = groupFileNames,groupName = groupName)
else:
print("Info :: Cluster Interactions")
self._clusterInteractions(None)
self.params["runTimes"]["Interaction Clustering and Embedding"] = time.time() - endTrainingTime
print("Info :: Run Times :: ")
print(self.params["runTimes"])
pd.DataFrame().from_dict(self.params["runTimes"],orient="index").to_csv(os.path.join(self.params["pathToComb"],"runTimes.txt"),sep="\t")
print("Info :: Analysis done.")
def _combinePredictedInteractions(self, pathToComb):
"""
Combines predicted Interactions based on the output
files : predictedInteractions[..].txt of each run.
Parameters
----------
pathToComb : str, path to combined result folder.
Returns
-------
combResults : pd.DataFrame
combined data frame for each run. All metrices and predictions are provided.
"""
pathToInteractions = os.path.join(pathToComb,"combinedInteractions.txt")
if False and os.path.exists(pathToInteractions) and not self.params["retrainClassifier"]:
combResults = pd.read_csv(pathToInteractions,sep="\t")
combResults = self._filterCombinedInteractions(combResults)
print("Info :: Combined interactions found and loaded.")
return combResults
print("Info :: Combining interactions of runs.")
preditctedInteractions = []
for analysisName in self.params["analysisName"]:
pathToResults = os.path.join(self.params["pathToTmp"][analysisName],"result")
pathToPrediction = os.path.join(pathToResults,"predictedInteractions{}_{}_{}.txt".format(self.params["metricesForPrediction"],self.params["classifierClass"],self.params["addImpurity"]))
if os.path.exists(pathToPrediction):
df = pd.read_csv(pathToPrediction,sep="\t", low_memory=False).set_index(["E1E2","E1","E2"])
df = df.loc[df["Prob_0"] > self.params["interactionProbabCutoff"]]
preditctedInteractions.append(df)
else:
raise ValueError("Warning :: PredictedInteractions not found. " + str(pathToPrediction))
for n,df in enumerate(preditctedInteractions):
analysisName = self.params["analysisName"][n]
if n == 0:
combResults = df
combResults.columns = ["{}_({})".format(colName,analysisName) for colName in df.columns]
combResults[analysisName] = pd.Series(["+"]*df.index.size, index = df.index)
else:
df.columns = ["{}_({})".format(colName,analysisName) for colName in df.columns]
#columnNames = [colName for colName in df.columns if colName] # we have them already from n = 0
df[analysisName] = pd.Series(["+"]*df.index.size, index = df.index)
import matplotlib.pyplot as plt
import pandas
import csv
import os
import re
from pprint import pprint
training_group = {}
val_group = {}
epoch_count = range(0, 50)
pattern = re.compile('.+#.+\.csv')
for filename in os.listdir('results_csv/plot_target'):
if pattern.match(filename):
csv_name = re.search('#.+', filename).group(0)[1:-4].split('_')
train_label_acc = 'train_acc'
val_label_acc = 'val_acc'
df = pandas.read_csv(f"results_csv/plot_target/{filename}")
training_group[f"{csv_name[0]}_{csv_name[2]}_{csv_name[3][0]}"] = | pandas.to_numeric(df[train_label_acc]) | pandas.to_numeric |
from operator import is_
import shap
from explainx.lib.utils import is_classification
import pandas as pd
import numpy as np
class ShapleyValues():
def __init__(self, model,input_data, target_data, ct):
#super().__init__(model, input_data, ct)
self.model = model
self.input_data = input_data
self.actual_data = target_data
self.ct = ct
#self.row_number = 0
def tree_explainer(self):
explainer = shap.TreeExplainer(self.model)
try:
#classification case
predictions = self.model.predict
probabilities = self.model.predict_proba
return explainer, predictions, probabilities
except:
#regression case
predictions = self.model.predict
return explainer, predictions
def kernel_explainer(self):
try:
#classification case
explainer = shap.KernelExplainer(self.model.predict_proba,
shap.sample(self.input_data, 100),
link='logit',
feature_names=self.input_data.columns,
seed=0)
predictions = self.model.predict
prediction_probabilities = self.model.predict_proba
return explainer, predictions, prediction_probabilities
except:
#regression case
explainer = shap.KernelExplainer(self.model.predict,
shap.sample(self.input_data, 100),
link='identity',
feature_names=self.input_data.columns,
seed=0)
predictions = self.model.predict
return explainer, predictions
def kernel_explainer_with_ct(self):
try:
#classification case
pred_fcn = lambda x : self.model.predict_proba(self.ct.transform(x))
explainer = shap.KernelExplainer(pred_fcn, shap.sample(self.input_data, 100),
link='logit',
feature_names=self.input_data.columns,
seed=0)
pred = lambda x : self.model.predict(self.ct.transform(x))
return explainer, pred, pred_fcn
except:
pred_fcn = lambda x : self.model.predict(self.ct.transform(x))
explainer = shap.KernelExplainer(pred_fcn, shap.sample(self.input_data, 100),
link='identity',
feature_names=self.input_data.columns,
seed=0)
return explainer, pred_fcn
def shap_explainer(self):
if is_classification(self.model):
if self.ct == None:
try:
explainer, pred, pred_prob = self.tree_explainer()
return explainer, pred, pred_prob
except:
try:
explainer, pred, pred_prob = self.kernel_explainer()
return explainer, pred, pred_prob
except:
raise Exception(("{} not supported. Please create an issue on Github").format(self.model))
else:
try:
explainer, pred, pred_fcn = self.kernel_explainer_with_ct()
return explainer, pred, pred_fcn
except:
raise Exception(("{} not supported. Please create an issue on Github").format(self.model))
else:
if self.ct == None:
try:
explainer, pred = self.tree_explainer()
return explainer, pred
except:
try:
explainer, pred = self.kernel_explainer()
return explainer, pred
except:
raise Exception(("{} not supported. Please create an issue on Github").format(self.model))
else:
try:
explainer, pred_fcn = self.kernel_explainer_with_ct()
return explainer, pred_fcn
except:
raise Exception(("{} not supported. Please create an issue on Github").format(self.model))
def append_shap_values_to_df(self, input_sv, in_data, scope):
df_shap = pd.DataFrame(input_sv)
'''
Created on Oct 23, 2017
@author: ronaldmaceachern
module with methods to clean dirty .csv files using a set of rules
'''
import re
import os
import numpy as np
import pandas as pd
from difflib import SequenceMatcher
def makeRowDf( x, desc_name = '', cols = None):
'''a wrapper function to build a one-row DataFrame for the column summary table
'''
ncol = len(cols)
return pd.DataFrame( np.array([desc_name] + x).reshape(1,ncol), columns = cols)
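# Hedged usage sketch (made-up values): build a single summary row with a description label.
#
#   cols = ["description", "colA", "colB"]
#   makeRowDf(["1", "2"], desc_name="n_missing", cols=cols)
#   #   description colA colB
#   # 0   n_missing    1    2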
def checkIfColLikeRow(df, n = 5):
cols = list(df.columns)
#increment down a few rows
out = []
for i in np.arange(n):
#extract a row and convert to str
rw = np.array([ str(x) for x in df.iloc[i,:].values])
#see how similar to columns
sim = np.array([ SequenceMatcher(None, rw[i], cols[i]).ratio() for i in np.arange(df.shape[1])])
out += [sim.reshape(1, len(sim))]
#stack the results
out = np.concatenate(out, axis = 0)
#average over the rows
out = np.mean(out, axis = 0)
return out
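# Hedged illustration of the similarity measure used above (ratios are approximate):
#
#   from difflib import SequenceMatcher
#   SequenceMatcher(None, "Store Name", "store_name").ratio()   # ~0.7, high -> the row looks like a header
#   SequenceMatcher(None, "12.5", "store_name").ratio()         # ~0.0, low  -> the row looks like data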
def checkIfColInFirstRow(df):
'''
check if the column names appear in the first row
'''
res = None
## column fix - could be a class
##rule1: check if first row belongs as column name
r1 = df.iloc[0,:]
#drop the NaN's
r1 = r1.dropna()
#check if non NaNs are strings
#are they all?
allStr = np.array([isinstance(x,str) for x in r1]).all()
#if they're all str, combine with column names - this is
if allStr:
print('column names found in first row, putting into columns')
cols = list(r1.index)
oldCol = np.array(df.columns)
newcol = df.columns[df.columns.isin(cols)] +'_' +r1.values
oldCol[df.columns.isin(cols)] = newcol
res = oldCol
#drop row with bad values
#df = df.drop(0)
return res
def checkForExpression(df, expression = '', return_bool = False):
'''check each element to see if it matches a regular expression
if return_bool is True, return an array of bools (same size as df)
otherwise return the fraction of matching elements per column
if an element is null (according to pd.isnull()) False is given
Example:
--------
expression for floats: "^\d+?\.\d+?$"
expression for less than "<"
'''
#store results in an array
out = []
#increment over the columns
for i in np.arange(df.shape[1]):
#extract column values
x = df.iloc[:,i].values
# if it's not null and expression is matched
y = np.array([ False if pd.isnull(element) else not re.match(expression,element) is None for element in x ])
#if return bool, return an array of bool
if return_bool:
out += [y.reshape(len(y), 1)]
else:
out += [y.mean()]
if return_bool:
out = np.concatenate(out, axis = 1)
else:
out = np.array(out)
return out
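# Hedged usage sketch (made-up frame): fraction of float-like entries per column, and a boolean mask
# for a "<" marker.
#
#   df = pd.DataFrame({"a": ["1.5", "<2", "x"], "b": ["3.14", None, "2.0"]})
#   checkForExpression(df, expression="^\d+?\.\d+?$")         # -> array([0.333..., 0.666...])
#   checkForExpression(df, expression="<", return_bool=True)  # -> boolean array, True only for "<2"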
def checkNanPerRow(df):
out = []
#increment over each row, count the number not null
for i in np.arange(df.shape[0]):
out += [np.array([ pd.isnull(x)
import logging
import sys
from os import environ as env
from time import time
from gitlabdata.orchestration_utils import (
dataframe_uploader,
dataframe_enricher,
snowflake_engine_factory,
)
import pandas as pd
from sqlalchemy.engine.base import Engine
def single_query_upload(query: str, table_name: str) -> pd.DataFrame:
"""
Takes a single query and uploads to raw.snowflake
"""
snowflake_engine_sysadmin = snowflake_engine_factory(config_dict, "SYSADMIN")
connection = snowflake_engine_sysadmin.connect()
results = pd.read_sql(sql=query, con=connection)
connection.close()
snowflake_engine_sysadmin.dispose()
snowflake_engine_loader = snowflake_engine_factory(config_dict, "LOADER")
dataframe_uploader(results, snowflake_engine_loader, table_name, "snowflake")
snowflake_engine_loader.dispose()
return results
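# Hedged usage sketch (query and table name are examples only):
#
#   df = single_query_upload("SHOW DATABASES;", "show_databases")
#   # runs the query with the SYSADMIN role, then uploads the result set to the raw.snowflake schema
#   # (table "show_databases") via the LOADER role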
def iterative_query_upload(
dataframe: pd.DataFrame, column: str, base_query: str, table_name: str
) -> None:
"""
Takes a pandas dataframe, iterates on a given column, builds a final result set,
and uploads to raw.snowflake.
"""
snowflake_engine_sysadmin = snowflake_engine_factory(config_dict, "SYSADMIN")
connection = snowflake_engine_sysadmin.connect()
results_all = []
for index, row in dataframe.iterrows():
ref_column = row[column]
query = f"{base_query} {ref_column};"
results = pd.read_sql(sql=query, con=connection)
results_all.append(results)
results_all = pd.concat(results_all)
# -*- coding: utf-8 -*-
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui,QtWidgets
from pyqtgraph.dockarea import *
from time import perf_counter
from pyqtgraph.graphicsItems.GradientEditorItem import Gradients
import sys
import numpy as np
import os
from functools import partial
import time
from openpyxl import load_workbook
import pandas as pd
import time
pg.mkQApp()
## Define main window class from template
path = os.path.dirname(os.path.abspath(__file__))
uiFile = os.path.join(path, 'GUIDE.ui')
WindowTemplate, TemplateBaseClass = pg.Qt.loadUiType(uiFile)
### BEGIN Modele class ###
class Modele():
def __init__(self):
# Allow importing any file provided as argument in the form: python3 GUIDE.py -f model_input
if len(sys.argv) > 1:
import importlib
option_name = sys.argv[1]
assert option_name == '-f', f"option '{option_name}' not understood. Known option is only '-f' with filename as value"
assert len(sys.argv)>=3, "provide a filename to load parameters from"
lib_path = sys.argv[2]
if lib_path.endswith('.py'): lib_path = lib_path.rstrip('.py')
input_file = importlib.import_module(lib_path.replace('/','.'))
else:
import model_input as input_file
# Loading plots configuration (used in MainWindow class)
docks = input_file.load_docks()
setattr(self,'docks',docks)
# Loading window parameters
window_params = input_file.window_params
if 'streaming' not in window_params.keys(): self.streaming = True
for window_param in window_params.keys():
setattr(self,window_param,window_params[window_param])
# Tracking time
self.nstep = 0
self.time_stamp = np.zeros(self.array_size).astype(np.float64)
# Loading parameters
params = input_file.load_params()
setattr(self,'params',params)
for param in self.params.keys():
if isinstance(self.params[param]['step'],int): typ = int
else: typ = np.float64
self.params[param]['value'] = self.params[param]['init_cond'] * np.ones(self.array_size).astype(typ)
# Set default plot for params to False if none provided
for param in self.params.keys():
if 'plot' not in self.params[param].keys():
self.params[param]['plot'] = False
# Loading variables
variables = input_file.load_variables()
setattr(self,'variables',variables)
# Loading observables
observables = input_file.load_observables()
setattr(self,'observables',observables)
# List as defined in the input file (as observables are added to variables dict)
list_variables = list(self.variables.keys())
list_observables = list(self.observables.keys())
# Concatenate the variables and observables dict (if 'invert_var_obs' then invert order observables and variables are displayed)
if not 'invert_order_obs_var' in window_params.keys(): self.variables = dict(self.variables, **self.observables)
else:
if window_params['invert_order_obs_var']:
self.variables = dict(self.observables, **self.variables)
else:
self.variables = dict(self.variables, **self.observables)
# Build main dict of variables
for variable in self.variables.keys():
self.variables[variable]['value'] = self.variables[variable]['init_cond'] * np.ones(self.array_size).astype(self.variables[variable]['type'])
if variable in list_variables:
self.variables[variable]['observable'] = False
elif variable in list_observables:
self.variables[variable]['observable'] = True
# Assert no params, variables and observables are called the same
assert len(set(list_variables)&set(list_observables))==0 and len(set(list_variables)&set(list(self.params.keys())))==0 and len(set(list(self.params.keys()))&set(list_observables))==0, f"Repeated name for variables, observables and/or parameters"
# Set default plot for variables to True if none provided
for variable in self.variables.keys():
if 'plot' not in self.variables[variable].keys():
self.variables[variable]['plot'] = True
# Loading equations into keyword 'equation' in variables dict
# 'diff_eq_' and 'eq_' are default patterns for variables and observables respectively
pattern_variables = 'diff_eq_'
for key in [attr for attr in input_file.__dict__.keys() if attr.startswith(pattern_variables)]:
variable = key.split(pattern_variables)[-1]
if variable not in list_variables:
print(f"Warning: Equation for Variable {variable} not used or not understood")
continue
if 'equation' in self.variables[variable].keys(): continue
self.variables[variable]['equation'] = input_file.__dict__[key]
pattern_observables = 'eq_'
for key in [attr for attr in input_file.__dict__.keys() if attr.startswith(pattern_observables)]:
variable = key.split(pattern_observables)[-1]
if variable not in list_observables:
print(f"Warning: Equation for Observable {variable} not used or not understood")
continue
if 'equation' in self.variables[variable].keys(): continue
self.variables[variable]['equation'] = input_file.__dict__[key]
# Create dict of the usable kernels
self.kernels = {}
pattern_kernels = 'kernel_'
for key in [attr for attr in self.__dir__() if attr.startswith(pattern_kernels)]:
kernel = key.split(pattern_kernels)[-1]
self.kernels[kernel] = {}
self.kernels[kernel]['value'] = getattr(self,key)
for key in [attr for attr in input_file.__dict__.keys() if attr.startswith(pattern_kernels)]:
kernel = key.split(pattern_kernels)[-1]
self.kernels[kernel] = {}
self.kernels[kernel]['value'] = input_file.__dict__[key]
# Load additional keyboard keys if any provided
self.user_defined_keyPressEvent = input_file.keyboard_keys()
if self.user_defined_keyPressEvent is None: self.user_defined_keyPressEvent = {} # if None provided
system_reserved_keys = [" ", "<KEY>"]
for user_defined_key in self.user_defined_keyPressEvent.keys():
assert user_defined_key not in system_reserved_keys, f"User defined key '{user_defined_key}' in system reserved ones {system_reserved_keys}"
########################### BEGIN Assertions input file ###########################
# 'dock' (variables): Not providing dock_name that doesn't exist
for variable in self.variables.keys():
if 'dock' in self.variables[variable]:
for dock_name in self.variables[variable]['dock']:
if not isinstance(dock_name,dict):
assert dock_name in self.docks.keys(), f"Dock name '{dock_name}' for variable {variable} not understood. Dock name must be in {list(self.docks.keys())}"
# all variables have an equation
for variable in self.variables.keys():
assert 'equation' in self.variables[variable].keys(), f"An equation for variable {variable} must be provided"
########################### END Assertions input file ###########################
def simulator(self):
""" Calculate 1 time step and update arrays """
# Actual computation (pass only the 'value' keyword of each sub-dictionnary)
self.computation_result_dict = self.kernels[self.kernel]['value']({key:value['value'][-1] for (key,value) in self.variables.items() if not value['observable']},{key:value['value'][-1] for (key,value) in self.params.items()}) # use last value of all variables for the computations of next step
# Update last values to the newest calculated
for variable in self.variables.keys():
if not self.variables[variable]['observable']:
# Shift the buffer left by one index and write the new value at the end (avoids concatenation)
self.variables[variable]['value'][:-1] = self.variables[variable]['value'][1:]
self.variables[variable]['value'][-1] = self.computation_result_dict[variable]
# Evaluate observables
self.update_observables()
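    # Hedged illustration of the rolling-buffer update used above (standalone, made-up size):
    # the newest value is written on the right and the oldest dropped on the left, without reallocating.
    #
    #   import numpy as np
    #   buf = np.zeros(5)
    #   new_value = 1.0
    #   buf[:-1] = buf[1:]
    #   buf[-1] = new_value   # buf is now [0, 0, 0, 0, 1]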
def update_observables(self):
for variable in self.variables.keys():
if self.variables[variable]['observable']:
self.obs_computation_result = self.variables[variable]['equation'](self,{key:value['value'] for (key,value) in self.variables.items()},{key:value['value'][-1] for (key,value) in self.params.items()})
if 'calculation_size' in self.variables[variable].keys() and self.variables[variable]['calculation_size']:
self.variables[variable]['value'] = self.obs_computation_result
else:
try: index = len(self.obs_computation_result)
except TypeError: index = 1 # If return only a single value
self.variables[variable]['value'][:-index] = self.variables[variable]['value'][index:]
self.variables[variable]['value'][-index:] = self.obs_computation_result
def kernel_euler(self, variables, params):
""" N variables Euler algorithm (A = A + dt * eq_A(params)) """
new_variables = {}
for variable_name in variables.keys():
new_variables[variable_name] = variables[variable_name] + self.step_size * self.variables[variable_name]['equation'](self,variables,params)
return new_variables
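    # Hedged worked example (not used by the GUI): one Euler step for dy/dt = -k*y with dt = 0.1.
    # The names below are illustrative only.
    #
    #   variables = {"y": 1.0}
    #   params = {"k": 2.0}
    #   dt = 0.1
    #   y_next = variables["y"] + dt * (-params["k"] * variables["y"])   # = 0.8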
def kernel_RK4(self, variables, params):
""" N variables RK4 algorithm """
temp_variables = variables.copy()
# Loop for each coefficient on all equations
coefs_1 = {}
for variable_name in variables.keys():
coefs_1[variable_name] = self.variables[variable_name]['equation'](self,temp_variables,params)
coefs_2 = {}
for variable_name in variables.keys(): # evaluate variables first
temp_variables[variable_name] = variables[variable_name] + (self.step_size/2.)*coefs_1[variable_name]
for variable_name in variables.keys():
coefs_2[variable_name] = self.variables[variable_name]['equation'](self,temp_variables,params)
coefs_3 = {}
for variable_name in variables.keys():
temp_variables[variable_name] = variables[variable_name] + (self.step_size/2.)*coefs_2[variable_name]
for variable_name in variables.keys():
coefs_3[variable_name] = self.variables[variable_name]['equation'](self,temp_variables,params)
coefs_4 = {}
for variable_name in variables.keys():
temp_variables[variable_name] = variables[variable_name] + self.step_size*coefs_3[variable_name]
for variable_name in variables.keys():
coefs_4[variable_name] = self.variables[variable_name]['equation'](self,temp_variables,params)
new_variables = {}
for variable_name in variables.keys():
new_variables[variable_name] = variables[variable_name] + (self.step_size/6.)*(coefs_1[variable_name]+2*coefs_2[variable_name]+2*coefs_3[variable_name]+coefs_4[variable_name])
return new_variables
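    # Hedged worked example (single variable, illustrative): a classic RK4 step for dy/dt = -y,
    # y0 = 1, dt = 0.1, using the same combination of slopes as the generalized loop above.
    #
    #   f = lambda y: -y
    #   y, dt = 1.0, 0.1
    #   k1 = f(y)
    #   k2 = f(y + dt/2 * k1)
    #   k3 = f(y + dt/2 * k2)
    #   k4 = f(y + dt * k3)
    #   y_next = y + dt/6 * (k1 + 2*k2 + 2*k3 + k4)   # ~0.904837, close to exp(-0.1)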
### BEGIN MainWindow class ###
class MainWindow(TemplateBaseClass,Modele):
def __init__(self):
# Extra useful attributes
self.fps = None
self.lastTime = perf_counter()
self.colors_dict = {'b':{'rgb':(31,119,180),'hex':'#1f77b4'},'o':{'rgb':(255,127,14),'hex':'#ff7f0e'},'g':{'rgb':(44,160,44),'hex':'#2ca02c'},'r':{'rgb':(214,39,40),'hex':'#d62728'},'p':{'rgb':(148,103,189),'hex':'#9467bd'},'y':{'rgb':(255,255,0),'hex':'#ffff00'},'brown':{'rgb':(140,86,75),'hex':'#8c564bq'},'pink':{'rgb':(227,119,194),'hex':'#e377c2'},'grey':{'rgb':(127,127,127),'hex':'#7f7f7f'},'c':{'rgb':(23,190,207),'hex':'#7f7f7f'}}
self.flag_colormaps = 1
self.colormaps_list = ['thermal','yellowy','greyclip','grey','viridis','inferno']
# Create variables and parameters
#Modele.__init__(self) # Commented as called by TemplateBaseClass.__init__(self)
# Load UI
TemplateBaseClass.__init__(self) # This seems to call Modele.__init__(self) => Commenting the first occurence
self.setWindowTitle('Graphical User Interface for Differential Equations (GUIDE)')
# Create the main window
self.ui = WindowTemplate()
self.ui.setupUi(self)
try: self.resize(*self.window_size)
except: pass
# Set main theme from self.window_params['theme']
if 'theme' in self.__dict__.keys() and self.theme == 'dark':
QtGui.QApplication.setStyle("Fusion")
self.palette = self.palette()
self.palette.setColor(QtGui.QPalette.Window, QtGui.QColor(53, 53, 53))
self.palette.setColor(QtGui.QPalette.WindowText, QtCore.Qt.white)
self.palette.setColor(QtGui.QPalette.Base, QtGui.QColor(25, 25, 25))
self.palette.setColor(QtGui.QPalette.AlternateBase, QtGui.QColor(53, 53, 53))
self.palette.setColor(QtGui.QPalette.ToolTipBase, QtCore.Qt.black)
self.palette.setColor(QtGui.QPalette.ToolTipText, QtCore.Qt.white)
self.palette.setColor(QtGui.QPalette.Text, QtCore.Qt.white)
self.palette.setColor(QtGui.QPalette.Button, QtGui.QColor(53, 53, 53))
self.palette.setColor(QtGui.QPalette.ButtonText, QtCore.Qt.white)
self.palette.setColor(QtGui.QPalette.BrightText, QtCore.Qt.red)
self.palette.setColor(QtGui.QPalette.Link, QtGui.QColor(42, 130, 218))
self.palette.setColor(QtGui.QPalette.Highlight, QtGui.QColor(42, 130, 218))
self.palette.setColor(QtGui.QPalette.HighlightedText, QtCore.Qt.black)
self.setPalette(self.palette)
# Button, sliders and spinboxes drawn in qtdesigner
#ICs_button
self.ui.ICs_button.clicked.connect(self.update_ICs_button)
self.ui.ICs_button.keyPressEvent = self.keyPressEvent
#nstep_slider
self.ui.nstep_slider.setRange(1,int(self.array_size/10))
self.ui.nstep_slider.setValue(self.nstep_update_plot)
self.ui.nstep_slider.valueChanged.connect(self.update_nstep_slider)
#nstep_spinbox
self.ui.nstep_spinbox.setRange(1,int(self.array_size/10))
self.ui.nstep_spinbox.setSingleStep(1)
self.ui.nstep_spinbox.setValue(self.nstep_update_plot)
self.ui.nstep_spinbox.setKeyboardTracking(False) # emit signal only when enter is pressed
self.ui.nstep_spinbox.valueChanged.connect(self.update_nstep_spinbox)
#fps_label
self.update_fps_label()
#record_label
self.ui.record_label.setText(' Rec. ')
########################## BEGIN figure layout and docks ##########################
# Dock declaration and initial placement
self.main_dock_area = self.ui.dock_area
for dock_name in self.docks.keys():
self.add_dock(dock_name) # add 'dock' and 'region' keywords into self.docks[dock_name]
# Declaration of the plots in respective docks
accepted_dock_types = ['plot1D','plot2D','image']
assert self.docks[dock_name]['type'] in accepted_dock_types, f"Dock type '{self.docks[dock_name]['type']}' not understood. Dock type must be in {accepted_dock_types}"
flag2 = 0
alpha_factor_linearregion = 60 # 0 -> 255
self.warning_observables_docks = []
for dock_name in self.docks.keys():
if self.docks[dock_name]['type'] == 'plot1D':
self.create_PlotWidget(dock_name) # add 'actual_plot' keyword into self.docks[dock_name]
# Attribution of the curves to the plots
flag = 0
self.docks[dock_name]['curve'] = {}
# Create curves objects for variables, observables and params
for variable in self.variables.keys():
if 'dock' in self.variables[variable].keys():
if dock_name in self.variables[variable]['dock']:
self.docks[dock_name]['curve'][variable] = self.docks[dock_name]['actual_plot'].plot(pen=self.colors_dict[list(self.colors_dict.keys())[np.mod(flag,len(self.colors_dict))]]['rgb'])
else:
self.docks[dock_name]['curve'][variable] = self.docks[dock_name]['actual_plot'].plot(pen=self.colors_dict[list(self.colors_dict.keys())[np.mod(flag,len(self.colors_dict))]]['rgb'])
flag += 1
for param in self.params.keys():
if 'dock' in self.params[param].keys():
if dock_name in self.params[param]['dock']:
self.docks[dock_name]['curve'][param] = self.docks[dock_name]['actual_plot'].plot(pen=self.colors_dict[list(self.colors_dict.keys())[np.mod(flag,len(self.colors_dict))]]['rgb'])
else:
self.docks[dock_name]['curve'][param] = self.docks[dock_name]['actual_plot'].plot(pen=self.colors_dict[list(self.colors_dict.keys())[np.mod(flag,len(self.colors_dict))]]['rgb'])
flag += 1
if 'zoomOf' in self.docks[dock_name].keys():
relatedTo = self.docks[dock_name]['zoomOf']
# Create region and store in its according plot dict
self.docks[relatedTo]['region'][dock_name] = pg.LinearRegionItem([self.array_size/2.-self.array_size/30.,self.array_size/2.+self.array_size/30.],brush=self.colors_dict[list(self.colors_dict.keys())[np.mod(flag2,len(self.colors_dict))]]['rgb']+(alpha_factor_linearregion,))
self.docks[relatedTo]['region'][dock_name].setZValue(-10)
self.docks[relatedTo]['actual_plot'].addItem(self.docks[relatedTo]['region'][dock_name])
self.docks[relatedTo]['region'][dock_name].sigRegionChanged.connect(partial(self.update_zoom_plot,dock_name,relatedTo))
# Link region and zoom plot
self.docks[dock_name]['actual_plot'].sigXRangeChanged.connect(partial(self.update_xzoom_region,dock_name,relatedTo))
flag2 += 1
### WARNING Does not work probably due to an internal bug (waiting for answer)
#print('1',self.docks[dock_name]['actual_plot'].getViewBox().viewRange()[1])
#print('2',self.docks[relatedTo]['actual_plot'].getViewBox().viewRange()[1])
#self.docks[dock_name]['actual_plot'].setYLink(self.docks[relatedTo]['actual_plot'])
#print('1',self.docks[dock_name]['actual_plot'].getViewBox().viewRange()[1])
#print('2',self.docks[relatedTo]['actual_plot'].getViewBox().viewRange()[1])
self.update_zoom_plot(dock_name,relatedTo)
elif self.docks[dock_name]['type'] == 'plot2D':
self.create_PlotWidget(dock_name)
# Attribution of the curves to the plots
flag = 0
self.docks[dock_name]['curve'] = {}
for variable in self.variables.keys():
if 'dock' in self.variables[variable].keys():
# if element of 'dock' (variables/observables) is a dict
for element_variable_dock in self.variables[variable]['dock']:
if isinstance(element_variable_dock,dict):
if dock_name in element_variable_dock.keys():
for real_dock_name in element_variable_dock.keys():
# assert only two variables to plot
assert len(element_variable_dock[real_dock_name]) == 2, f"list of variables/observables to plot on {real_dock_name} with dock type 'plot2D' must be exactly of length 2, provided was {len(element_variable_dock[real_dock_name])}"
list_variables_to_plot = element_variable_dock[real_dock_name]
# assert variables provided do exist
for variables_to_plot in list_variables_to_plot:
assert variables_to_plot in self.variables.keys() or variables_to_plot in self.params.keys(),f"variable '{variables_to_plot}' in 'dock' key of variable '{variable}' (variables/observables/params dictionnary) not understood. Must be in {list(dict(self.variables, **self.params).keys())}"
self.docks[dock_name]['curve'][variable+'_plot2D_'+str(flag)] = {}
self.docks[dock_name]['curve'][variable+'_plot2D_'+str(flag)]['curve'] = self.docks[dock_name]['actual_plot'].plot(pen=self.colors_dict[list(self.colors_dict.keys())[np.mod(flag,len(self.colors_dict))]]['rgb'])
self.docks[dock_name]['curve'][variable+'_plot2D_'+str(flag)]['variables_to_plot'] = list_variables_to_plot
flag += 1
else:
# Check validity of the provided dock_names
for real_dock_name in element_variable_dock.keys():
if real_dock_name not in self.docks.keys():
if [variable,element_variable_dock] not in self.warning_observables_docks:
self.warning_observables_docks.append([variable,element_variable_dock]) # to throw error only once
print(f"WARNING: check validity of dock_names you provided in the variables/observable dictionnary: {list(element_variable_dock.keys())}'")
if flag == 0: # Nothing plotted on the 'plot2D'
print(f"WARNING: nothing has been plotted on the 'plot2D' dock with name '{dock_name}'")
if 'zoomOf' in self.docks[dock_name].keys():
pass
elif self.docks[dock_name]['type'] == 'image':
self.create_ImageView(dock_name)
self.docks[dock_name]['actual_plot'].keyPressEvent = self.keyPressEvent
#self.docks[dock_name]['actual_plot'].enableAutoRange('xy', True)
########################## END figure layout and docks ##########################
############################ BEGIN Trees declaration ############################
# Variables Tree
self.tree = self.ui.tree
self.tree.setColumnCount(3)
self.tree.keyPressEvent = self.keyPressEvent # allow keys catching for focus on trees
self.tree.setHeaderLabels(['Variables','IC','plot'])
flag = 0
for variable in self.variables.keys():
temp = pg.TreeWidgetItem([variable])
temp.setForeground(0,QtGui.QBrush(QtGui.QColor(self.colors_dict[list(self.colors_dict.keys())[np.mod(flag,len(self.colors_dict))]]['hex'])))
# Create linedit (variables only)
if not self.variables[variable]['observable']:
self.variables[variable]['lineedit'] = QtGui.QLineEdit()
temp.setWidget(1, self.variables[variable]['lineedit'])
self.variables[variable]['lineedit'].setText(str(self.variables[variable]['value'][-1])) # set initial value
self.variables[variable]['lineedit'].returnPressed.connect(partial(self.update_lineedit_variable,variable))
# Create checkbox
self.variables[variable]['checkbox'] = QtGui.QCheckBox()
temp.setWidget(2, self.variables[variable]['checkbox'])
self.tree.addTopLevelItem(temp)
self.variables[variable]['checkbox'].setChecked(self.variables[variable]['plot']) # set initial state
self.variables[variable]['checkbox'].keyPressEvent = self.keyPressEvent # connect keys
self.variables[variable]['checkbox'].stateChanged.connect(partial(self.update_checkbox_variable,variable)) # connect checkbox
flag += 1
# Params Tree
self.tree_params = self.ui.tree_params
self.tree_params.setColumnCount(4)
self.tree_params.keyPressEvent = self.keyPressEvent
self.tree_params.setHeaderLabels(['Params','plot','value','slider'])
self.spinbox_precision = 3
for param in self.params.keys():
self.params[param]['slider_conversion_factor'] = int(1./self.params[param]['step']) # To test was: 5000 *10000
temp = pg.TreeWidgetItem([param])
# Spin boxes
self.params[param]['spinbox'] = QtGui.QDoubleSpinBox()
self.params[param]['spinbox'].setRange(self.params[param]['min'],self.params[param]['max'])
self.params[param]['spinbox'].setSingleStep(self.params[param]['step'])
if isinstance(self.params[param]['step'],int):
self.params[param]['spinbox'].setDecimals(0)
else:
self.params[param]['spinbox'].setDecimals(self.spinbox_precision)
temp.setWidget(2, self.params[param]['spinbox'])
self.tree_params.addTopLevelItem(temp)
self.params[param]['spinbox'].setValue(self.params[param]['value'][-1])
self.params[param]['spinbox'].setKeyboardTracking(False) # emit signal only when enter is pressed
self.params[param]['spinbox'].valueChanged.connect(partial(self.update_slider_params,param))
# Sliders
self.params[param]['slider'] = QtGui.QSlider()
self.params[param]['slider'].setRange(int(self.params[param]['min']*self.params[param]['slider_conversion_factor']),int(self.params[param]['max']*self.params[param]['slider_conversion_factor']))
self.params[param]['slider'].setSingleStep(1) # integers only
self.params[param]['slider'].setOrientation(QtCore.Qt.Orientation.Horizontal) # horizontal
temp.setWidget(3, self.params[param]['slider'])
self.tree.addTopLevelItem(temp)
value = np.round(self.params[param]['value'][-1]*self.params[param]['slider_conversion_factor'],self.spinbox_precision) # convert in slider integer unit
self.params[param]['slider'].setValue(int(value))
self.params[param]['slider'].valueChanged.connect(partial(self.update_spinbox_params,param))
# Create checkbox
self.params[param]['checkbox'] = QtGui.QCheckBox()
temp.setWidget(1, self.params[param]['checkbox'])
self.tree.addTopLevelItem(temp)
self.params[param]['checkbox'].setChecked(self.params[param]['plot']) # set initial state
self.params[param]['checkbox'].keyPressEvent = self.keyPressEvent # connect keys
self.params[param]['checkbox'].stateChanged.connect(partial(self.update_checkbox_variable,param)) # connect checkbox
flag += 1
# Kernel Tree
self.tree_kernels = self.ui.tree_kernels
self.tree_kernels.setColumnCount(2)
self.tree_kernels.keyPressEvent = self.keyPressEvent
self.tree_kernels.setHeaderLabels(['Kernels',''])
# Create a group of buttons to allow "exclusive" behavior
self.group_buttons_kernels = QtGui.QButtonGroup()
self.group_buttons_kernels.setExclusive(True)
for kernel in self.kernels.keys():
self.kernels[kernel]['checkbox'] = QtGui.QCheckBox()
self.group_buttons_kernels.addButton(self.kernels[kernel]['checkbox'], 1)
temp = pg.TreeWidgetItem([kernel])
temp.setWidget(1, self.kernels[kernel]['checkbox'])
self.tree_kernels.addTopLevelItem(temp)
if kernel == self.kernel:
self.kernels[kernel]['checkbox'].setChecked(True) # set initial state
self.kernels[kernel]['checkbox'].keyPressEvent = self.keyPressEvent
self.group_buttons_kernels.buttonClicked.connect(self.update_checkbox_kernel)
############################# END Trees declaration ############################
# Start showing the window
self.show()
# Connect timer to update the figure
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.run_simulator)
self.timer.start(10)
# Initial window states
if not self.streaming: self.timer.stop(); self.run_simulator()
self.update_pause_indicator()
self.update_record_state_indicator()
# If starts recording from beginning
if self.record_state:
self.toggle_record_state()
self.keyPressEvent("r")
self.t = 0
################################ BEGIN plots update ###################################
def update_zoom_plot(self,dock_name,relatedTo):
self.docks[dock_name]['actual_plot'].setXRange(*self.docks[relatedTo]['region'][dock_name].getRegion(), padding=0)
def update_xzoom_region(self,dock_name,relatedTo):
#print('1',self.docks[dock_name]['actual_plot'].getViewBox().viewRange()[1])
#print('2',self.docks[relatedTo]['actual_plot'].getViewBox().viewRange()[1])
self.docks[relatedTo]['region'][dock_name].setRegion(self.docks[dock_name]['actual_plot'].getViewBox().viewRange()[0])
def update_plots(self):
for dock_name in self.docks.keys():
if self.docks[dock_name]['type'] == 'plot1D':
for variable in self.variables.keys():
if self.variables[variable]['plot']:
if 'dock' in self.variables[variable].keys():
if dock_name in self.variables[variable]['dock']:
self.docks[dock_name]['curve'][variable].setData(self.variables[variable]['value'])
else:
self.docks[dock_name]['curve'][variable].setData(self.variables[variable]['value'])
for param in self.params.keys():
if self.params[param]['plot']:
if 'dock' in self.params[param].keys():
if dock_name in self.params[param]['dock']:
self.docks[dock_name]['curve'][param].setData(self.params[param]['value'])
else:
self.docks[dock_name]['curve'][param].setData(self.params[param]['value'])
elif self.docks[dock_name]['type'] == 'plot2D':
# plot the variable names that are pre stored in dock dict
for curve2D in self.docks[dock_name]['curve']:
# if there is a param in the list
list_params_in_variables_provided = [i for i in self.docks[dock_name]['curve'][curve2D]['variables_to_plot'] if i in list(self.params.keys())]
if len(list_params_in_variables_provided)==1:
param_provided = list_params_in_variables_provided[0]
index_param_provided = self.docks[dock_name]['curve'][curve2D]['variables_to_plot'].index(param_provided)
index_variable_provided = list(set([0,1]) - set([index_param_provided]))
if self.variables[curve2D.split('_plot2D_')[0]]['plot']:
if index_param_provided == 0:
self.docks[dock_name]['curve'][curve2D]['curve'].setData(self.params[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][0]]['value'],self.variables[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][1]]['value'])
elif index_param_provided == 1:
self.docks[dock_name]['curve'][curve2D]['curve'].setData(self.variables[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][0]]['value'],self.params[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][1]]['value'])
# no params provided
else:
# if variables specified, index 0 is to be plot
if self.variables[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][0]]['plot']:
self.docks[dock_name]['curve'][curve2D]['curve'].setData(self.variables[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][0]]['value'],self.variables[self.docks[dock_name]['curve'][curve2D]['variables_to_plot'][1]]['value'])
elif self.docks[dock_name]['type'] == 'image':
for variable in self.variables.keys():
if 'dock' in self.variables[variable].keys():
if self.variables[variable]['plot']:
if dock_name in self.variables[variable]['dock']:
self.docks[dock_name]['actual_plot'].setImage(self.variables[variable]['value'])
# Update fps_label
self.update_fps_label()
def run_simulator(self,nstep_update_plot=None):
if not nstep_update_plot: nstep_update_plot = self.nstep_update_plot
# Calculation
for i in range(nstep_update_plot):
self.simulator()
# If recording
if self.record_state and (self.nstep%self.nstep_record == 0): # record every self.nstep_record
self.append_to_dataframe()
# Update main plots every nstep_update_plot (last occurence of the loop)
if i==nstep_update_plot-1:
self.update_plots()
# Update time_stamp and parameter dict last (then saved correspond to calculation)
self.time_stamp[:-1] = self.time_stamp[1:]
self.time_stamp[-1] += self.step_size
self.nstep += 1
for param in self.params.keys():
self.params[param]['value'][:-1] = self.params[param]['value'][1:]
# Fix app freezing on Windows systems (if event occurs must process it)
QtCore.QCoreApplication.processEvents()
################################# END plots update ###################################
def keyPressEvent(self, event):
""" Set keyboard interactions """
try: key = event.text()
        except AttributeError: key = event # allow passing key strings programmatically
if key in list(self.user_defined_keyPressEvent.keys()): # Interprete keys defined user file
self.user_defined_keyPressEvent[key](self,{key:value['value'] for (key,value) in self.variables.items()},{key:value['value'][-1] for (key,value) in self.params.items()})
elif key == ' ':
self.toggle_streaming()
elif key == 'q':
sys.exit()
elif key == 'h':
previous_streaming_state = self.streaming
if previous_streaming_state: self.toggle_streaming()
self.display_help()
if previous_streaming_state: self.toggle_streaming()
elif key == 's' or key == 'r':
previous_streaming_state = self.streaming
if previous_streaming_state: self.toggle_streaming() # pause it
if key=='s': self.save() # query filename and save initial screenshot
elif key=='r':
if not self.record_state:
self.save(record=True)
else:
self.toggle_record_state()
self.save_screenshot(self.filename_to_record_no_ext+'_END.png')
self.save_appended_dataframe()
self.filename_to_record_no_ext = None
if previous_streaming_state: self.toggle_streaming()
elif key == 'i':
self.change_ICs_variable()
elif key == 'c':
self.update_images_colormap()
else:
if key != "" and event.key() != QtCore.Qt.Key_Return:
                print(f'Keyboard event "{key}" is not mapped to any action')
def create_PlotWidget(self,dock_name):
self.docks[dock_name]['actual_plot'] = pg.PlotWidget(**{key:value for key,value in self.docks[dock_name].items() if key not in ['dock','type','position','relativeTo','size','zoomOf','region']})
self.docks[dock_name]['dock'].addWidget(self.docks[dock_name]['actual_plot'])
def create_ImageView(self,dock_name):
# Item for displaying image data
pl = pg.PlotItem() # to get axis
img = pg.ImageItem(axisOrder='row-major') # to rotate 90 degree
# Create an ImageView Widget
self.docks[dock_name]['actual_plot'] = pg.ImageView(view=pl,imageItem=img,**{key:value for key,value in self.docks[dock_name].items() if key not in ['dock','type','position','relativeTo','size','zoomOf','region']})
# Set initial states
self.docks[dock_name]['actual_plot'].view.invertY(False)
self.docks[dock_name]['actual_plot'].view.setAspectLocked(False)
self.docks[dock_name]['actual_plot'].view.disableAutoRange(True)
self.docks[dock_name]['actual_plot'].ui.menuBtn.hide()
#self.docks[dock_name]['actual_plot'].ui.menuBtn.show()
#self.docks[dock_name]['actual_plot'].ui.histogram.hide()
#self.docks[dock_name]['actual_plot'].ui.roiBtn.hide()
# Set colormap to be used
gradient = Gradients[self.colormaps_list[self.flag_colormaps]]
cmap = pg.ColorMap(pos=[c[0] for c in gradient['ticks']],color=[c[1] for c in gradient['ticks']], mode=gradient['mode'])
self.docks[dock_name]['actual_plot'].setColorMap(cmap)
self.docks[dock_name]['dock'].addWidget(self.docks[dock_name]['actual_plot'])
def add_dock(self,dock_name):
''' Add a dock to the main window '''
if 'relativeTo' in self.docks[dock_name].keys():
relativeto_dock_name = self.docks[dock_name]['relativeTo']
            assert 'dock' in self.docks[relativeto_dock_name].keys(), f"Dock '{relativeto_dock_name}' not understood. Docks that are 'relativeTo' another must be defined after it in the dictionary of docks for consistent behavior"
self.docks[dock_name]['region'] = {} # 'region' key to be used later
self.docks[dock_name]['dock'] = Dock(dock_name, size=self.docks[dock_name]['size'], closable=True)
self.main_dock_area.addDock(**{key:value for key,value in self.docks[dock_name].items() if key in ['dock','position','relativeTo']}) # key used: 'dock', 'position' and 'relativeTo'
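    # Illustrative shape of a self.docks entry consumed by add_dock() and the
    # create_* helpers above (a sketch inferred from the keys referenced in this
    # file -- 'type', 'size', 'position', 'relativeTo', 'zoomOf' -- real entries
    # are supplied by the user configuration, so names here are hypothetical):
    #   self.docks = {
    #       'signal': {'type': 'plot1D', 'size': (400, 300), 'position': 'left'},
    #       'zoom':   {'type': 'plot1D', 'size': (400, 300), 'position': 'right',
    #                  'relativeTo': 'signal', 'zoomOf': 'signal'},
    #   }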
def repaint_all_plots(self):
for dock_name in self.docks.keys():
if 'actual_plot' in self.docks[dock_name]:
self.docks[dock_name]['actual_plot'].repaint()
def toggle_streaming(self):
self.streaming = not(self.streaming)
self.update_pause_indicator()
def update_pause_indicator(self):
if self.streaming:
self.ui.run_label.setStyleSheet("QLabel {border: 3px solid %s; background-color : %s; color : %s; }" %('#000000',self.colors_dict['g']['hex'],(0,0,0)))
self.ui.run_label.setText(' Run ')
self.timer.start(10)
else:
self.ui.run_label.setStyleSheet("QLabel {border: 3px solid %s; background-color : %s; color : %s; }" %('#000000',self.colors_dict['r']['hex'],(0,0,0)))
self.ui.run_label.setText(' Stop ')
self.timer.stop()
self.ui.run_label.repaint()
def update_images_colormap(self):
self.flag_colormaps += 1
cmap_name = self.colormaps_list[np.mod(self.flag_colormaps,len(self.colormaps_list))]
gradient = Gradients[cmap_name]
cmap = pg.ColorMap(pos=[c[0] for c in gradient['ticks']],color=[c[1] for c in gradient['ticks']], mode=gradient['mode'])
for dock_name in self.docks.keys():
if self.docks[dock_name]['type'] == 'image':
if 'actual_plot' in self.docks[dock_name]:
self.docks[dock_name]['actual_plot'].setColorMap(cmap)
self.repaint_all_plots()
def update_record_state_indicator(self):
if self.record_state:
self.ui.record_label.setStyleSheet("border: 3px solid %s; border-radius: 22px; background-color : %s; color : %s" %('#000000',self.colors_dict['r']['hex'],(0,0,0)))
else:
self.ui.record_label.setStyleSheet("border: 3px solid %s; border-radius: 22px; background-color : %s; color : %s" %('#000000','#000000','#000000'))
self.ui.record_label.repaint()
def update_ICs_button(self):
for variable in self.variables.keys():
if not self.variables[variable]['observable']:
value = np.array(self.variables[variable]['init_cond']).astype(self.variables[variable]['type']) # convert to array to be able to astype
self.variables[variable]['lineedit'].setText(str(value)) # set initial value
self.variables[variable]['value'] = self.variables[variable]['init_cond'] * np.ones(self.array_size).astype(self.variables[variable]['type'])
def display_help(self):
# Message must be a list of each line to display
text_help_dialog = ['Important Notes:','- (keyboard keys) do not work when focus is given to lineedits or spinboxes','- ("image" plots) you must pause to modify the aspect ratio, zoom or histogram range']
text_help_dialog += ['']
        text_help_dialog += ['Usable keyboard keys:','- " ": toggle run/stop','- "q": close the window','- "h": display this help message','- "s": save a snapshot and a dataframe','- "r": toggle recording, save snapshots at start/end','- "i": apply all variables ICs','- "c": change the colormap used to draw "image" plots']
text_help_dialog += ['']
text_help_dialog += ['Defined variables and observables:']
for variable in self.variables.keys():
temp = '- "'
temp += variable+'"'
if self.variables[variable]['observable']: temp += ' (observable)'
elif not self.variables[variable]['observable']: temp += ' (variable)'
if 'help' in self.variables[variable].keys(): temp += f": {self.variables[variable]['help']}"
text_help_dialog += [temp]
text_help_dialog += ['']
text_help_dialog += ['Defined parameters:']
for param in self.params.keys():
temp = '- "'
temp += param+'"'
if 'help' in self.params[param].keys(): temp += f", help: {self.params[param]['help']}"
for key in self.params[param].keys():
if key in ['min','max','step','value']:
if key=='value':
temp += f", {key}: {self.params[param][key][-1]}"
else:
temp += f", {key}: {self.params[param][key]}"
text_help_dialog += [temp]
help_dialog = ScrollMessageBox(text_help_dialog,size_help=(850,600))
help_dialog.setWindowTitle('Help message')
help_dialog.exec_()
################################# BEGIN save ###################################
def save(self,record=False,filename_to_save_no_ext=None):
self.filename_to_save_no_ext = filename_to_save_no_ext
if self.filename_to_save_no_ext is None:
save_dialog = QtGui.QFileDialog()
save_dialog.setFileMode(QtGui.QFileDialog.AnyFile)
save_dialog.setNameFilter("Output files (*.png *.xlsx)")
save_dialog.setWindowTitle("Saving files: screenshot, traces and window state")
if save_dialog.exec_():
filename_provided = save_dialog.selectedFiles()[0]
if '.' in filename_provided:
                    self.filename_to_save_no_ext = os.path.splitext(filename_provided)[0] # drop the provided extension
else:
self.filename_to_save_no_ext = filename_provided
# Build a dict of the existing conflicting files
existing_filename_dict = {}
for filename in [self.filename_to_save_no_ext+'.png',self.filename_to_save_no_ext+'.xlsx']:
if os.path.exists(filename):
existing_filename_dict[filename] = {}
existing_filename_dict[filename]['name'] = filename.split("/")[-1]
existing_filename_dict[filename]['path'] = filename.rstrip(filename.split("/")[-1])
# Open a confirmation window if filename_provided exists
if len(existing_filename_dict) > 0:
file_exists_dialog = QtGui.QMessageBox()
file_exists_dialog.setIcon(QtGui.QMessageBox.Warning)
file_exists_dialog.setWindowTitle('Warning: file already exists')
names = '" and "'.join([existing_filename_dict[key]['name'] for key in existing_filename_dict.keys()])
path = existing_filename_dict[list(existing_filename_dict.keys())[0]]['path']
if len(existing_filename_dict) > 1: extra_text = ['s','','them','them','their']
elif len(existing_filename_dict) == 1: extra_text = ['','s','it','it','its']
file_exists_dialog.setText(f'File{extra_text[0]} named "{names}" already exist{extra_text[1]} at location "{path}". Do you want to replace {extra_text[2]}?')
file_exists_dialog.setInformativeText(f'Replacing {extra_text[3]} will overwrite {extra_text[4]} contents forever.')
file_exists_dialog.setStandardButtons(QtGui.QMessageBox.Save|QtGui.QMessageBox.Cancel)
file_exists_dialog.setDefaultButton(QtGui.QMessageBox.Cancel)
file_exists_dialog.buttonClicked.connect(self.overwrite_buttons)
file_exists_dialog.exec_()
save_dialog.close()
# if closing the window or chose not to overwrite => no filename
if self.filename_to_save_no_ext is None: return
# save screenshot
time.sleep(0.05) # wait for save_dialog to close before the snapshot
add_text = '_START' if record else ''
self.save_screenshot(self.filename_to_save_no_ext+f"{add_text}.png")
# save dataframe with variables, observables and parameter values
self.save_dataframe(self.filename_to_save_no_ext+'.xlsx')
if record:
self.list_to_record = []
self.filename_to_record_no_ext = self.filename_to_save_no_ext
self.toggle_record_state()
def overwrite_buttons(self,event):
button_pressed = event.text()
if button_pressed == 'Cancel':
self.filename_to_save_no_ext = None
elif button_pressed == 'Save':
return
def toggle_record_state(self):
self.record_state = not(self.record_state)
self.update_record_state_indicator()
def save_screenshot(self,filename):
""" Save a screenshot of the main_splitter (the whole "main" window) """
screen = QtWidgets.QApplication.primaryScreen()
screenshot = screen.grabWindow( self.ui.main_splitter.winId() )
screenshot.save(filename, 'png')
print(f'File "{filename}" saved')
def save_dataframe(self,filename):
data_frame = self.build_dataframe_to_save()
data_frame.to_excel(filename,index=False)
print(f'File "{filename}" saved')
def save_appended_dataframe(self,sheet_name='Sheet1'):
writer = | pd.ExcelWriter(self.filename_to_record_no_ext+'.xlsx', engine='openpyxl') | pandas.ExcelWriter |
"""Tests for `models` module."""
import pytest
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_classification
from pipelitools.models import models as m
@pytest.fixture(scope="function")
def df_binary():
X_train, y_train = make_classification(n_samples=100, n_features=2, n_informative=2,
n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=1,
class_sep=2, flip_y=0, weights=[0.5, 0.5], random_state=1)
X_test, y_test = make_classification(n_samples=50, n_features=2, n_informative=2,
n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=1,
class_sep=2, flip_y=0, weights=[0.5, 0.5], random_state=2)
y_train = | pd.Series(y_train) | pandas.Series |
import warnings
from bisect import bisect_left, bisect_right
from collections import Counter
import allel
import dask.array as da
import numba
import numpy as np
import pandas as pd
import xarray as xr
import zarr
import malariagen_data
from . import veff
from .util import ( # type_error,
DIM_ALLELE,
DIM_PLOIDY,
DIM_SAMPLE,
DIM_VARIANT,
Region,
da_compress,
da_from_zarr,
dask_compress_dataset,
init_filesystem,
init_zarr_store,
locate_region,
read_gff3,
resolve_region,
type_error,
unpack_gff3_attributes,
xarray_concat,
)
PUBLIC_RELEASES = ("3.0",)
DEFAULT_URL = "gs://vo_agam_release/"
GENESET_GFF3_PATH = (
"reference/genome/agamp4/Anopheles-gambiae-PEST_BASEFEATURES_AgamP4.12.gff3.gz"
)
GENOME_ZARR_PATH = (
"reference/genome/agamp4/Anopheles-gambiae-PEST_CHROMOSOMES_AgamP4.zarr"
)
DEFAULT_SPECIES_ANALYSIS = "aim_20200422"
DEFAULT_SITE_FILTERS_ANALYSIS = "dt_20200416"
DEFAULT_COHORTS_ANALYSIS = "20211101"
CONTIGS = "2R", "2L", "3R", "3L", "X"
AA_CHANGE_QUERY = (
"effect in ['NON_SYNONYMOUS_CODING', 'START_LOST', 'STOP_LOST', 'STOP_GAINED']"
)
# Note regarding release identifiers and storage paths. Within the
# data storage, we have used path segments like "v3", "v3.1", "v3.2",
# etc., to separate data from different releases. There is an inconsistency
# in this convention, because the "v3" should have been "v3.0". To
# make the API more consistent, we would like to use consistent release
# identifiers like "3.0", "3.1", "3.2", etc., as parameter values and
# when release identifiers are added to returned dataframes. In order to
# achieve this, below we define two functions that allow mapping between
# these consistent release identifiers, and the less consistent release
# storage path segments.
def _release_to_path(release):
"""Compatibility function, allows us to use release identifiers like "3.0"
and "3.1" in the public API, and map these internally into storage path
segments."""
if release == "3.0":
# special case
return "v3"
elif release.startswith("3."):
return f"v{release}"
else:
raise ValueError(f"Invalid release: {release!r}")
def _path_to_release(path):
"""Compatibility function, allows us to use release identifiers like "3.0"
and "3.1" in the public API, and map these internally into storage path
segments."""
if path == "v3":
return "3.0"
elif path.startswith("v3."):
return path[1:]
else:
raise RuntimeError(f"Unexpected release path: {path!r}")
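# A quick illustration of the round-trip implemented by the two helpers above
# (informal sketch, kept as a comment so nothing runs at import time):
#   _release_to_path("3.0") -> "v3"      _path_to_release("v3")   -> "3.0"
#   _release_to_path("3.1") -> "v3.1"    _path_to_release("v3.1") -> "3.1"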
class Ag3:
"""Provides access to data from Ag3.x releases.
Parameters
----------
url : str
Base path to data. Give "gs://vo_agam_release/" to use Google Cloud
Storage, or a local path on your file system if data have been
downloaded.
**kwargs
Passed through to fsspec when setting up file system access.
Examples
--------
Access data from Google Cloud Storage (default):
>>> import malariagen_data
>>> ag3 = malariagen_data.Ag3()
Access data downloaded to a local file system:
>>> ag3 = malariagen_data.Ag3("/local/path/to/vo_agam_release/")
Access data from Google Cloud Storage, with caching on the local file system
in a directory named "gcs_cache":
>>> ag3 = malariagen_data.Ag3(
... "simplecache::gs://vo_agam_release",
... simplecache=dict(cache_storage="gcs_cache"),
... )
"""
contigs = CONTIGS
def __init__(self, url=DEFAULT_URL, **kwargs):
self._url = url
self._pre = kwargs.pop("pre", False)
# setup filesystem
self._fs, self._base_path = init_filesystem(url, **kwargs)
# setup caches
self._cache_releases = None
self._cache_sample_sets = dict()
self._cache_general_metadata = dict()
self._cache_species_calls = dict()
self._cache_site_filters = dict()
self._cache_snp_sites = None
self._cache_snp_genotypes = dict()
self._cache_genome = None
self._cache_annotator = None
self._cache_geneset = dict()
self._cache_cross_metadata = None
self._cache_site_annotations = None
self._cache_cnv_hmm = dict()
self._cache_cnv_coverage_calls = dict()
self._cache_cnv_discordant_read_calls = dict()
self._cache_haplotypes = dict()
self._cache_haplotype_sites = dict()
self._cache_cohort_metadata = dict()
def __repr__(self):
return (
f"<MalariaGEN Ag3 data resource API>\n"
f"Storage URL : {self._url}\n"
f"Data releases available : {', '.join(self.releases)}\n"
f"Cohorts analysis : {DEFAULT_COHORTS_ANALYSIS}\n"
f"Species analysis : {DEFAULT_SPECIES_ANALYSIS}\n"
f"Site filters analysis : {DEFAULT_SITE_FILTERS_ANALYSIS}\n"
f"Software version : {malariagen_data.__version__}\n"
f"---\n"
f"Please note that data are subject to terms of use,\n"
f"for more information see https://www.malariagen.net/data\n"
f"or contact <EMAIL>. For API documentation see: \n"
f"https://malariagen.github.io/vector-data/ag3/api.html"
)
def _repr_html_(self):
return f"""
<table class="malariagen-ag3">
<thead>
<tr>
<th style="text-align: left" colspan="2">MalariaGEN Ag3 data resource API</th>
</tr>
<tr><td colspan="2" style="text-align: left">
Please note that data are subject to terms of use,
for more information see <a href="https://www.malariagen.net/data">
the MalariaGEN website</a> or contact <EMAIL>.
See also the <a href="https://malariagen.github.io/vector-data/ag3/api.html">Ag3 API docs</a>.
</td></tr>
</thead>
<tbody>
<tr>
<th style="text-align: left">
Storage URL
</th>
<td>{self._url}</td>
</tr>
<tr>
<th style="text-align: left">
Data releases available
</th>
<td>{', '.join(self.releases)}</td>
</tr>
<tr>
<th style="text-align: left">
Cohorts analysis
</th>
<td>{DEFAULT_COHORTS_ANALYSIS}</td>
</tr>
<tr>
<th style="text-align: left">
Species analysis
</th>
<td>{DEFAULT_SPECIES_ANALYSIS}</td>
</tr>
<tr>
<th style="text-align: left">
Site filters analysis
</th>
<td>{DEFAULT_SITE_FILTERS_ANALYSIS}</td>
</tr>
<tr>
<th style="text-align: left">
Software version
</th>
<td>{malariagen_data.__version__}</td>
</tr>
</tbody>
</table>
"""
@property
def releases(self):
"""The releases for which data are available at the given storage
location."""
if self._cache_releases is None:
if self._pre:
# Here we discover which releases are available, by listing the storage
# directory and examining the subdirectories. This may include "pre-releases"
# where data may be incomplete.
sub_dirs = [p.split("/")[-1] for p in self._fs.ls(self._base_path)]
releases = tuple(
sorted(
[
_path_to_release(d)
for d in sub_dirs
if d.startswith("v3")
and self._fs.exists(f"{self._base_path}/{d}/manifest.tsv")
]
)
)
if len(releases) == 0:
raise ValueError("No releases found.")
self._cache_releases = releases
else:
self._cache_releases = PUBLIC_RELEASES
return self._cache_releases
def _read_sample_sets(self, *, release):
"""Read the manifest of sample sets for a given release."""
release_path = _release_to_path(release)
path = f"{self._base_path}/{release_path}/manifest.tsv"
with self._fs.open(path) as f:
df = pd.read_csv(f, sep="\t", na_values="")
df["release"] = release
return df
def sample_sets(self, release=None):
"""Access a dataframe of sample sets.
Parameters
----------
release : str, optional
Release identifier. Give "3.0" to access the Ag1000G phase 3 data
release.
Returns
-------
df : pandas.DataFrame
A dataframe of sample sets, one row per sample set.
"""
if release is None:
# retrieve sample sets from all available releases
release = self.releases
if isinstance(release, str):
# retrieve sample sets for a single release
if release not in self.releases:
raise ValueError(f"Release not available: {release!r}")
try:
return self._cache_sample_sets[release]
except KeyError:
df = self._read_sample_sets(release=release)
self._cache_sample_sets[release] = df
return df
elif isinstance(release, (list, tuple)):
# check no duplicates
counter = Counter(release)
for k, v in counter.items():
if v > 1:
raise ValueError(f"Duplicate values: {k!r}.")
# retrieve sample sets from multiple releases
df = pd.concat(
[self.sample_sets(release=r) for r in release],
axis=0,
ignore_index=True,
)
return df
else:
raise TypeError
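    # Example usage (sketch, following the docstring above; assumes an Ag3
    # instance `ag3` created as in the class docstring examples):
    #   df_all = ag3.sample_sets()                   # sample sets from every release
    #   df_30 = ag3.sample_sets(release="3.0")       # a single release
    #   df_multi = ag3.sample_sets(release=["3.0"])  # a list of releases also works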
@property
def v3_wild(self):
# legacy, convenience property to access sample sets from the
# 3.0 release, excluding the lab crosses
return [
x
for x in self.sample_sets(release="3.0")["sample_set"].tolist()
if x != "AG1000G-X"
]
def _lookup_release(self, *, sample_set):
"""Find which release a sample set was included in."""
df_sample_sets = self.sample_sets().set_index("sample_set")
try:
return df_sample_sets.loc[sample_set]["release"]
except KeyError:
raise ValueError(f"No release found for sample set {sample_set!r}")
def _read_general_metadata(self, *, sample_set):
"""Read metadata for a single sample set."""
try:
return self._cache_general_metadata[sample_set]
except KeyError:
release = self._lookup_release(sample_set=sample_set)
release_path = _release_to_path(release)
path = f"{self._base_path}/{release_path}/metadata/general/{sample_set}/samples.meta.csv"
with self._fs.open(path) as f:
df = pd.read_csv(f, na_values="")
# ensure all column names are lower case
df.columns = [c.lower() for c in df.columns]
# add a couple of columns for convenience
df["sample_set"] = sample_set
df["release"] = release
self._cache_general_metadata[sample_set] = df
return df
def _read_species_calls(self, *, sample_set, analysis):
"""Read species calls for a single sample set."""
key = (sample_set, analysis)
try:
return self._cache_species_calls[key]
except KeyError:
release = self._lookup_release(sample_set=sample_set)
release_path = _release_to_path(release)
path_prefix = f"{self._base_path}/{release_path}/metadata"
if analysis == "aim_20200422":
path = f"{path_prefix}/species_calls_20200422/{sample_set}/samples.species_aim.csv"
elif analysis == "pca_20200422":
path = f"{path_prefix}/species_calls_20200422/{sample_set}/samples.species_pca.csv"
else:
raise ValueError(f"Unknown species calling analysis: {analysis!r}")
with self._fs.open(path) as f:
df = pd.read_csv(
f,
na_values="",
# ensure correct dtype even where all values are missing
dtype={
"species_gambcolu_arabiensis": object,
"species_gambiae_coluzzii": object,
},
)
# add a single species call column, for convenience
def consolidate_species(s):
species_gambcolu_arabiensis = s["species_gambcolu_arabiensis"]
species_gambiae_coluzzii = s["species_gambiae_coluzzii"]
if species_gambcolu_arabiensis == "arabiensis":
return "arabiensis"
elif species_gambcolu_arabiensis == "intermediate":
return "intermediate_arabiensis_gambiae"
elif species_gambcolu_arabiensis == "gamb_colu":
# look at gambiae_vs_coluzzii
if species_gambiae_coluzzii == "gambiae":
return "gambiae"
elif species_gambiae_coluzzii == "coluzzii":
return "coluzzii"
elif species_gambiae_coluzzii == "intermediate":
return "intermediate_gambiae_coluzzii"
else:
# some individuals, e.g., crosses, have a missing species call
return np.nan
df["species"] = df.apply(consolidate_species, axis=1)
if analysis == "aim_20200422":
# normalise column prefixes
df = df.rename(
columns={
"aim_fraction_arab": "aim_species_fraction_arab",
"aim_fraction_colu": "aim_species_fraction_colu",
"species_gambcolu_arabiensis": "aim_species_gambcolu_arabiensis",
"species_gambiae_coluzzii": "aim_species_gambiae_coluzzii",
"species": "aim_species",
}
)
elif analysis == "pca_20200422":
# normalise column prefixes
df = df.rename(
# normalise column prefixes
columns={
"PC1": "pca_species_PC1",
"PC2": "pca_species_PC2",
"species_gambcolu_arabiensis": "pca_species_gambcolu_arabiensis",
"species_gambiae_coluzzii": "pca_species_gambiae_coluzzii",
"species": "pca_species",
}
)
# ensure all column names are lower case
df.columns = [c.lower() for c in df.columns]
self._cache_species_calls[key] = df
return df
def _prep_sample_sets_arg(self, *, sample_sets):
"""Common handling for the `sample_sets` parameter. For convenience, we
allow this to be a single sample set, or a list of sample sets, or a
release identifier, or a list of release identifiers."""
if sample_sets is None:
# all available sample sets
sample_sets = self.sample_sets()["sample_set"].tolist()
elif isinstance(sample_sets, str):
if sample_sets.startswith("3."):
# convenience, can use a release identifier to denote all sample sets
# in a release
sample_sets = self.sample_sets(release=sample_sets)[
"sample_set"
].tolist()
else:
# single sample set, normalise to always return a list
sample_sets = [sample_sets]
elif isinstance(sample_sets, (list, tuple)):
# list or tuple of sample sets or releases
prepped_sample_sets = []
for s in sample_sets:
# make a recursive call to handle the case where s is a release identifier
sp = self._prep_sample_sets_arg(sample_sets=s)
# make sure we end up with a flat list of sample sets
if isinstance(sp, str):
prepped_sample_sets.append(sp)
else:
prepped_sample_sets.extend(sp)
sample_sets = prepped_sample_sets
else:
raise TypeError(
f"Invalid type for sample_sets parameter; expected str, list or tuple; found: {sample_sets!r}"
)
# check all sample sets selected at most once
counter = Counter(sample_sets)
for k, v in counter.items():
if v > 1:
                raise ValueError(
                    f"Bad value for sample_sets parameter, {k!r} selected more than once."
                )
return sample_sets
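    # The four accepted forms of the sample_sets argument, per the handling above
    # (sketch; the sample set names are illustrative ones used in other docstrings):
    #   sample_sets=None                              -> all available sample sets
    #   sample_sets="AG1000G-AO"                      -> a single sample set
    #   sample_sets="3.0"                             -> every sample set in a release
    #   sample_sets=["AG1000G-BF-A", "AG1000G-BF-B"]  -> an explicit list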
def species_calls(self, sample_sets=None, analysis=DEFAULT_SPECIES_ANALYSIS):
"""Access species calls for one or more sample sets.
Parameters
----------
sample_sets : str or list of str, optional
Can be a sample set identifier (e.g., "AG1000G-AO") or a list of
            sample set identifiers (e.g., ["AG1000G-BF-A", "AG1000G-BF-B"]) or a
release identifier (e.g., "3.0") or a list of release identifiers.
analysis : {"aim_20200422", "pca_20200422"}
Species calling analysis.
Returns
-------
df : pandas.DataFrame
A dataframe of species calls for one or more sample sets, one row
per sample.
"""
sample_sets = self._prep_sample_sets_arg(sample_sets=sample_sets)
# concatenate multiple sample sets
dfs = [
self._read_species_calls(sample_set=s, analysis=analysis)
for s in sample_sets
]
df = pd.concat(dfs, axis=0, ignore_index=True)
return df
def _sample_metadata(self, *, sample_set, species_analysis, cohorts_analysis):
df = self._read_general_metadata(sample_set=sample_set)
if species_analysis is not None:
df_species = self._read_species_calls(
sample_set=sample_set, analysis=species_analysis
)
df = df.merge(df_species, on="sample_id", sort=False)
if cohorts_analysis is not None:
df_cohorts = self.sample_cohorts(
sample_sets=sample_set, cohorts_analysis=cohorts_analysis
)
df = df.merge(df_cohorts, on="sample_id", sort=False)
return df
def sample_metadata(
self,
sample_sets=None,
species_analysis=DEFAULT_SPECIES_ANALYSIS,
cohorts_analysis=DEFAULT_COHORTS_ANALYSIS,
):
"""Access sample metadata for one or more sample sets.
Parameters
----------
sample_sets : str or list of str, optional
Can be a sample set identifier (e.g., "AG1000G-AO") or a list of
sample set identifiers (e.g., ["AG1000G-BF-A", "AG1000G-BF-B"]) or a
release identifier (e.g., "3.0") or a list of release identifiers.
species_analysis : {"aim_20200422", "pca_20200422"}, optional
Include species calls in metadata.
cohorts_analysis : str, optional
Cohort analysis identifier (date of analysis), optional, default is
the latest version. Includes sample cohort calls in metadata.
Returns
-------
df : pandas.DataFrame
A dataframe of sample metadata, one row per sample.
"""
sample_sets = self._prep_sample_sets_arg(sample_sets=sample_sets)
# concatenate multiple sample sets
dfs = [
self._sample_metadata(
sample_set=s,
species_analysis=species_analysis,
cohorts_analysis=cohorts_analysis,
)
for s in sample_sets
]
df = pd.concat(dfs, axis=0, ignore_index=True)
return df
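    # Example usage (sketch, assuming an Ag3 instance `ag3`): general metadata
    # merged with species calls and cohort labels, as described above:
    #   df_meta = ag3.sample_metadata(sample_sets=["AG1000G-BF-A", "AG1000G-BF-B"])
    #   df_plain = ag3.sample_metadata(sample_sets="3.0", species_analysis=None,
    #                                  cohorts_analysis=None)  # general metadata only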
def open_site_filters(self, mask, analysis=DEFAULT_SITE_FILTERS_ANALYSIS):
"""Open site filters zarr.
Parameters
----------
mask : {"gamb_colu_arab", "gamb_colu", "arab"}
Mask to use.
analysis : str, optional
Site filters analysis version.
Returns
-------
root : zarr.hierarchy.Group
"""
key = mask, analysis
try:
return self._cache_site_filters[key]
except KeyError:
path = f"{self._base_path}/v3/site_filters/{analysis}/{mask}/"
store = init_zarr_store(fs=self._fs, path=path)
root = zarr.open_consolidated(store=store)
self._cache_site_filters[key] = root
return root
def _site_filters(
self,
*,
region,
mask,
field,
analysis,
inline_array,
chunks,
):
assert isinstance(region, Region)
root = self.open_site_filters(mask=mask, analysis=analysis)
z = root[f"{region.contig}/variants/{field}"]
d = da_from_zarr(z, inline_array=inline_array, chunks=chunks)
if region.start or region.end:
pos = self.snp_sites(region=region.contig, field="POS")
loc_region = locate_region(region, pos)
d = d[loc_region]
return d
def site_filters(
self,
region,
mask,
field="filter_pass",
analysis=DEFAULT_SITE_FILTERS_ANALYSIS,
inline_array=True,
chunks="native",
):
"""Access SNP site filters.
Parameters
----------
region: str or list of str or Region or list of Region
Chromosome arm (e.g., "2L"), gene name (e.g., "AGAP007280"), genomic
region defined with coordinates (e.g., "2L:44989425-44998059") or a
named tuple with genomic location `Region(contig, start, end)`.
Multiple values can be provided as a list, in which case data will
be concatenated, e.g., ["3R", "3L"].
mask : {"gamb_colu_arab", "gamb_colu", "arab"}
Mask to use.
field : str, optional
Array to access.
analysis : str, optional
Site filters analysis version.
inline_array : bool, optional
Passed through to dask.from_array().
chunks : str, optional
If 'auto' let dask decide chunk size. If 'native' use native zarr
chunks. Also, can be a target size, e.g., '200 MiB'.
Returns
-------
d : dask.array.Array
An array of boolean values identifying sites that pass the filters.
"""
region = self.resolve_region(region)
if isinstance(region, Region):
region = [region]
d = da.concatenate(
[
self._site_filters(
region=r,
mask=mask,
field=field,
analysis=analysis,
inline_array=inline_array,
chunks=chunks,
)
for r in region
]
)
return d
def open_snp_sites(self):
"""Open SNP sites zarr.
Returns
-------
root : zarr.hierarchy.Group
"""
if self._cache_snp_sites is None:
path = f"{self._base_path}/v3/snp_genotypes/all/sites/"
store = init_zarr_store(fs=self._fs, path=path)
root = zarr.open_consolidated(store=store)
self._cache_snp_sites = root
return self._cache_snp_sites
def _snp_sites(
self,
*,
region,
field,
inline_array,
chunks,
):
assert isinstance(region, Region), type(region)
root = self.open_snp_sites()
z = root[f"{region.contig}/variants/{field}"]
ret = da_from_zarr(z, inline_array=inline_array, chunks=chunks)
if region.start or region.end:
pos = root[f"{region.contig}/variants/POS"]
loc_region = locate_region(region, pos)
ret = ret[loc_region]
return ret
def snp_sites(
self,
region,
field,
site_mask=None,
site_filters_analysis=DEFAULT_SITE_FILTERS_ANALYSIS,
inline_array=True,
chunks="native",
):
"""Access SNP site data (positions and alleles).
Parameters
----------
region: str or list of str or Region or list of Region
Chromosome arm (e.g., "2L"), gene name (e.g., "AGAP007280"), genomic
region defined with coordinates (e.g., "2L:44989425-44998059") or a
named tuple with genomic location `Region(contig, start, end)`.
Multiple values can be provided as a list, in which case data will
be concatenated, e.g., ["3R", "3L"].
field : {"POS", "REF", "ALT"}
Array to access.
site_mask : {"gamb_colu_arab", "gamb_colu", "arab"}
Site filters mask to apply.
site_filters_analysis : str
Site filters analysis version.
inline_array : bool, optional
Passed through to dask.array.from_array().
chunks : str, optional
If 'auto' let dask decide chunk size. If 'native' use native zarr
chunks. Also, can be a target size, e.g., '200 MiB'.
Returns
-------
d : dask.array.Array
An array of either SNP positions, reference alleles or alternate
alleles.
"""
region = self.resolve_region(region)
if isinstance(region, Region):
region = [region]
# concatenate
ret = da.concatenate(
[
self._snp_sites(
region=r,
field=field,
chunks=chunks,
inline_array=inline_array,
)
for r in region
],
axis=0,
)
if site_mask is not None:
loc_sites = self.site_filters(
region=region,
mask=site_mask,
analysis=site_filters_analysis,
chunks=chunks,
inline_array=inline_array,
)
ret = da_compress(loc_sites, ret, axis=0)
return ret
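    # Example usage (sketch, parameter values taken from the docstring above,
    # assuming an Ag3 instance `ag3`):
    #   pos = ag3.snp_sites(region="2L:44989425-44998059", field="POS",
    #                       site_mask="gamb_colu_arab")
    #   ref = ag3.snp_sites(region=["3R", "3L"], field="REF")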
def open_snp_genotypes(self, sample_set):
"""Open SNP genotypes zarr.
Parameters
----------
sample_set : str
Returns
-------
root : zarr.hierarchy.Group
"""
try:
return self._cache_snp_genotypes[sample_set]
except KeyError:
release = self._lookup_release(sample_set=sample_set)
release_path = _release_to_path(release)
path = f"{self._base_path}/{release_path}/snp_genotypes/all/{sample_set}/"
store = init_zarr_store(fs=self._fs, path=path)
root = zarr.open_consolidated(store=store)
self._cache_snp_genotypes[sample_set] = root
return root
def _snp_genotypes(self, *, region, sample_set, field, inline_array, chunks):
# single contig, single sample set
assert isinstance(region, Region)
assert isinstance(sample_set, str)
root = self.open_snp_genotypes(sample_set=sample_set)
z = root[f"{region.contig}/calldata/{field}"]
d = da_from_zarr(z, inline_array=inline_array, chunks=chunks)
if region.start or region.end:
pos = self.snp_sites(region=region.contig, field="POS")
loc_region = locate_region(region, pos)
d = d[loc_region]
return d
def snp_genotypes(
self,
region,
sample_sets=None,
field="GT",
site_mask=None,
site_filters_analysis=DEFAULT_SITE_FILTERS_ANALYSIS,
inline_array=True,
chunks="native",
):
"""Access SNP genotypes and associated data.
Parameters
----------
region: str or list of str or Region or list of Region
Chromosome arm (e.g., "2L"), gene name (e.g., "AGAP007280"), genomic
region defined with coordinates (e.g., "2L:44989425-44998059") or a
named tuple with genomic location `Region(contig, start, end)`.
Multiple values can be provided as a list, in which case data will
be concatenated, e.g., ["3R", "3L"].
sample_sets : str or list of str, optional
Can be a sample set identifier (e.g., "AG1000G-AO") or a list of
sample set identifiers (e.g., ["AG1000G-BF-A", "AG1000G-BF-B"]) or a
release identifier (e.g., "3.0") or a list of release identifiers.
field : {"GT", "GQ", "AD", "MQ"}
Array to access.
site_mask : {"gamb_colu_arab", "gamb_colu", "arab"}
Site filters mask to apply.
site_filters_analysis : str, optional
Site filters analysis version.
inline_array : bool, optional
Passed through to dask.array.from_array().
chunks : str, optional
If 'auto' let dask decide chunk size. If 'native' use native zarr
chunks. Also, can be a target size, e.g., '200 MiB'.
Returns
-------
d : dask.array.Array
An array of either genotypes (GT), genotype quality (GQ), allele
depths (AD) or mapping quality (MQ) values.
"""
# normalise parameters
sample_sets = self._prep_sample_sets_arg(sample_sets=sample_sets)
region = self.resolve_region(region)
# normalise region to list to simplify concatenation logic
if isinstance(region, Region):
region = [region]
# concatenate multiple sample sets and/or contigs
lx = []
for r in region:
ly = []
for s in sample_sets:
y = self._snp_genotypes(
region=Region(r.contig, None, None),
sample_set=s,
field=field,
inline_array=inline_array,
chunks=chunks,
)
ly.append(y)
# concatenate data from multiple sample sets
x = da.concatenate(ly, axis=1)
# locate region - do this only once, optimisation
if r.start or r.end:
pos = self.snp_sites(region=r.contig, field="POS")
loc_region = locate_region(r, pos)
x = x[loc_region]
lx.append(x)
# concatenate data from multiple regions
d = da.concatenate(lx, axis=0)
# apply site filters if requested
if site_mask is not None:
loc_sites = self.site_filters(
region=region, mask=site_mask, analysis=site_filters_analysis
)
d = da_compress(loc_sites, d, axis=0)
return d
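    # Example usage (sketch): genotypes for a gene across two sample sets, with a
    # site filters mask applied, following the docstring above:
    #   gt = ag3.snp_genotypes(region="AGAP007280",
    #                          sample_sets=["AG1000G-BF-A", "AG1000G-BF-B"],
    #                          field="GT", site_mask="gamb_colu_arab")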
def open_genome(self):
"""Open the reference genome zarr.
Returns
-------
root : zarr.hierarchy.Group
Zarr hierarchy containing the reference genome sequence.
"""
if self._cache_genome is None:
path = f"{self._base_path}/{GENOME_ZARR_PATH}"
store = init_zarr_store(fs=self._fs, path=path)
self._cache_genome = zarr.open_consolidated(store=store)
return self._cache_genome
def genome_sequence(self, region, inline_array=True, chunks="native"):
"""Access the reference genome sequence.
Parameters
----------
region: str or list of str or Region or list of Region
Chromosome arm (e.g., "2L"), gene name (e.g., "AGAP007280"), genomic
region defined with coordinates (e.g., "2L:44989425-44998059") or a
named tuple with genomic location `Region(contig, start, end)`.
Multiple values can be provided as a list, in which case data will
be concatenated, e.g., ["3R", "3L"].
inline_array : bool, optional
Passed through to dask.array.from_array().
chunks : str, optional
If 'auto' let dask decide chunk size. If 'native' use native zarr
chunks. Also, can be a target size, e.g., '200 MiB'.
Returns
-------
d : dask.array.Array
An array of nucleotides giving the reference genome sequence for the
given contig.
"""
genome = self.open_genome()
region = self.resolve_region(region)
z = genome[region.contig]
d = da_from_zarr(z, inline_array=inline_array, chunks=chunks)
if region.start:
slice_start = region.start - 1
else:
slice_start = None
if region.end:
slice_stop = region.end
else:
slice_stop = None
loc_region = slice(slice_start, slice_stop)
return d[loc_region]
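    # Example usage (sketch): reference sequence for a genomic window, returned as
    # a dask array of nucleotides per the docstring above:
    #   seq = ag3.genome_sequence("2L:44989425-44998059").compute()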
def geneset(self, region=None, attributes=("ID", "Parent", "Name", "description")):
"""Access genome feature annotations (AgamP4.12).
Parameters
----------
region: str or list of str or Region or list of Region
Chromosome arm (e.g., "2L"), gene name (e.g., "AGAP007280"), genomic
region defined with coordinates (e.g., "2L:44989425-44998059") or a
named tuple with genomic location `Region(contig, start, end)`.
Multiple values can be provided as a list, in which case data will
be concatenated, e.g., ["3R", "3L"].
attributes : list of str, optional
Attribute keys to unpack into columns. Provide "*" to unpack all
attributes.
Returns
-------
df : pandas.DataFrame
A dataframe of genome annotations, one row per feature.
"""
if attributes is not None:
attributes = tuple(attributes)
try:
df = self._cache_geneset[attributes]
except KeyError:
path = f"{self._base_path}/{GENESET_GFF3_PATH}"
with self._fs.open(path, mode="rb") as f:
df = read_gff3(f, compression="gzip")
if attributes is not None:
df = unpack_gff3_attributes(df, attributes=attributes)
self._cache_geneset[attributes] = df
# handle region
if region is not None:
region = self.resolve_region(region)
# normalise to list to simplify concatenation logic
if isinstance(region, Region):
region = [region]
# apply region query
parts = []
for r in region:
df_part = df.query(f"contig == '{r.contig}'")
if r.end is not None:
df_part = df_part.query(f"start <= {r.end}")
if r.start is not None:
df_part = df_part.query(f"end >= {r.start}")
parts.append(df_part)
df = pd.concat(parts, axis=0)
return df
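    # Example usage (sketch): genome feature annotations overlapping a region,
    # with the default attributes unpacked into columns (see docstring above):
    #   df_gs = ag3.geneset(region="2L:44989425-44998059")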
def _transcript_to_gene_name(self, transcript):
df_geneset = self.geneset().set_index("ID")
rec_transcript = df_geneset.loc[transcript]
parent = rec_transcript["Parent"]
rec_parent = df_geneset.loc[parent]
# manual overrides
if parent == "AGAP004707":
parent_name = "Vgsc/para"
else:
parent_name = rec_parent["Name"]
return parent_name
def is_accessible(
self, region, site_mask, site_filters_analysis=DEFAULT_SITE_FILTERS_ANALYSIS
):
"""Compute genome accessibility array.
Parameters
----------
region: str or list of str or Region or list of Region
Chromosome arm (e.g., "2L"), gene name (e.g., "AGAP007280"), genomic
region defined with coordinates (e.g., "2L:44989425-44998059") or a
named tuple with genomic location `Region(contig, start, end)`.
Multiple values can be provided as a list, in which case data will
be concatenated, e.g., ["3R", "3L"].
site_mask : {"gamb_colu_arab", "gamb_colu", "arab"}
Site filters mask to apply.
site_filters_analysis : str, optional
Site filters analysis version.
Returns
-------
a : numpy.ndarray
An array of boolean values identifying accessible genome sites.
"""
# resolve region
region = self.resolve_region(region)
# determine contig sequence length
seq_length = self.genome_sequence(region).shape[0]
# setup output
is_accessible = np.zeros(seq_length, dtype=bool)
pos = self.snp_sites(region=region, field="POS").compute()
if region.start:
offset = region.start
else:
offset = 1
# access site filters
filter_pass = self.site_filters(
region=region, mask=site_mask, analysis=site_filters_analysis
).compute()
# assign values from site filters
is_accessible[pos - offset] = filter_pass
return is_accessible
@staticmethod
def _site_mask_ids(*, site_filters_analysis):
if site_filters_analysis == "dt_20200416":
return "gamb_colu_arab", "gamb_colu", "arab"
else:
            raise ValueError(f"Unknown site filters analysis: {site_filters_analysis!r}")
def _snp_df(self, *, transcript, site_filters_analysis):
"""Set up a dataframe with SNP site and filter columns."""
# get feature direct from geneset
gs = self.geneset()
feature = gs[gs["ID"] == transcript].squeeze()
contig = feature.contig
region = Region(contig, feature.start, feature.end)
# grab pos, ref and alt for chrom arm from snp_sites
pos = self.snp_sites(region=contig, field="POS")
ref = self.snp_sites(region=contig, field="REF")
alt = self.snp_sites(region=contig, field="ALT")
loc_feature = locate_region(region, pos)
pos = pos[loc_feature].compute()
ref = ref[loc_feature].compute()
alt = alt[loc_feature].compute()
# access site filters
filter_pass = dict()
masks = self._site_mask_ids(site_filters_analysis=site_filters_analysis)
for m in masks:
x = self.site_filters(region=contig, mask=m, analysis=site_filters_analysis)
x = x[loc_feature].compute()
filter_pass[m] = x
# setup columns with contig, pos, ref, alt columns
cols = {
"contig": contig,
"position": np.repeat(pos, 3),
"ref_allele": np.repeat(ref.astype("U1"), 3),
"alt_allele": alt.astype("U1").flatten(),
}
# add mask columns
for m in masks:
x = filter_pass[m]
cols[f"pass_{m}"] = np.repeat(x, 3)
# construct dataframe
df_snps = pd.DataFrame(cols)
return region, df_snps
def _annotator(self):
# setup variant effect annotator
if self._cache_annotator is None:
self._cache_annotator = veff.Annotator(
genome=self.open_genome(), geneset=self.geneset()
)
return self._cache_annotator
def snp_effects(
self,
transcript,
site_mask=None,
site_filters_analysis=DEFAULT_SITE_FILTERS_ANALYSIS,
):
"""Compute variant effects for a gene transcript.
Parameters
----------
transcript : str
Gene transcript ID (AgamP4.12), e.g., "AGAP004707-RA".
site_mask : {"gamb_colu_arab", "gamb_colu", "arab"}, optional
Site filters mask to apply.
site_filters_analysis : str, optional
Site filters analysis version.
Returns
-------
df : pandas.DataFrame
A dataframe of all possible SNP variants and their effects, one row
per variant.
"""
# setup initial dataframe of SNPs
_, df_snps = self._snp_df(
transcript=transcript, site_filters_analysis=site_filters_analysis
)
# setup variant effect annotator
ann = self._annotator()
# apply mask if requested
if site_mask is not None:
loc_sites = df_snps[f"pass_{site_mask}"]
df_snps = df_snps.loc[loc_sites]
# reset index after filtering
df_snps.reset_index(inplace=True, drop=True)
# add effects to the dataframe
ann.get_effects(transcript=transcript, variants=df_snps)
return df_snps
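    # Example usage (sketch): all possible SNP effects for a Vgsc transcript,
    # restricted to sites passing one mask; filtering on the 'effect' column via
    # the AA_CHANGE_QUERY constant defined at the top of this module is assumed
    # to select amino-acid-changing variants:
    #   df_eff = ag3.snp_effects(transcript="AGAP004707-RA", site_mask="gamb_colu_arab")
    #   df_aa = df_eff.query(AA_CHANGE_QUERY)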
def snp_allele_frequencies(
self,
transcript,
cohorts,
sample_query=None,
cohorts_analysis=DEFAULT_COHORTS_ANALYSIS,
min_cohort_size=10,
site_mask=None,
site_filters_analysis=DEFAULT_SITE_FILTERS_ANALYSIS,
species_analysis=DEFAULT_SPECIES_ANALYSIS,
sample_sets=None,
drop_invariant=True,
effects=True,
):
"""Compute per variant allele frequencies for a gene transcript.
Parameters
----------
transcript : str
Gene transcript ID (AgamP4.12), e.g., "AGAP004707-RD".
cohorts : str or dict
If a string, gives the name of a predefined cohort set, e.g., one of
{"admin1_month", "admin1_year", "admin2_month", "admin2_year"}.
If a dict, should map cohort labels to sample queries, e.g.,
`{"bf_2012_col": "country == 'Burkina Faso' and year == 2012 and taxon == 'coluzzii'"}`.
sample_query : str, optional
A pandas query string which will be evaluated against the sample metadata e.g.,
"taxon == 'coluzzii' and country == 'Burkina Faso'".
cohorts_analysis : str
Cohort analysis version, default is the latest version.
min_cohort_size : int
Minimum cohort size. Any cohorts below this size are omitted.
site_mask : {"gamb_colu_arab", "gamb_colu", "arab"}
Site filters mask to apply.
site_filters_analysis : str, optional
Site filters analysis version.
species_analysis : {"aim_20200422", "pca_20200422"}, optional
Species calls analysis version.
sample_sets : str or list of str, optional
Can be a sample set identifier (e.g., "AG1000G-AO") or a list of sample set
identifiers (e.g., ["AG1000G-BF-A", "AG1000G-BF-B"]) or a release identifier (e.g.,
"3.0") or a list of release identifiers.
drop_invariant : bool, optional
If True, variants with no alternate allele calls in any cohorts are dropped from
the result.
effects : bool, optional
If True, add SNP effect columns.
Returns
-------
df : pandas.DataFrame
A dataframe of SNP frequencies, one row per variant.
Notes
-----
Cohorts with fewer samples than min_cohort_size will be excluded from output.
"""
# check parameters
_check_param_min_cohort_size(min_cohort_size)
# access sample metadata
df_samples = self.sample_metadata(
sample_sets=sample_sets,
cohorts_analysis=cohorts_analysis,
species_analysis=species_analysis,
)
# handle sample_query
loc_samples = None
if sample_query is not None:
loc_samples = df_samples.eval(sample_query).values
# setup initial dataframe of SNPs
region, df_snps = self._snp_df(
transcript=transcript, site_filters_analysis=site_filters_analysis
)
# get genotypes
gt = self.snp_genotypes(
region=region,
sample_sets=sample_sets,
field="GT",
)
        # materialise the genotype calls for the transcript region
        gt = gt.compute()
# build coh dict
coh_dict = _locate_cohorts(cohorts=cohorts, df_samples=df_samples)
# count alleles
freq_cols = dict()
for coh, loc_coh in coh_dict.items():
# handle sample query
if loc_samples is not None:
loc_coh = loc_coh & loc_samples
n_samples = np.count_nonzero(loc_coh)
if n_samples >= min_cohort_size:
gt_coh = np.compress(loc_coh, gt, axis=1)
# count alleles
ac_coh = allel.GenotypeArray(gt_coh).count_alleles(max_allele=3)
# compute allele frequencies
af_coh = ac_coh.to_frequencies()
# add column to dict
freq_cols["frq_" + coh] = af_coh[:, 1:].flatten()
# build a dataframe with the frequency columns
df_freqs = pd.DataFrame(freq_cols)
# compute max_af
df_max_af = pd.DataFrame({"max_af": df_freqs.max(axis=1)})
# build the final dataframe
df_snps.reset_index(drop=True, inplace=True)
df_snps = | pd.concat([df_snps, df_freqs, df_max_af], axis=1) | pandas.concat |
"""
SparseArray data structure
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
import pandas as pd
from pandas.core.base import PandasObject
from pandas import compat
from pandas.compat import range
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.generic import (
ABCSparseArray, ABCSparseSeries)
from pandas.core.dtypes.common import (
_ensure_platform_int,
is_float, is_integer,
is_integer_dtype,
is_bool_dtype,
is_list_like,
is_string_dtype,
is_scalar, is_dtype_equal)
from pandas.core.dtypes.cast import (
maybe_convert_platform, maybe_promote,
astype_nansafe, find_common_type)
from pandas.core.dtypes.missing import isnull, notnull, na_value_for_dtype
import pandas._libs.sparse as splib
from pandas._libs.sparse import SparseIndex, BlockIndex, IntIndex
from pandas._libs import index as libindex
import pandas.core.algorithms as algos
import pandas.core.ops as ops
import pandas.io.formats.printing as printing
from pandas.util._decorators import Appender
from pandas.core.indexes.base import _index_shared_docs
_sparray_doc_kwargs = dict(klass='SparseArray')
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
**eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
if isinstance(other, np.ndarray):
if len(self) != len(other):
raise AssertionError("length mismatch: %d vs. %d" %
(len(self), len(other)))
if not isinstance(other, ABCSparseArray):
dtype = getattr(other, 'dtype', None)
other = SparseArray(other, fill_value=self.fill_value,
dtype=dtype)
return _sparse_array_op(self, other, op, name)
elif is_scalar(other):
with np.errstate(all='ignore'):
fill = op(_get_fill(self), np.asarray(other))
result = op(self.sp_values, other)
return _wrap_result(name, result, self.sp_index, fill)
else: # pragma: no cover
raise TypeError('operation with %s not supported' % type(other))
if name.startswith("__"):
name = name[2:-2]
wrapper.__name__ = name
return wrapper
def _get_fill(arr):
# coerce fill_value to arr dtype if possible
# int64 SparseArray can have NaN as fill_value if there is no missing
try:
return np.asarray(arr.fill_value, dtype=arr.dtype)
except ValueError:
return np.asarray(arr.fill_value)
def _sparse_array_op(left, right, op, name, series=False):
if series and is_integer_dtype(left) and is_integer_dtype(right):
# series coerces to float64 if result should have NaN/inf
if name in ('floordiv', 'mod') and (right.values == 0).any():
left = left.astype(np.float64)
right = right.astype(np.float64)
elif name in ('rfloordiv', 'rmod') and (left.values == 0).any():
left = left.astype(np.float64)
right = right.astype(np.float64)
# dtype used to find corresponding sparse method
if not | is_dtype_equal(left.dtype, right.dtype) | pandas.core.dtypes.common.is_dtype_equal |
# File path of weather_2012.csv is stored in path
import pandas as pd
import numpy as np
from scipy.stats import mode
def categorical(df):
categorical_var= df.select_dtypes(include='object').columns.tolist()
return categorical_var
def numerical(df):
numerical_var = df.select_dtypes(include='number').columns.tolist()
return numerical_var
def clear(df,col,val):
value_counts = df[col].value_counts()[val]
return value_counts
def instances_based_condition(df,col1,val1,col2,val2):
instance = df[(df[col1] > val1) & (df[col2]== val2)]
return instance
def agg_values_ina_month(df,date_col,agg_col, agg):
df[date_col] = | pd.to_datetime(df[date_col]) | pandas.to_datetime |
'''
example of loading FinMind api
'''
from Data import Load
import requests
import pandas as pd
url = 'http://finmindapi.servebeer.com/api/data'
list_url = 'http://finmindapi.servebeer.com/api/datalist'
translate_url = 'http://finmindapi.servebeer.com/api/translation'
'''----------------TaiwanStockInfo----------------'''
form_data = {'dataset': 'TaiwanStockInfo'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------Taiwan Stock Dividend Result----------------'''
form_data = {'dataset': 'StockDividendResult'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = | pd.DataFrame(temp['data']) | pandas.DataFrame |
import io
import random
import sqlite3
from typing import List
import pandas as pd
import requests
pd.set_option('display.max_columns', None)
seq2ids_db_fp = "target/seq2ids.db"
# sacc_table = 'ranges_to_download'
sacc_table = 'one_best_up'
keep_frac = 1
submission_chunk_size = 300
seq2ids_db_conn = sqlite3.connect(seq2ids_db_fp)
ranges_to_download_q = f"select distinct sacc from {sacc_table} order by sacc"
ranges_to_download_res = | pd.read_sql_query(ranges_to_download_q, seq2ids_db_conn) | pandas.read_sql_query |
"""=========================================================
pipeline_peakcalling - Produce Peaklist from Bam Files
=========================================================
Overview
========
The aim of this pipeline is to create peaklists in :term:`bed` files from
aligned reads in :term:`bam` files that can then be taken on to downstream
analysis (e.g. motif identification, quantification of peaks etc.). Pipeline
also and generates QC statistics that will inform you about the quality of
the peaksets generated.
In addition this pipeline will also produce normalised BigWigs if the samples
are generated using the quantitative ChIP-seq method.
Functionality
-------------
- Will generate BigWigs. If the samples are ran as a quantitative ChIP-Rx
experiment then the samples will be normalised according to spike-in's (
usually sp1 or drosophila cells)
- Takes Paired-end or single end :term:`Bam` files you want to call peaks in
(e.g. ChIP-Seq or ATAC-Seq samples and their appropriate 'input' controls).
- Runs peakcallers
- Runs ChIPQC R package for QC statistics
- Produces peak lists in bed files to takeforward for downstream analysis.
Optional functions:
-------------------
- Filter :term:`Bam` files to remove:
- Duplicates
- Secondary alignments
- Unpaired reads for paired-end files
- Reads overlapping 'blacklisted' regions
- Mapping quality (MAPQ) score
- Pool input files for peakcalling to give better peakcalling results
when inputs have poor coverage or lack sequencing depth
- Perform Irreproducible Discovery Rate (IDR) analysis (described
further below) to get a consensus list of 'highly reproducible peaks'
and assess replicate quality.
NOTE: WARNINGS!!!!
------------------
1. IDR analysis may not be appropriate for all types of peak file - it works
best with transcription factor ChIPs or methodologies producing 'narrow peaks'
or peaks with well defined boundaries.
'BroadPeak' IDR (e.g. for widespread histone marks such as H3K27ac)
might not work because peak boundaries are harder to define and thus may
not be so reproducible between replicates
2. Always check your output from this pipeline in a genome browser to check
peaks are being called sufficiently!
3. This pipeline references :term:`ChIP bams` throughout the code - this
references the immunoprecipitated (IP) sample from a ChIP experiment
(i.e. the file you want to find peaks in), :term:`Input bams` refer to the
bams of the input control samples that are used for background
normalisation in peak calling. Although we refer to :term:`ChIP bams`
this is only nomenclature and you could just as easily use
an ATAC-Seq :term:`bam` file or other :term:`bam` files in which you are
looking for peaks.
4) Whilst you can call peaks with as many peakcallers that are implemented in
the pipeline, only the results from one peakcaller can be taken forward for IDR
analysis. If you want to run IDR analysis on the output of multiple peakcallers
you will need first run IDR with one peakcaller then clone the pipeline, modify
pipeline.yml file and delete the appropriate files to rerun the IDR analysis on
the output from a different peakcaller. Be warned that IDR analysis generates
a large number of peakfiles and it's best to decide on your preferred peakcaller
before running the IDR analysis.
References
==========
This pipeline follows closely the ENCODE3 version 1 peakprocessing pipeline
described by <NAME>'s group and the open source AQUAS TF ChIP-Seq
pipeline implemented by the Kundaje group:
* (https://docs.google.com/document/d/1lG_Rd7fnYgRpSIqrIfuVlAz2dW1VaSQThzk836Db99c/edit#heading=h.9ecc41kilcvq)
* (https://github.com/kundajelab/TF_chipseq_pipeline)
IDR analysis workflow is described here
* (https://sites.google.com/site/anshulkundaje/projects/idr)
for troubleshooting/discussion of the IDR workflow see and extra documentation
see:
* (https://groups.google.com/forum/#!forum/idr-discuss)
for ChIP and ATAC-Seq quality guidelines see:
* (https://www.encodeproject.org/data-standards/)
IDR Analysis
============
IDR analysis is used to:
* Give an indication of how reproducible the peaks that are produced by the
peakcallers are within a single sample
* Give an indication of how reproducible the peaks that are produced by the
peakcallers are within biological replicates
* produce a `conservative` peak list of highly reproducible peaks that
can be taken forward to downstream analysis
* produce an `oracle` peakset of a large number of mostly reproducible
peaks that can be taken forward to downstream analysis
* sometimes the `conservative` and the `oracle` peakset will be the same
list.
* for further information on IDR analysis see the links above
Important notes:
IDR analysis requires peaks are called with a relaxed threshold to generate
a peaklist that contains (ideally) > 120,000 peaks that will contain
reproducible or 'true' peaks along with a lot of irreproducible 'false' peaks.
Requirements
============
The pipeline requires the results from
:doc:`pipeline_annotations`. Set the configuration variable
:py:data:`annotations_database` and :py:data:`annotations_dir`.
The software environment is handled by the cgatpipelines conda environment
and all software is installed as part of the installation process.
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general
information how to use cgat pipelines.
See :ref:`Tutorials` for a comprehensive introduction of how to run a
cgatPipeline.
Pipeline Input
==============
Sample_bam = bam file you want to call peaks on (i.e. ChiP Bam or ATAC-Seq Bam)
Input_bam = control file used as background reference in peakcalling
(e.g. input file for ChIP-seq)
pipeline.yml = File containing parameters and options for
running the pipeline
design.tsv = This is a tab separated file based on the design file for R package
DiffBind
It has the following columns:
+---------+--------+--------+-----------+-----------+-----------+----------+-----------+--------------+
|SampleID | Tissue | Factor | Condition | Treatment | Replicate | bamReads | ControlID | bamControl |
+---------+--------+--------+-----------+-----------+-----------+----------+-----------+--------------+
|F123 |blood |H3K4 |normal |NA |1 |F123.bam | |F123_input.bam|
+---------+--------+--------+-----------+-----------+-----------+----------+-----------+--------------+
Pipeline output
===============
The aim of this pipeline is to output a list of peaks that
can be used for further downstream analysis.
The pipeline generates several new directories containing
output files - these can roughly be grouped into XXX main
stages of the pipeline
1) filtered_bams.dir
---------------------
Directory containing filtered bam files created by removing duplicates and
filtering of original bam files. These filtered bam files are then taken
forward to IDR/peakcalling. If no filtering or deduplication is specified
in the ini file then this directory will contain symbolic links to the
original bam files.
Directory contains:
* :term:`bams` files (and their indexes) that have been filtered
according to specifications in pipeline.yml
* a number of log files detailing the number of reads that have been
filtered out for each reason.
* for paired-end samples a file with the frequency of fragment
lengths (the distance between the paired reads 5' start positions)
2) BigWigs
-------
Wiggle files that are normalised are generated depending on the type of ChIP-seq
i.e. ChIP-Rx is normalised to spike-ins.
3) IDR.dir
-------
Directory containing the output files from IDR analysis
IDR is currently only set up to use with macs2 because this
is recommended by the authors of IDR. If you require IDR for broad
peaks it is recommended to use the macs2 broad peaks setting.
These include the lists of reproducible peaks and stats and
QC tables summarising the output of the IDR analysis
Directory contains:
* IDR_inputs.dir
This directory contains the files that are broad
IDR_inputs.dir
macs2.dir/
peakcalling_bams.dir/
peaks_for_IDR.dir/
pooled_bams.dir/
Peaksets:
Conservative Peakset = Only obtained if IDR analysis run
IDR analysis
This analysis does a comparison on a pair of peak files to
Tables
Contained in the database are several tables used for QC and
analysis
Code
====
"""
# load modules
from ruffus import transform, regex, active_if, follows, mkdir, jobs_limit, \
suffix, merge, add_inputs, originate, split, subdivide
from ruffus.combinatorics import *
import sys
import os
import math
import shutil
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import cgatcore.experiment as E
import cgatcore.iotools as iotools
from cgatcore import pipeline as P
import cgatpipelines.tasks.mappingqc as mappingqc
import cgatpipelines.tasks.peakcalling as peakcalling
import cgat.BamTools.bamtools as Bamtools
import cgatcore.database as DB
#########################################################################
# Load PARAMS dictionary from pipeline.yml file options ################
#########################################################################
# load options from pipeline.yml file into PARAMS dictionary
P.get_parameters(
["%s/pipeline.yml" % os.path.splitext(__file__)[0],
"../pipeline.yml",
"pipeline.yml"])
PARAMS = P.PARAMS
# add parameters from annotations pipeline.yml
PARAMS.update(P.peek_parameters(
PARAMS["annotations_dir"],
"genesets",
prefix="annotations_",
update_interface=True,
restrict_interface=True))
# load IDR parameters into a dictionary to pass to the IDR step
# IDR requires multiple parameters from the PARAMS dictionary
idrPARAMS = dict()
# get IDR peakcaller and params
idrpc = PARAMS['peakcalling_idrpeakcaller']
idrPARAMS['idrsuffix'] = PARAMS["%s_idrsuffix" % idrpc]
idrPARAMS['idrcol'] = PARAMS["%s_idrcol" % idrpc]
idrPARAMS['idrcolname'] = PARAMS['%s_idrcolname' % idrpc]
idrPARAMS['useoracle'] = PARAMS['IDR_useoracle']
#######################################################################
# Check for design file & Match ChIP/ATAC-Seq Bams with Inputs ########
#######################################################################
# This section checks for the design table and generates:
# 1. A dictionary, inputD, linking each input file and each of the various
# IDR subfiles to the appropriate input, as specified in the design table
# 2. A pandas dataframe, df, containing the information from the
# design table.
# 3. INPUTBAMS: a list of control (input) bam files to use as background for
# peakcalling.
# 4. CHIPBAMS: a list of experimental bam files on which to call peaks on.
# if design table is missing the input and chip bams to empty list. This gets
# round the import tests
if os.path.exists("design.tsv"):
df, inputD = peakcalling.readDesignTable("design.tsv",
PARAMS['IDR_poolinputs'])
INPUTBAMS = list(set(df['bamControl'].values))
CHIPBAMS = list(set(df['bamReads'].values))
else:
E.warn("design.tsv is not located within the folder")
INPUTBAMS = []
CHIPBAMS = []
# TODO we need to add code to pick up empty input and chipbams list and cause
# pipeline to throw an error
########################################################################
# Check if reads are paired end
########################################################################
if CHIPBAMS and Bamtools.is_paired(CHIPBAMS[0]):
PARAMS['paired_end'] = True
else:
PARAMS['paired_end'] = False
########################################################################
# Set template notebooks dir
########################################################################
if PARAMS['notebook_template_dir'] == '':
PARAMS['notebook_template_dir'] = '/'.join(
[PARAMS['pipelinedir'],
'pipeline_docs/pipeline_peakcalling/notebooks'])
###########################################################################
# start of pipelined tasks
# 1) Preprocessing Steps - Filter bam files & generate bam stats
###########################################################################
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@transform("design.tsv", suffix(".tsv"), ".load")
def loadDesignTable(infile, outfile):
''' load design.tsv to database '''
P.load(infile, outfile)
@active_if(PARAMS["have_input"] != 0)
@follows(mkdir("filtered_bams.dir"))
@transform(INPUTBAMS, regex("(.*).bam"),
[r"filtered_bams.dir/\1_filtered.bam",
r"filtered_bams.dir/\1_counts.tsv"])
def filterInputBAMs(infile, outfiles):
'''
Applies various filters specified in the pipeline.yml to the bam file
Currently implemented are filtering:
unwanted contigs based on partial name matching
unmapped reads
unpaired reads
duplicate reads
secondary alignment reads
reads below a mapping quality (MAPQ) score
reads overlapping with blacklisted regions specified in bed file.
'''
filters = P.as_list(PARAMS['filters_bamfilters'])
bedfiles = P.as_list(PARAMS['filters_bedfiles'])
blthresh = PARAMS['filters_blacklistthresh']
if blthresh != "":
blthresh = float(blthresh)
peakcalling.filterBams(infile, outfiles, filters, bedfiles,
blthresh,
PARAMS['paired_end'],
PARAMS['filters_strip'],
PARAMS['filters_qual'],
PARAMS['filters_contigs_to_remove'],
PARAMS['filters_keepint'],
PARAMS['filters_memory'],
PARAMS['filters_picard_options'])
@follows(mkdir("filtered_bams.dir"))
@transform(CHIPBAMS, regex("(.*).bam"), [r"filtered_bams.dir/\1_filtered.bam",
r"filtered_bams.dir/\1_counts.tsv"])
def filterChipBAMs(infile, outfiles):
'''
Applies various filters specified in the pipeline.yml to the bam file
Currently implemented are filtering:
unmapped reads
unpaired reads
duplicate reads
secondary alignment reads
reads below a mapping quality (MAPQ) score
reads overlapping with blacklisted regions specified in bed file.
'''
filters = P.as_list(PARAMS['filters_bamfilters'])
bedfiles = P.as_list(PARAMS['filters_bedfiles'])
blthresh = PARAMS['filters_blacklistthresh']
if blthresh != "":
blthresh = float(blthresh)
peakcalling.filterBams(infile, outfiles, filters, bedfiles,
float(blthresh),
PARAMS['paired_end'],
PARAMS['filters_strip'],
PARAMS['filters_qual'],
PARAMS['filters_contigs_to_remove'],
PARAMS['filters_keepint'],
PARAMS['filters_memory'],
PARAMS['filters_picard_options'])
# ############################################################################
# ##### Filtering Stats and QC
# ############################################################################
@transform((filterChipBAMs, filterInputBAMs), suffix("_filtered.bam"),
[r"\1_filtered.bam",
r"\1_counts.tsv"])
def filteredBams(infiles, outfiles):
''' dummy task to collect filtered bams and counts.tsv tables
    for input and chip files for downstream QC & Stats'''
@merge((filterChipBAMs, filterInputBAMs), "post_filtering_read_counts.tsv")
def mergeFilteringStats(infiles, outfile):
'''
Generates a table of read counts in each bam file after removal of:
duplicates: duplicates reads
secondary: secondary alignment
unpaired: unpaired reads
unmapped: unmapped reads
lowqual: low quality reads
blacklist_xxx: reads in the blacklist file xxx
contigs: removal of contigs that match patterns specified in ini file
'''
counts = [i[1] for i in infiles]
bigtab = pd.DataFrame()
for c in counts:
tab = | pd.read_csv(c, sep="\t") | pandas.read_csv |
import numpy as np
import pandas as pd
def loadStreamFile(filename):
'''
The MC `clustermove` generates an output stream if run with the `file` keyword.
This function loads this (potentially gzip compressed) file into a Pandas DataFrame.
Parameters
----------
filename : str
Input file generated by the clustermove using the `file` keyword (.dat|.gat.gz)
Returns
-------
pandas.DataFrame
Contents of the entire file as a dataframe
'''
data = np.loadtxt(filename, skiprows=1)
df = pd.DataFrame(data, columns=["cluster_size", "seed", "shape_anisotropy"])
return df
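# Usage sketch (the file name is hypothetical -- any stream file written by the
# clustermove `file` keyword will do); plotSizeDistribution/plotShapeAnisotropy
# are defined further down in this module:
#
#   df = loadStreamFile("clustersize.dat.gz")
#   plotSizeDistribution(df, bins=range(1, int(df.cluster_size.max()) + 2))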
def plotShapeAnisotropy(df, cluster_size, **kwargs):
'''
Plots normalized shape anisotropy distribution for a given cluster size
Parameters
----------
df : pandas.DataFrame
Dataframe of the entire stream file; usually from `loadStreamFile`
cluster_size : int
Cluster size to analyse
**kwargs
All other plotting keyword arguments to be passed to matplotlib.pyplot.hist()
Returns
-------
matplotlib.axis
Axis object
'''
filtered = df[df.cluster_size==cluster_size] # analyse specific cluster size
weights = np.ones_like(filtered.index) / len(filtered.index) # normalize to unity
ax = filtered.hist(column="shape_anisotropy", weights = weights, **kwargs)[0][0]
ax.set_xlabel("Shape Anisotropy")
ax.set_ylabel("Probability")
ax.set_title("Shape Anisotropy ($N$={})".format(cluster_size));
return ax
def plotSizeDistribution(df, **kwargs):
'''
Plots normalized cluster size distribution
Parameters
----------
df : pandas.DataFrame
Dataframe of the entire stream file; usually from `loadStreamFile`
**kwargs
All other plotting keyword arguments to be passed to matplotlib.pyplot.hist()
Returns
-------
matplotlib.axis
Axis object
'''
weights = np.ones_like(df.index) / len(df.index) # normalize to unity
ax = df.hist(column="cluster_size", weights = weights, **kwargs)[0][0]
ax.set_xlabel("Cluster Size, $N$")
ax.set_ylabel("Probability")
ax.set_title("Cluster Size Distribution");
return ax
def statistics(df):
'''
Generates a dataframe with statistics about cluster size and shape.
The most probable cluster size is marked in RED
Parameters
----------
df : pandas.DataFrame
Dataframe of the entire stream file; usually from `loadStreamFile`
Returns
-------
pandas.DataFrame
Contents with statistics
'''
s1 = df.groupby(["cluster_size"]).shape_anisotropy.mean()
s2 = df.groupby(["cluster_size"]).seed.count()
d = | pd.concat([s1, s2], axis=1) | pandas.concat |
# coding: utf-8
# In[21]:
# preprocessing CMU CERT r.42 dataset
# : generate user based event log files from event-based files
data_dir = './cert_data/r4.2/'
to_dir = './processed_data/r4.2/'
# In[22]:
# original : 01_make_user_dictionary
import numpy as np
import pandas as pd
log = pd.read_csv( data_dir + 'logon.csv', sep=',')
users = sorted(log['user'].unique())
no = 0
user_dict = pd.DataFrame(users, index=range(0,len(users)),columns=['user'])
user_dict.to_csv(to_dir + 'dictionary.csv', sep=',')
answer = pd.read_csv('./cert_data/answers/answer_r4.2_all_org.csv', sep=',')
answer['userid'] = answer['user'].apply(lambda x: user_dict.index[user_dict['user'] == x][0])
answer.to_csv(to_dir + 'answer_r4.2_all.csv',index=False)
# In[6]:
import numpy as np
import pandas as pd
filename = 'logon.csv'
dataset = pd.read_csv(data_dir + filename, sep=',')
df = | pd.DataFrame(dataset) | pandas.DataFrame |
#!/usr/bin/env python
import os, sys
import pandas as pd
from pdb import set_trace
strProjectList = sys.argv[1]
#strProjectList = 'Project_list2.txt'
def Summation_all_final_result():
with open(strProjectList) as Input:
listdfResult = []
for i, strSample in enumerate(Input):
#print(strSample)
#if i == 2: break
strSample = strSample.replace('\n','').replace('\r','').strip()
strFinalResultDir = './Output/%s/Summary/Merge_target_result/' % strSample
for j, strFinalResultFile in enumerate(os.listdir(strFinalResultDir)):
if j > 0:
print('I expected one file, but there are more. check the target base change file')
sys.exit(1)
print(strFinalResultFile)
strFinalResultPath = './Output/%s/Summary/Merge_target_result/%s' % (strSample, strFinalResultFile)
listdfResult.append(pd.read_table(strFinalResultPath, low_memory=False))
dfAll = | pd.concat(listdfResult) | pandas.concat |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: zzh
@file: factor_earning_expectation.py
@time: 2019-9-19
"""
import pandas as pd
class FactorEarningExpectation():
"""
    Earnings expectations
"""
def __init__(self):
__str__ = 'factor_earning_expectation'
self.name = '盈利预测'
self.factor_type1 = '盈利预测'
self.factor_type2 = '盈利预测'
self.description = '个股盈利预测因子'
@staticmethod
def NPFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy1']):
"""
        :name: Consensus forecast net profit (FY1)
        :desc: Consensus net profit forecast for the first forward fiscal year
        :unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy1': 'NPFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy2']):
"""
        :name: Consensus forecast net profit (FY2)
        :desc: Consensus net profit forecast for the second forward fiscal year
        :unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy2': 'NPFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy1']):
"""
        :name: Consensus forecast earnings per share (FY1)
        :desc: Mean consensus EPS forecast for the first forward fiscal year
        :unit: CNY
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy1': 'EPSFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy2']):
"""
        :name: Consensus forecast earnings per share (FY2)
        :desc: Mean consensus EPS forecast for the second forward fiscal year
        :unit: CNY
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy2': 'EPSFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy1']):
"""
        :name: Consensus forecast operating revenue (FY1)
        :desc: Mean consensus operating revenue forecast for the first forward fiscal year
        :unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy1': 'OptIncFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy2']):
"""
        :name: Consensus forecast operating revenue (FY2)
        :desc: Mean consensus operating revenue forecast for the second forward fiscal year
        :unit: CNY
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy2': 'OptIncFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy1']):
"""
        :name: Consensus forecast price-to-earnings ratio (PE) (FY1)
        :desc: Mean consensus PE forecast for the first forward fiscal year
        :unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy1': 'CEPEFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy2']):
"""
        :name: Consensus forecast price-to-earnings ratio (PE) (FY2)
        :desc: Mean consensus PE forecast for the second forward fiscal year
        :unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy2': 'CEPEFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy1']):
"""
        :name: Consensus forecast price-to-book ratio (PB) (FY1)
        :desc: Mean consensus PB forecast for the first forward fiscal year
        :unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy1': 'CEPBFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy2']):
"""
        :name: Consensus forecast price-to-book ratio (PB) (FY2)
        :desc: Mean consensus PB forecast for the second forward fiscal year
        :unit: times
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy2': 'CEPBFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy1']):
"""
        :name: Price/earnings-to-growth (PEG) ratio (FY1)
        :desc: PEG ratio for the first forward fiscal year
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy1': 'CEPEGFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy2']):
"""
        :name: Price/earnings-to-growth (PEG) ratio (FY2)
        :desc: PEG ratio for the second forward fiscal year
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy2': 'CEPEGFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def _change_rate(tp_earning, trade_date, pre_trade_date, colunm, factor_name):
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, colunm]
earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, colunm]
earning_expect = pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left')
earning_expect[factor_name] = (earning_expect[colunm + '_x'] - earning_expect[colunm + '_y']) / \
earning_expect[colunm + '_y']
earning_expect.drop(columns=[colunm + '_x', colunm + '_y'], inplace=True)
return earning_expect
@staticmethod
def _change_value(tp_earning, trade_date, pre_trade_date, colunm, factor_name):
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, colunm]
earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, colunm]
earning_expect = | pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left') | pandas.merge |
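# Sketch of how the _change_rate helper above is typically used: compute the
# relative change of a consensus field between two trade dates and merge it back
# onto the factor frame. The dates and the 'NPFY1WeekChg' factor name are
# hypothetical; only _change_rate and the merge-on-security_code pattern come
# from the class above.
def np_fy1_week_change(tp_earning, factor_earning_expect, trade_date, pre_trade_date):
    chg = FactorEarningExpectation._change_rate(
        tp_earning, trade_date, pre_trade_date, 'net_profit_fy1', 'NPFY1WeekChg')
    return pd.merge(factor_earning_expect, chg, on='security_code', how='left')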
import pandas as pd
location="measure1.csv"
e=pd.read_csv(location)
location1="measure22.csv"
e1=pd.read_csv(location1)
ids=[]
types=[]
thre1=400000000
thre2=100000000
#for first rule
for sh1,sh2 in enumerate(e['Id']):
if e.iloc[sh1,4]>=3 and e.iloc[sh1,7]>=thre1 and 0.4<=e.iloc[sh1,10]<=1:
ids.append(e.iloc[sh1,0])
types.append("type1")
#for second rule
first=[]
second=[]
third=[]
forth=[]
fifth=[]
sixth=[]
shenase=[]
typee=[]
max2=0
max3=0
for l,l1 in enumerate(e1['Id']):
if e1.iloc[l,7]>max2:
max2=e1.iloc[l,7]
for i in range(0,max2+1):
for i1,i2 in enumerate(e1['Id']):
if e1.iloc[i1,7]==i:
first.append(e1.iloc[i1,0])
second.append(e1.iloc[i1,1])
for i3,i4 in enumerate(first):
if second[i3] > max3:
max3=first[i3]
third.append(max3)
max3=0
first=[]
second=[]
for i8,i9 in enumerate(third):
if third[i8]!=0:
forth.append(third[i8])
for i5,i6 in enumerate(e1['Id']):
for i7,i10 in enumerate(forth):
if e1.iloc[i5,0]==forth[i7]:
fifth.append(e1.iloc[i5,4])
sixth.append(e1.iloc[i5,8])
for i10,i11 in enumerate(forth):
if (fifth[i10]>=thre2)and (sixth[i10]<=0.6):
ids.append(forth[i10])
types.append("type2")
#for third rule
for sh3,sh4 in enumerate(e1['Id']):
if e1.iloc[sh3,5]>=3 and e1.iloc[sh3,3]>=thre2 and 0<=e1.iloc[sh3,2]<0.6:
ids.append(e1.iloc[sh3,0])
types.append("type3")
rel=list(zip(ids,types))
pp= | pd.DataFrame(data=rel,columns=['id','type'] ) | pandas.DataFrame |
"""
TODO - basic:
- tbl.index.name setting
- tbl adding data - setting columns, appending, etc.
TODO - groupby:
- groupby options - groupby indexing (esp for expr groupbys)
- groupby push out VirtualTables
- groupby aggregate multiple agg types, dict agg
- groupby transform / apply?
TODO - joins:
- https://pandas.pydata.org/pandas-docs/stable/merging.html
- test all hows
- pd.concat (row-wise: UNION, UNION ALL)
- pd.merge (https://pandas.pydata.org/pandas-docs/stable/merging.html#database-style-dataframe-joining-merging)
- todo: move df.join to pd.merge (more general)
"""
import copy
import operator
from functools import wraps, partialmethod, reduce
from collections.abc import Iterable
from warnings import warn
import numbers
import pandas as pd
import numpy as np
import sqlalchemy as sa
from sqlalchemy.sql import func
from sqlalchemy.dialects import mssql, postgresql
import sympy
from toolz import assoc, valfilter
#from odo.backends.sql import types as sa_types
#from odo.backends.sql import discover_typeengine
import datashape
__version__ = "0.1.0"
# -------------------------------------
# COPYING FROM ODO TO REMOVE DEPENDENCY
# from odo https://github.com/blaze/odo/blob/master/odo/backends/sql.py
sa_types = {
'int64': sa.BigInteger,
'int32': sa.Integer,
'int': sa.Integer,
'int16': sa.SmallInteger,
'float32': sa.REAL,
'float64': sa.FLOAT,
'float': sa.FLOAT,
'real': sa.FLOAT,
'string': sa.Text,
'date': sa.Date,
'time': sa.Time,
'datetime': sa.DateTime,
'bool': sa.Boolean,
"timedelta[unit='D']": sa.Interval(second_precision=0, day_precision=9),
"timedelta[unit='h']": sa.Interval(second_precision=0, day_precision=0),
"timedelta[unit='m']": sa.Interval(second_precision=0, day_precision=0),
"timedelta[unit='s']": sa.Interval(second_precision=0, day_precision=0),
"timedelta[unit='ms']": sa.Interval(second_precision=3, day_precision=0),
"timedelta[unit='us']": sa.Interval(second_precision=6, day_precision=0),
"timedelta[unit='ns']": sa.Interval(second_precision=9, day_precision=0),
# ??: sa.types.LargeBinary,
}
sa_revtypes = dict(map(reversed, sa_types.items()))
# Subclass mssql.TIMESTAMP subclass for use when differentiating between
# mssql.TIMESTAMP and sa.TIMESTAMP.
# At the time of this writing, (mssql.TIMESTAMP == sa.TIMESTAMP) is True,
# which causes a collision when defining the sa_revtypes mappings.
#
# See:
# https://bitbucket.org/zzzeek/sqlalchemy/issues/4092/type-problem-with-mssqltimestamp
class MSSQLTimestamp(mssql.TIMESTAMP):
pass
# Assign the custom subclass as the type to use instead of `mssql.TIMESTAMP`.
mssql.base.ischema_names['TIMESTAMP'] = MSSQLTimestamp
sa_revtypes.update({
sa.DATETIME: datashape.datetime_,
sa.TIMESTAMP: datashape.datetime_,
sa.FLOAT: datashape.float64,
sa.DATE: datashape.date_,
sa.BIGINT: datashape.int64,
sa.INTEGER: datashape.int_,
sa.BIGINT: datashape.int64,
sa.types.NullType: datashape.string,
sa.REAL: datashape.float32,
sa.Float: datashape.float64,
mssql.BIT: datashape.bool_,
mssql.DATETIMEOFFSET: datashape.string,
mssql.MONEY: datashape.float64,
mssql.SMALLMONEY: datashape.float32,
mssql.UNIQUEIDENTIFIER: datashape.string,
# The SQL Server TIMESTAMP value doesn't correspond to the ISO Standard
# It is instead just a binary(8) value with no relation to dates or times
MSSQLTimestamp: datashape.bytes_,
})
precision_types = {
sa.Float,
postgresql.base.DOUBLE_PRECISION
}
def precision_to_dtype(precision):
"""
Maps a float or double precision attribute to the desired dtype.
The mappings are as follows:
[1, 24] -> float32
[25, 53] -> float64
Values outside of those ranges raise a ``ValueError``.
Parameter
---------
precision : int
A double or float precision. e.g. the value returned by
`postgresql.base.DOUBLE_PRECISION(precision=53).precision`
Returns
-------
dtype : datashape.dtype (float32|float64)
The dtype to use for columns of the specified precision.
"""
    if isinstance(precision, numbers.Integral):
        if 1 <= precision <= 24:
            return datashape.float32
        elif 25 <= precision <= 53:
            return datashape.float64
raise ValueError("{} is not a supported precision".format(precision))
# interval types are special cased in discover_typeengine so remove them from
# sa_revtypes
sa_revtypes = valfilter(lambda x: not isinstance(x, sa.Interval), sa_revtypes)
# second-precision -> timedelta unit map used by discover_typeengine below
# (restored here so the copied snippet is self-contained; mirrors the Interval
# entries in sa_types above)
units_of_power = {0: 's', 3: 'ms', 6: 'us', 9: 'ns'}
def discover_typeengine(typ):
if isinstance(typ, sa.Interval):
if typ.second_precision is None and typ.day_precision is None:
return datashape.TimeDelta(unit='us')
elif typ.second_precision == 0 and typ.day_precision == 0:
return datashape.TimeDelta(unit='s')
if typ.second_precision in units_of_power and not typ.day_precision:
units = units_of_power[typ.second_precision]
elif typ.day_precision > 0:
units = 'D'
else:
raise ValueError('Cannot infer INTERVAL type with parameters'
'second_precision=%d, day_precision=%d' %
(typ.second_precision, typ.day_precision))
return datashape.TimeDelta(unit=units)
if type(typ) in precision_types and typ.precision is not None:
return precision_to_dtype(typ.precision)
if typ in sa_revtypes:
return datashape.dshape(sa_revtypes[typ])[0]
if type(typ) in sa_revtypes:
return sa_revtypes[type(typ)]
if isinstance(typ, sa.Numeric):
return datashape.Decimal(precision=typ.precision, scale=typ.scale)
if isinstance(typ, (sa.String, sa.Unicode)):
return datashape.String(typ.length, 'U8')
else:
for k, v in sa_revtypes.items():
if isinstance(k, type) and (isinstance(typ, k) or
hasattr(typ, 'impl') and
isinstance(typ.impl, k)):
return v
if k == typ:
return v
raise NotImplementedError("No SQL-datashape match for type %s" % typ)
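# Illustrative round-trips through the reverse mapping above (values follow from
# sa_revtypes and the isinstance branches in discover_typeengine):
#   discover_typeengine(sa.BIGINT())        -> datashape.int64
#   discover_typeengine(sa.String(32))      -> datashape.String(32, 'U8')
#   discover_typeengine(sa.Numeric(10, 2))  -> datashape.Decimal(precision=10, scale=2)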
# -------------------------------------
# END COPYING FROM ODO
# -------------------------------------
def is_striter(val):
return isinstance(val, Iterable) and all(isinstance(el, str) for el in val)
def is_iter_notstr(val):
return isinstance(val, Iterable) and not isinstance(val, str)
def and_(*args):
return reduce(operator.and_, args)
def _dtype(type_name):
if type_name == "string":
type_name = "object"
return np.dtype(type_name)
class DB:
def __init__(self, engine, verbose=False, check="auto",
autoindex=True):
if isinstance(engine, str):
engine = sa.create_engine(engine, echo=verbose)
else:
engine.echo = verbose
self.engine = engine
if check == "auto":
try:
from IPython import get_ipython
check = get_ipython() is not None
except ImportError:
check = False
self.check = check
self.autoindex = autoindex
@property
def metadata(self):
return sa.MetaData().reflect(bind=self.engine)
@property
def tables(self):
return self.engine.table_names()
def __iter__(self):
return iter(self.tables)
def __contains__(self, k):
return k in self.tables
def __len__(self):
return len(self.tables)
def __getitem__(self, k):
assert not self.check or k in self
return Table(self.engine, k, check=self.check,
index=self.autoindex)
def __setitem__(self, k, v):
if k not in self:
metadata, _ = Table.from_df(v, k)
metadata.create_all(self.engine)
self[k].append(v)
else:
raise NotImplementedError()
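# Usage sketch for the DB wrapper (connection string, table name and data are
# placeholders; the Table class referenced above is assumed to be defined
# elsewhere in this module):
#
#   db = DB("sqlite:///example.db")
#   db["people"] = pd.DataFrame({"name": ["ada", "bob"], "age": [36, 41]})
#   tbl = db["people"]        # Table bound to the engine
#   "people" in db, len(db)   # membership test / number of tables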
_colobjtypes = {
str: sa.String
}
def to_sqlalchemy_type(s):
if s.dtype.name in sa_types:
return sa_types[s.dtype.name]
el = s.iloc[0]
if type(el).__name__ in sa_types:
        return sa_types[type(el).__name__]
for k, v in _colobjtypes.items():
if isinstance(el, k):
return v
raise TypeError("unknown type: %s / %s" % (s.dtype.name, type(el)))
_numeric_types = [typ for typ in sa_types if any(
typ.startswith(numtyp) for numtyp in ['bool', 'float', 'int', 'timedelta'])]
class VirtualTable:
def __init__(self, engine, salc, check=True,
whereclause=None, from_i=None, to_i=None,
sort_by=[], # (by, asc) tuples
index=True, columns=None):
self.engine = engine
self.sa = salc
self._whereclause = whereclause
self._from_i = from_i
self._to_i = to_i
self._sort_by = sort_by
if isinstance(index, (str, Expression)):
index = [index]
if index == True: # auto-detect
self._ix = [c.name for c in self.sa_columns if c.primary_key]
self._ixdata = [c for c in self.sa_columns if c.primary_key]
elif is_striter(index):
self._ix = list(index)
self._ixdata = [self.sa_colmap[col] for col in self._ix]
elif index == False or index is None:
self._ix = []
self._ixdata = []
elif all(isinstance(ix, Expression) for ix in index):
self._ix = [c.name for c in index]
self._ixdata = list(index)
if columns is None:
self._columns = [c.name for c in self.sa_columns if not c.name in self._ix]
self._coldata = [c for c in self.sa_columns if not c.name in self._ix]
elif is_striter(columns):
self._columns = list(columns)
self._coldata = [self.sa_colmap[col] for col in self._columns]
elif all(isinstance(col, Expression) for col in columns):
self._columns = [c.name for c in columns]
self._coldata = list(columns)
def copy(self, **new_attrs):
new = copy.copy(self)
for k, v in new_attrs.items():
setattr(new, k, v)
return new
## column stuffs
@property
def sa_columns(self):
cols = self.sa.columns
self.__dict__['sa_columns'] = cols
return cols
@property
def sa_colmap(self):
colmap = {c.name: c for c in self.sa_columns}
self.__dict__['sa_colmap'] = colmap
return colmap
@property
def columns(self):
return self._columns
@columns.setter
def columns(self, column_names):
assert len(column_names) == len(self._coldata)
self._columns = column_names
def _colmatches(self, col, singleton=False, required=False):
matches = [datum for name, datum in zip(self._columns, self._coldata)
if col == name]
if required and not matches:
raise KeyError("key %r not found among %r" % (col, self._columns))
if singleton:
if len(matches) > 1:
raise KeyError("ambiguous key %r among %r" % (col, self._columns))
matches = matches[0] if matches else None
return matches
def rename(self, columns=None):
if columns is not None:
if isinstance(columns, Mapping):
new_cols = [columns.get(col, col) for col in self._columns]
elif isinstance(columns, Callable):
new_cols = [columns(col) for col in self._columns]
else:
raise TypeError("unknown mapper type: %s" % (type(columns)))
return self.copy(_columns=new_cols)
return self
@property
def coltypes(self):
cols = [c for c in self.sa_columns if not c.name in self._ix]
return pd.Series([str(discover_typeengine(c.type)) for c in cols],
index=[c.name for c in cols])
@property
def dtypes(self):
return self.coltypes.map(_dtype)
def iteritems(self):
yield from zip(self._columns, self._coldata)
items = iteritems
def keys(self):
yield from self._columns
__iter__ = keys
def __getitem__(self, k):
if isinstance(k, str):
colmatches = self._colmatches(k, required=True)
if len(colmatches) == 1:
return Expression(self, colmatches[0], k)
else:
return self.copy(_columns=[k]*len(colmatches), _coldata=colmatches)
elif is_striter(k):
new_columns = []
new_coldata = []
for el in k:
colmatches = self._colmatches(el, required=True)
new_columns += [el] * len(colmatches)
new_coldata += colmatches
return self.copy(_columns=new_columns, _coldata=new_coldata)
elif isinstance(k, slice):
return self.islice(k)
elif isinstance(k, Expression):
return self.where(k)
return self._loc(k)
## indexing
@property
def index(self):
if len(self._ix) == 0:
return None
if len(self._ix) == 1:
return Expression(self, self._ixdata[0], self._ix[0])
else:
# multindex...return dataframe??
            return self.copy(_columns=list(self._ix), _coldata=list(self._ixdata))
def reset_index(self, drop=False):
if drop:
return self.copy(_ix=[], _ixdata=[])
return self.copy(_ix=[], _ixdata=[], _columns=self._columns + self._ix,
_coldata=self._coldata + self._ixdata)
def set_index(self, keys, drop=True, append=False):
if isinstance(keys, (str, Expression)):
keys = [keys]
new_ix = list(self._ix) if append else []
new_ixdata = list(self._ixdata) if append else []
new_columns = list(self._columns)
new_coldata = list(self._coldata)
for k in keys:
if isinstance(k, str):
new_ixdata.append(self._colmatches(k, singleton=True, required=True))
new_ix.append(k)
if drop:
ix = new_columns.index(k)
new_columns.pop(ix)
new_coldata.pop(ix)
elif isinstance(k, Expression):
new_ixdata.append(k)
new_ix.append(k.name)
return self.copy(_ix=new_ix, _ixdata=new_ixdata,
_columns=new_columns, _coldata=new_coldata)
## location
def _lookup(self, k):
result = self.where(self.index == k).df
if len(result) == 1: # and not isinstance(k, sa.sql.elements.ClauseElement):
return result.iloc[0]
elif len(result) == 0:
raise KeyError("%r not found in %s" % (k, self.index))
return result
def _loc(self, k):
# actually returns a dataframe/series for lookups
# .loc[normal loc, columns??]
if isinstance(k, tuple) and len(k) == 2:
condition, cols = k
if isinstance(cols, str) or is_striter(cols):
return self._loc(condition)[cols]
if isinstance(k, slice):
# slice (greater than: less than)
if k.step is not None:
return self._loc(slice(k.start, k.stop))[::k.step]
if k.start is None and k.stop is not None:
return self.where(self.index <= k.stop)
if k.start is not None and k.stop is None:
return self.where(self.index >= k.start)
if k.start is not None and k.stop is not None:
return self.where(self.index >= k.start & self.index <= k.stop)
return self
if isinstance(k, Expression):
# boolean array?
return self.where(k)
elif is_iter_notstr(k):
# list of elements
results = [self._lookup(el) for el in k]
result = pd.concat([ | pd.DataFrame([r]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import requests
import warnings
import scipy as sp
from scipy import stats
try:
import sklearn
except ImportError:
sklearn = False
else:
from sklearn.decomposition import PCA
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from . import filters, process
from .utils import get_protein_id
def correlation(df, rowvar=False):
"""
Calculate column-wise Pearson correlations using ``numpy.ma.corrcoef``
Input data is masked to ignore NaNs when calculating correlations. Data is returned as
a Pandas ``DataFrame`` of column_n x column_n dimensions, with column index copied to
both axes.
:param df: Pandas DataFrame
:return: Pandas DataFrame (n_columns x n_columns) of column-wise correlations
"""
# Create a correlation matrix for all correlations
# of the columns (filled with na for all values)
df = df.copy()
maskv = np.ma.masked_where(np.isnan(df.values), df.values)
cdf = np.ma.corrcoef(maskv, rowvar=False)
cdf = pd.DataFrame(np.array(cdf))
cdf.columns = df.columns
cdf.index = df.columns
cdf = cdf.sort_index(level=0, axis=1)
cdf = cdf.sort_index(level=0)
return cdf
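# Usage sketch (random data; a real input would be a samples-in-columns
# quantification matrix, possibly containing NaNs):
#
#   demo = pd.DataFrame(np.random.randn(50, 4), columns=list("ABCD"))
#   cdf = correlation(demo)   # 4 x 4 DataFrame of pairwise Pearson correlations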
def pca(df, n_components=2, mean_center=False, **kwargs):
"""
Principal Component Analysis, based on `sklearn.decomposition.PCA`
Performs a principal component analysis (PCA) on the supplied dataframe, selecting the first ``n_components`` components
in the resulting model. The model scores and weights are returned.
For more information on PCA and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html>`_.
:param df: Pandas ``DataFrame`` to perform the analysis on
:param n_components: ``int`` number of components to select
:param mean_center: ``bool`` mean center the data before performing PCA
:param kwargs: additional keyword arguments to `sklearn.decomposition.PCA`
:return: scores ``DataFrame`` of PCA scores n_components x n_samples
weights ``DataFrame`` of PCA weights n_variables x n_components
"""
    if not sklearn:
        raise ImportError('This library depends on scikit-learn (sklearn) to perform PCA analysis')
from sklearn.decomposition import PCA
df = df.copy()
# We have to zero fill, nan errors in PCA
df[ np.isnan(df) ] = 0
if mean_center:
mean = np.mean(df.values, axis=0)
df = df - mean
pca = PCA(n_components=n_components, **kwargs)
pca.fit(df.values.T)
scores = pd.DataFrame(pca.transform(df.values.T)).T
scores.index = ['Principal Component %d (%.2f%%)' % ( (n+1), pca.explained_variance_ratio_[n]*100 ) for n in range(0, scores.shape[0])]
scores.columns = df.columns
weights = pd.DataFrame(pca.components_).T
weights.index = df.index
weights.columns = ['Weights on Principal Component %d' % (n+1) for n in range(0, weights.shape[1])]
return scores, weights
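# Usage sketch: scores come back as n_components x n_samples and weights as
# n_variables x n_components, so a quick scores plot (e.g. with matplotlib)
# looks like this (demo_df is a placeholder DataFrame):
#
#   scores, weights = pca(demo_df, n_components=2, mean_center=True)
#   # plt.scatter(scores.iloc[0], scores.iloc[1])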
def plsda(df, a, b, n_components=2, mean_center=False, scale=True, **kwargs):
"""
Partial Least Squares Discriminant Analysis, based on `sklearn.cross_decomposition.PLSRegression`
Performs a binary group partial least squares discriminant analysis (PLS-DA) on the supplied
dataframe, selecting the first ``n_components``.
Sample groups are defined by the selectors ``a`` and ``b`` which are used to select columns
from the supplied dataframe. The result model is applied to the entire dataset,
projecting non-selected samples into the same space.
For more information on PLS regression and the algorithm used, see the `scikit-learn documentation <http://scikit-learn.org/stable/modules/generated/sklearn.cross_decomposition.PLSRegression.html>`_.
:param df: Pandas ``DataFrame`` to perform the analysis on
:param a: Column selector for group a
:param b: Column selector for group b
:param n_components: ``int`` number of components to select
:param mean_center: ``bool`` mean center the data before performing PLS regression
:param kwargs: additional keyword arguments to `sklearn.cross_decomposition.PLSRegression`
:return: scores ``DataFrame`` of PLSDA scores n_components x n_samples
weights ``DataFrame`` of PLSDA weights n_variables x n_components
"""
    if not sklearn:
        raise ImportError('This library depends on scikit-learn (sklearn) to perform PLS-DA')
from sklearn.cross_decomposition import PLSRegression
df = df.copy()
# We have to zero fill, nan errors in PLSRegression
df[ np.isnan(df) ] = 0
if mean_center:
mean = np.mean(df.values, axis=0)
df = df - mean
sxa, _ = df.columns.get_loc_level(a)
sxb, _ = df.columns.get_loc_level(b)
dfa = df.iloc[:, sxa]
dfb = df.iloc[:, sxb]
dff = pd.concat([dfa, dfb], axis=1)
y = np.ones(dff.shape[1])
y[np.arange(dfa.shape[1])] = 0
plsr = PLSRegression(n_components=n_components, scale=scale, **kwargs)
plsr.fit(dff.values.T, y)
# Apply the generated model to the original data
x_scores = plsr.transform(df.values.T)
scores = pd.DataFrame(x_scores.T)
scores.index = ['Latent Variable %d' % (n+1) for n in range(0, scores.shape[0])]
scores.columns = df.columns
weights = pd.DataFrame(plsr.x_weights_)
weights.index = df.index
weights.columns = ['Weights on Latent Variable %d' % (n+1) for n in range(0, weights.shape[1])]
loadings = | pd.DataFrame(plsr.x_loadings_) | pandas.DataFrame |
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import MiniBatchKMeans,DBSCAN
from sklearn.preprocessing import normalize
import pandas as pd
import numpy as np
from DBSCAN import MyDBSCAN
dataset_path = "../data/dataset_TIST2015/"
def Read_Data(filename,limit):
chunksize = 10 ** 6
count = 0
data_df = pd.DataFrame()
for chunk in pd.read_csv(dataset_path+filename,
chunksize= chunksize):
if(count == limit):
break
else:
data_df = pd.concat([data_df,chunk], ignore_index=True)
print("Chunk ",filename," ",count," : Processed")
count += 1
return data_df
def LocationCluster(finaldf_local):
def DBSCAN_Cluster(data):
#datanew = data.as_matrix(columns=["Latitude","Longitude"])
print("Clustering the Data")
labels = []
meters = 100
eps = meters / 10000
# clustered_lat_long = DBSCAN(eps = 0.01, min_samples = 100)
# clustered_lat_long.fit(data)
# labels = clustered_lat_long.labels_
for i in data.mb_cluster.unique():
subset = data.loc[data.mb_cluster == i]
clustered_lat_long = DBSCAN(eps = eps, min_samples = 100)
datanew = subset.as_matrix(columns=["Latitude","Longitude"])
clustered_lat_long.fit(datanew)
subset['dbscan_cluster'] = clustered_lat_long.labels_
data.loc[data.mb_cluster == i, 'dbscan_cluster'] = subset['dbscan_cluster']
print("Cluster Labels: ", labels)
return labels
def Normalize(scaled_data):
print("Normalizing Data according to Gaussian Distribution")
normalized_data = normalize(scaled_data)
x_normalized_data = | pd.DataFrame(normalized_data,columns=["Latitude","Longitude","mb_cluster"]) | pandas.DataFrame |
"""Base class for Machine Learning Efficacy based metrics for Time Series."""
import numpy as np
import pandas as pd
import rdt
from sklearn.model_selection import train_test_split
from sdmetrics.goal import Goal
from sdmetrics.timeseries.base import TimeSeriesMetric
class TimeSeriesEfficacyMetric(TimeSeriesMetric):
"""Base class for Machine Learning Efficacy based metrics on time series.
These metrics build a Machine Learning Classifier that learns to tell the synthetic
data apart from the real data, which later on is evaluated using Cross Validation.
The output of the metric is one minus the average ROC AUC score obtained.
Attributes:
name (str):
Name to use when reports about this metric are printed.
goal (sdmetrics.goal.Goal):
The goal of this metric.
min_value (Union[float, tuple[float]]):
Minimum value or values that this metric can take.
max_value (Union[float, tuple[float]]):
Maximum value or values that this metric can take.
"""
name = 'TimeSeries Efficacy'
goal = Goal.MAXIMIZE
min_value = 0.0
max_value = np.inf
@classmethod
def _validate_inputs(cls, real_data, synthetic_data, metadata, entity_columns, target):
metadata, entity_columns = super()._validate_inputs(
real_data, synthetic_data, metadata, entity_columns)
if 'target' in metadata:
target = metadata['target']
elif target is None:
raise TypeError('`target` must be passed either directly or inside `metadata`')
return entity_columns, target
@staticmethod
def _build_xy(transformer, data, entity_columns, target_column):
X = | pd.DataFrame() | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series( | SparseArray([1, 0, 0]) | pandas.core.arrays.SparseArray |
import seaborn as sns
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from datetime import datetime
def plot_probability_fit(results_df, run, size_obs, hue_vec, hue_col, facet_list, facet_col, out_dir='images/'):
cols_plot = 3
rows_plot = len(facet_list) // cols_plot
rows_plot += len(facet_list) % cols_plot
color_vec = sns.color_palette("cubehelix", len(hue_vec))
fig = plt.figure(figsize=(30, 20))
for j, facet in enumerate(facet_list):
temp_df = results_df[results_df[facet_col] == facet]
ax = fig.add_subplot(rows_plot, cols_plot, j + 1)
for ii, hue in enumerate(hue_vec):
sns.regplot(x='est_prob', y='true_prob', label=hue,
data=temp_df[temp_df[hue_col] == hue],
color=color_vec[ii],
lowess=True, scatter=False)
plt.scatter(x=np.arange(0, 1, 0.01), y=np.arange(0, 1, 0.01), linestyle=':', color='black', alpha=0.1)
plt.ylabel('True Probability', fontsize=20)
plt.xlabel('Estimated Probability', fontsize=20)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.title('%s: %s' % (facet_col, facet), fontsize=20)
plt.legend(loc='best')
outfile_name = 'probability_fit_%sobs_%s_%s_hue_%s_facet_%s.pdf' % (
size_obs, run, hue_col, facet_col,
datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')
)
plt.tight_layout()
plt.savefig(out_dir+outfile_name)
plt.close()
def plot_loss_classifiers(results_df, run, size_obs, class_col, x_col, out_dir='images/'):
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(1, 1, 1)
results_df[x_col + '_log'] = np.log(results_df[x_col].values)
sns.scatterplot(x=x_col + '_log', y='loss', hue=class_col, data=results_df, palette='cubehelix', s=200)
sns.lineplot(x=x_col + '_log', y='loss', hue=class_col, data=results_df, palette='cubehelix')
plt.ylabel('Cross-Entropy Loss (Logarithm)', fontsize=25)
plt.xlabel('Sample Size', fontsize=25)
plt.legend(loc='best', fontsize=25)
plt.title('Loss Function Value vs. Sample Size', fontsize=32)
plt.xticks(results_df[x_col + '_log'].unique(), [str(x) for x in results_df[x_col].unique()])
ax.legend(markerscale=3)
outfile_name = 'probability_loss_function_fit_%sobs_%s_%s.pdf' % (
size_obs, run, datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')
)
plt.tight_layout()
plt.savefig(out_dir + outfile_name)
plt.close()
def plot_loss_classifiers_specific_ss(results_df, run, size_obs, hue, class_col,
loss_col, marginal, out_dir='images/', entropy=False):
fig = plt.figure(figsize=(20, 10))
sns.boxplot(x=class_col, y=loss_col, hue=hue, data=results_df, palette='cubehelix')
label_y = 'Cross-Entropy Loss' if 'cross' in loss_col else 'Brier Score (Logarithm)'
plt.ylabel('%s' % label_y, fontsize=28)
plt.xlabel('Classifier', fontsize=28)
plt.xticks(fontsize=24)
plt.yticks(fontsize=24)
if entropy:
plt.axhline(y=results_df['entropy'].values[0], linestyle='--', color='k', alpha=0.75,
label='True Distribution Entropy')
plt.legend(loc='best', fontsize=28)
g_label = 'Parametric Fit of Marginal' if marginal else 'Reference'
plt.title('%s, %s Example, G: %s' % (label_y, run.title(), g_label), fontsize=32)
outfile_name = 'probability_loss_function_fit_%sobs_%s_%s_%s_%s.pdf' % (
size_obs, run, loss_col, 'marginalg' if marginal else 'referenceg',
datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')
)
plt.tight_layout()
plt.savefig(out_dir + outfile_name)
plt.close()
def plot_loss_classifiers_cde(results_df, run, b, hue, class_col, loss_col, marginal, out_dir='images/'):
fig = plt.figure(figsize=(20, 10))
sns.boxplot(x=class_col, y=loss_col, hue=hue, data=results_df, palette='cubehelix')
label_y = 'Pinball Loss'
plt.ylabel('%s (Logarithm)' % label_y, fontsize=28)
plt.xlabel('Quantile Classifier', fontsize=28)
plt.xticks(fontsize=24)
plt.yticks(fontsize=24)
plt.legend(loc='best', fontsize=28)
g_label = 'Parametric Fit of Marginal' if marginal else 'Reference'
plt.title('%s, %s Example, G: %s, B=%s' % (label_y, run.title(), g_label, b), fontsize=32)
outfile_name = 'quantile_pinball_loss_function_fit_bval%s_%s_%s_%s_%s.pdf' % (
b, run, loss_col, 'marginalg' if marginal else 'referenceg',
datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')
)
plt.tight_layout()
plt.savefig(out_dir + outfile_name)
plt.close()
def plot_loss_true_cde(results_df, run, hue, class_col, loss_col, out_dir='images/', extra_title=''):
fig = plt.figure(figsize=(20, 10))
sns.boxplot(x=class_col, y=loss_col, hue=hue, data=results_df, palette='cubehelix')
label_y = 'Pinball Loss'
plt.ylabel('%s (Logarithm)' % label_y, fontsize=28)
plt.xlabel('Quantile Classifier', fontsize=28)
plt.xticks(fontsize=24)
plt.yticks(fontsize=24)
plt.legend(loc='best', fontsize=28)
plt.title('%s, %s Example, %s' % (label_y, run.title(), extra_title), fontsize=32)
outfile_name = 'quantile_pinball_true_loss_function_fit_%s_%s_%s_%s.pdf' % (
run, loss_col, extra_title.replace(' ', '_'),
datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')
)
plt.tight_layout()
plt.savefig(out_dir + outfile_name)
plt.close()
def plot_cutoff_cde(results_df, run, x_col, y_col, hue, true_col, t0_val,
out_dir='images/', extra_title=''):
b_prime_values = results_df['b_prime'].unique()
for b_prime in b_prime_values:
fig = plt.figure(figsize=(20, 10))
temp_df = results_df[results_df['b_prime'] == b_prime]
sns.boxplot(x=x_col, y=y_col, hue=hue, data=temp_df, palette='cubehelix')
line_df = temp_df.sort_values(by=x_col)[[x_col, true_col]].groupby(x_col).mean().reset_index()
plt.scatter(x=range(len(line_df[x_col].values)), y=line_df[true_col].values, color='blue', s=250, label='True C')
plt.ylabel('Estimated Cutoff', fontsize=28)
plt.xlabel(r'$\theta_0$', fontsize=28)
plt.xticks(fontsize=24)
plt.yticks(fontsize=24)
plt.legend(loc='best', fontsize=28)
plt.title(r'Estimated and True Cutoffs, %s Example, %s, %s, $\theta_0$=%s' % (
run.title(), b_prime, extra_title, t0_val), fontsize=32)
outfile_name = 'cutoff_estimates_true_fit_%s_t0val_%s_bprime%s_%s_%s.pdf' % (
run, t0_val, b_prime.split('=')[-1].replace(' ', '_'), extra_title.replace(' ', '_'),
datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')
)
plt.tight_layout()
plt.savefig(out_dir + outfile_name)
plt.close()
def plot_diff_cov_cde(results_df, run, x_col, y_col, hue, out_dir='images/', extra_title=''):
b_prime_values = results_df['b_prime'].unique()
class_values = results_df[hue].unique()
cols_plot = 3
rows_plot = len(class_values) // cols_plot
rows_plot += len(class_values) % cols_plot if len(class_values) > 3 else 1
color_vec = sns.color_palette("cubehelix", len(class_values))
for b_prime in b_prime_values:
fig = plt.figure(figsize=(20, 8))
for j, classifier in enumerate(class_values):
ax = fig.add_subplot(rows_plot, cols_plot, j + 1)
temp_df = results_df[(results_df['b_prime'] == b_prime) & (results_df[hue] == classifier)]
plt.scatter(temp_df[x_col].values, temp_df[y_col].values, color=color_vec[j])
extra_sign = '^\star' if 'true' in extra_title.lower() else ''
plt.ylabel(r'$\mathbb{I}(\tau%s \leq C) - \mathbb{I}(\tau%s \leq \hat{C}_\theta)$' % (
extra_sign, extra_sign), fontsize=20)
plt.xlabel(r'$\Theta$', fontsize=20)
plt.title('%s, %s, %s' % (classifier.replace('\n', ''), '\n' + b_prime, extra_title), fontsize=24)
outfile_name = 'coverage_diff_plot_%s_%s_%s_%s.pdf' % (
run, b_prime.split('=')[-1].replace(' ', '_'), extra_title.replace(' ', '_'),
datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')
)
plt.tight_layout()
plt.savefig(out_dir + outfile_name)
plt.close()
def plot_error_rate_cutoff_true_tau(results_df, run, x_col, y_col, hue,
out_dir='images/cutoff_true_tau_analysis/', extra_title=''):
results_df_mean = results_df[[x_col, y_col, hue, 'rep']].groupby(
[x_col, hue, 'rep']).mean().reset_index()
results_df_mean[x_col + 'plot'] = results_df_mean[x_col].apply(lambda x: int(x.split('=')[1]))
fig = plt.figure(figsize=(12, 8))
sns.lineplot(x=x_col + 'plot', y=y_col, hue=hue, data=results_df_mean)
plt.xlabel("Training Sample Size B'", fontsize=20)
plt.ylabel("Average Accuracy \n (Across Repetitions)", fontsize=20)
plt.title(r'Average Accuracy in Estimating $k(\tau^{\star})$ vs. $\hat{k}(\tau^{\star})$', fontsize=24)
plt.ylim([0, 1.05])
plt.axhline(y=1, linestyle='--')
plt.legend(loc='best')
outfile_name = 'error_rate_true_tau_plot_%s_%s_%s.pdf' % (
run, extra_title.replace(' ', '_'),
datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')
)
plt.tight_layout()
plt.savefig(out_dir + outfile_name)
plt.close()
def plot_theta_errors_cutoff_true_tau(results_df, run, x_col, y_col, hue, across_col,
out_dir='images/cutoff_true_tau_analysis/', extra_title=''):
results_df_mean = results_df[[x_col, y_col, hue, across_col, 'rep']].groupby(
[x_col, hue, across_col, 'rep']).mean().reset_index()
results_df_mean[y_col] = 1 - results_df_mean[y_col].values
across_values = results_df[across_col].unique()
cols_plot = 3
rows_plot = len(across_values) // cols_plot
rows_plot += len(across_values) % cols_plot if len(across_values) > 3 else 1
fig = plt.figure(figsize=(20, 8))
for j, classifier in enumerate(across_values):
ax = fig.add_subplot(rows_plot, cols_plot, j + 1)
temp_df = results_df_mean[(results_df_mean[across_col] == classifier)]
        sns.lineplot(x=x_col, y=y_col, hue=hue, data=temp_df, palette='cubehelix')
plt.xlabel(r'$\theta_0$')
plt.ylabel("Average Accuracy \n (Across Repetitions)")
plt.title(r'Average Accuracy in Estimating $k(\tau^{\star})$ vs. $\hat{k}(\tau^{\star})$ %s' % (
'\n' + classifier.replace('\n', '-')))
plt.ylim([0, 1.05])
plt.axhline(y=1, linestyle='--')
outfile_name = 'theta_error_rate_plot_%s_%s_%s.pdf' % (
run, extra_title.replace(' ', '_'),
datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')
)
plt.tight_layout()
plt.savefig(out_dir + outfile_name)
plt.close()
def plot_error_rate_cutoff_est_tau(results_df, run, x_col, y_col, hue, t0_val, classifier,
out_dir='images/classifier_tau_analysis/', extra_title=''):
results_df_mean = results_df[[x_col, y_col, hue, 'rep']].groupby(
[x_col, hue, 'rep']).mean().reset_index()
results_df_mean[x_col + 'plot'] = results_df_mean[x_col].apply(lambda x: int(x.split('=')[1]))
fig = plt.figure(figsize=(12, 8))
sns.lineplot(x=x_col + 'plot', y=y_col, hue=hue, data=results_df_mean)
plt.xlabel("Training Sample Size B'", fontsize=20)
plt.ylabel("Average Accuracy \n (Across Repetitions)", fontsize=20)
plt.title(r'Average Accuracy in Estimating $k(\tau)$ vs. $\hat{k}(\tau)$ %s $\theta_0$=%s, Classifier=%s' % (
'\n', t0_val, classifier
), fontsize=24)
plt.ylim([0.5, 1.05])
plt.axhline(y=1, linestyle='--')
plt.legend(loc='best')
outfile_name = 'error_rate_classifier_%s_tau_plot_t0val_%s_%s_%s_%s.pdf' % (
classifier.replace('\n', '-'), run, extra_title.replace(' ', '_'), t0_val,
datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')
)
plt.tight_layout()
plt.savefig(out_dir + outfile_name)
plt.close()
def plot_theta_errors_cutoff_est_tau(results_df, run, x_col, y_col, hue, across_col, t0_val, classifier_discr,
out_dir='images/classifier_tau_analysis/', extra_title=''):
results_df_mean = results_df[[x_col, y_col, hue, across_col, 'rep']].groupby(
[x_col, hue, across_col, 'rep']).mean().reset_index()
results_df_mean[y_col] = 1 - results_df_mean[y_col].values
across_values = results_df[across_col].unique()
cols_plot = 3
rows_plot = len(across_values) // cols_plot
rows_plot += len(across_values) % cols_plot if len(across_values) > 3 else 1
fig = plt.figure(figsize=(20, 8))
for j, classifier in enumerate(across_values):
ax = fig.add_subplot(rows_plot, cols_plot, j + 1)
temp_df = results_df_mean[(results_df_mean[across_col] == classifier)]
        sns.lineplot(x=x_col, y=y_col, hue=hue, data=temp_df, palette='cubehelix')
plt.xlabel(r'$\theta_0$')
plt.ylabel("Average Accuracy \n (Across Repetitions)")
plt.title(r'Average Accuracy in Estimating $k(\tau)$ vs. $\hat{k}(\tau)$ %s $\theta_0$=%s, Classifier=%s ' % (
'\n' + classifier.replace('\n', '-'), t0_val, classifier_discr))
plt.ylim([0, 1.05])
plt.axhline(y=1, linestyle='--')
outfile_name = 'theta_classifier_%s_error_rate_plot_t0val_%s_%s_%s_%s.pdf' % (
classifier_discr.replace('\n', '-'), t0_val, run, extra_title.replace(' ', '_'),
datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')
)
plt.tight_layout()
plt.savefig(out_dir + outfile_name)
plt.close()
def plot_odds_fit(results_df, run, size_obs, hue_vec, hue_col, facet_list, facet_col, out_dir='images/'):
cols_plot = 3
rows_plot = len(facet_list) // cols_plot
rows_plot += len(facet_list) % cols_plot
color_vec = sns.color_palette("cubehelix", len(hue_vec))
fig = plt.figure(figsize=(30, 20))
for j, facet in enumerate(facet_list):
temp_df = results_df[results_df[facet_col] == facet]
ax = fig.add_subplot(rows_plot, cols_plot, j + 1)
for ii, hue in enumerate(hue_vec):
sns.regplot(x='est_odds', y='true_odds', label=hue,
data=temp_df[temp_df[hue_col] == hue],
color=color_vec[ii], lowess=True,
line_kws={'lw': 2}, scatter=False)
est_odds = temp_df['est_odds'].values
est_odds[est_odds == np.inf] = 0
x_95 = np.quantile(est_odds[~np.isnan(est_odds)], q=.75)
y_95 = np.quantile(temp_df['true_odds'].values, q=.75)
plt.scatter(x=np.linspace(start=0, stop=x_95, num=100), y=np.linspace(start=0, stop=y_95, num=100),
linestyle=':', color='black', alpha=0.1)
plt.ylabel('True Odds')
plt.xlabel('Estimated Odds')
plt.xlim([0, x_95])
plt.ylim([0, y_95])
plt.title('%s: %s' % (facet_col, facet))
plt.legend(loc='best')
outfile_name = 'odds_fit_%sobs_%s_%s_hue_%s_facet_%s.pdf' % (
size_obs, run, hue_col, facet_col,
datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')
)
plt.tight_layout()
plt.savefig(out_dir+outfile_name)
plt.close()
def or_over_t1(results_df, t1_linspace, or_vals, run, classifier, ss, t0_val, marginal, out_dir='images/'):
fig, ax = plt.subplots(figsize=(20, 10))
# Calculate minimum of results_df
plot_df = results_df[['t1_round', 'OR', 'sample_size_str']]
mean_df = plot_df.groupby(['t1_round', 'sample_size_str']).mean().reset_index()
min_value_true = results_df['true_min_val'].values[0]
g_label = 'Parametric Fit of Marginal' if marginal else 'Reference'
dict_min = {}
for ss_str in mean_df['sample_size_str'].unique():
temp_df = mean_df[mean_df['sample_size_str'] == ss_str]
idx = np.where(temp_df['OR'].values == np.min(temp_df['OR'].values))[0]
dict_min[ss_str] = t1_linspace[idx][0]
# plot_df['sample_size_and_min'] = plot_df['sample_size_str'].apply(
# lambda x: '%s, Minimum at: %s' % (x, round(dict_min[x], 3))
# )
sns.lineplot(x='t1_round', y='OR', hue='sample_size_str', data=plot_df)
or_plot_df = | pd.DataFrame.from_dict(data={'t1': t1_linspace, 'OR': or_vals}) | pandas.DataFrame.from_dict |
""" Summarize daily traffic data """
import pandas as pd
from pathlib import Path
data_path = Path("data/raw/data.csv")
d = | pd.read_csv(data_path, low_memory=False) | pandas.read_csv |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import re
import argparse
import json
import logging
import requests
from copy import deepcopy
import pandas as pd
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
def parse_args():
"""
parse input args
"""
parser = argparse.ArgumentParser()
# for local excel analysis
parser.add_argument(
"--log_file", type=str, default="result.log", help="ci result log path")
return parser.parse_args()
def _find_char(input_char):
"""
    find English characters in the input string
"""
result = re.findall(r'[a-zA-Z=_/0-9.]+', str(input_char))
return result
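# Example behaviour (illustrative): _find_char('--epochs') -> ['epochs'],
# _find_char('0.125\n') -> ['0.125']; characters outside [a-zA-Z=_/0-9.] are dropped.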
def process_log(file_name: str):
"""
process log
"""
train_list_dict = []
export_list_dict = []
predict_det_list_dict = []
with open(file_name, 'r') as f:
for i, data in enumerate(f.readlines()):
# print(i, data)
train_dict = {}
if "train.py" in data:
split_data = data.split(' ')
for line_value in split_data:
if "=" in line_value:
key = _find_char(line_value.split('=')[0])
value = _find_char(line_value.split('=')[-1])
# print(key, value)
train_dict[key[0]] = ''.join(value)
if "successfully" in split_data:
train_dict["status"] = "passed"
else:
train_dict["status"] = "failed"
# print(train_dict)
train_list_dict.append(train_dict)
export_dict = {}
if "export_model.py" in data:
split_data = data.split(' ')
for line_value in split_data:
if "=" in line_value:
key = _find_char(line_value.split('=')[0])
value = _find_char(line_value.split('=')[-1])
# print(key, value)
export_dict[key[0]] = ''.join(value)
if "successfully" in split_data:
export_dict["status"] = "passed"
else:
export_dict["status"] = "failed"
# print(export_dict)
export_list_dict.append(export_dict)
predict_det_dict = {}
if "predict_det.py" in data:
split_data = data.split(' ')
for line_value in split_data:
if "=" in line_value:
key = _find_char(line_value.split('=')[0])
value = _find_char(line_value.split('=')[-1])
# print(key, value)
predict_det_dict[key[0]] = ''.join(value)
if "successfully" in split_data:
predict_det_dict["status"] = "passed"
else:
predict_det_dict["status"] = "failed"
# print(predict_det_dict)
predict_det_list_dict.append(predict_det_dict)
return train_list_dict, export_list_dict, predict_det_list_dict
def main():
"""
main
"""
args = parse_args()
a, b, c = process_log(args.log_file)
a_1 = pd.DataFrame(a)
b_1 = | pd.DataFrame(b) | pandas.DataFrame |
import os
import pandas as pd
import functools
import pyarrow as pa
from pathlib import Path
import pyarrow.dataset as ds
import pyarrow.compute as pc
import pyarrow.parquet as pq
from pyarrow import fs
import rdflib
import glob
# TODO: provide method for id -> data
class Client:
def __init__(self, db_dir, bucket, s3_endpoint=None, region=None):
# monkey-patch RDFlib to deal with some issues w.r.t. oxrdflib
def namespaces(self):
if not self.store.namespaces():
return []
for prefix, namespace in self.store.namespaces():
                namespace = rdflib.URIRef(namespace)
yield prefix, namespace
rdflib.namespace.NamespaceManager.namespaces = namespaces
self.s3 = fs.S3FileSystem(endpoint_override=s3_endpoint, region=region)
self.ds = ds.parquet_dataset(f'{bucket}/_metadata', partitioning='hive', filesystem=self.s3)
self.store = rdflib.Dataset(store="OxSled")
self.store.default_union = True # queries default to the union of all graphs
self.store.open(db_dir)
def _table_exists(self, table):
try:
res = self.data_cache.table(table)
return res is not None
except RuntimeError:
return False
def sparql(self, query, sites=None):
if sites is None:
res = self.store.query(query)
            rows = [tuple(map(str, row)) for row in res]
df = pd.DataFrame.from_records(
rows, columns=[str(c) for c in res.vars]
)
return df
dfs = []
for site in sites:
graph_name = f"urn:{site}#"
graph = self.store.graph(graph_name)
res = graph.query(query)
            rows = [tuple(map(str, row)) for row in res]
df = pd.DataFrame.from_records(
rows, columns=[str(c) for c in res.vars]
)
df["site"] = site
dfs.append(df)
if len(dfs) == 0:
return | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import datetime
import time
import math
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt import black_litterman
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt.black_litterman import BlackLittermanModel
from statsmodels.tsa.arima_model import ARIMA
def filter(init, source, asset_arr=[1, 2, 3, 4], geo_arr=[7, 2, 3, 5, 4, 6, 1], score=3):
# Filter according to user's rank
asset_class = ["Equity", "Fixed Income",
"Mixed Allocation", "Money Market"]
geo_class = ["Africa& Middle West Region", "Asian Pacific Region", "European Region", "Greater China",
"International", "Latin American Region", "U.S."]
fund_num = init.shape[0]
filter_re = []
for i in range(0, fund_num):
asset_tmp = init['Asset Class'][i]
geo_tmp = init['Geographical Focus'][i]
if ((asset_tmp == asset_class[asset_arr[0] - 1] or asset_tmp == asset_class[asset_arr[1] - 1] or asset_tmp == asset_class[asset_arr[2] - 1]) and (geo_tmp == geo_class[geo_arr[0] - 1] or geo_tmp == geo_class[geo_arr[1] - 1] or geo_tmp == geo_class[geo_arr[2] - 1] or geo_tmp == geo_class[geo_arr[3] - 1])):
filter_re.append(init['ISIN'][i])
# If number of the funds filted is smaller than 100(can be specified), choose again
fund_filted_min = 100
for i in range(4, 7):
if (len(filter_re) < fund_filted_min):
for j in range(0, fund_num):
asset_tmp = init['Asset Class'][j]
if ((asset_tmp == asset_class[asset_arr[0] - 1] or asset_tmp == asset_class[asset_arr[1] - 1] or asset_tmp == asset_class[asset_arr[2] - 1]) and geo_class[geo_arr[i] - 1] == init['Geographical Focus'][j]):
filter_re.append(init['ISIN'][j])
else:
break
# data: names after filter + their risks
data = pd.DataFrame()
data.insert(loc=0, column='name', value=[])
data.insert(loc=1, column='risk', value=[])
for i in range(0, len(filter_re)):
col_index = source.columns.get_loc(filter_re[i])
price = source.iloc[:, col_index + 1]
price = price.dropna().reset_index(drop=True)
returns = np.diff(price) / price[:-1]
ann_risk = np.std(returns) * math.sqrt(252)
len_data = len(data)
data.loc[len_data, 'name'] = filter_re[i]
data.loc[len_data, 'risk'] = ann_risk
# Sort according to their risks
data_sort = data.sort_values(
axis=0, ascending=True, by='risk').reset_index(drop=True)
'''
print("\n---risk---")
print(data_sort)
print()
'''
# get corresponding funds according to scores
len_index = int(np.floor(len(data_sort['name']) / 5))
fil_name = []
if (score == 5):
for i in range(len_index * 4, len(data_sort['name'])):
fil_name.append(data_sort.loc[i, 'name'])
else:
for i in range(len_index * (score - 1), len_index * score):
fil_name.append(data_sort.loc[i, 'name'])
### result: name + returns
result = pd.DataFrame()
result.insert(loc=0, column='name', value=[])
result.insert(loc=1, column='returns', value=[])
for i in range(0, len(fil_name)):
col_index = source.columns.get_loc(fil_name[i])
price = source.iloc[:, col_index + 1]
price = price.dropna().reset_index(drop=True)
returns = np.diff(price) / price[:-1]
rets_add_one = returns + 1
cum_rets = rets_add_one.cumprod() - 1
len_data = len(result)
result.loc[len_data, 'name'] = fil_name[i]
result.loc[len_data, 'returns'] = cum_rets[len(cum_rets) - 1]
# Sort according to their returns
result_sort = result.sort_values(
axis=0, ascending=False, by='returns').reset_index(drop=True)
'''
print("\n---return---")
print(result_sort)
print()
'''
# name_final: 5 names
name_final = []
for i in range(0, 5):
name_final.append(result_sort.loc[i, 'name'])
# price_five: 5 names + their prices
price_five = pd.DataFrame()
for i in range(0, len(name_final)):
price_five.insert(loc=i * 2, column=i, value=[])
price_five.insert(loc=i * 2 + 1, column=name_final[i], value=[])
for i in range(0, len(name_final)):
col_index = source.columns.get_loc(name_final[i])
date = source.iloc[:, col_index]
price = source.iloc[:, col_index + 1]
price_five.iloc[:, i * 2 + 1] = price
price_five.iloc[:, i * 2] = date
# combine
tmp = pd.DataFrame()
tmp.insert(loc=0, column='date', value=[])
tmp.insert(loc=1, column=name_final[0], value=[])
tmp['date'] = price_five.iloc[:, 0]
tmp[name_final[0]] = price_five.iloc[:, 1]
for i in range(1, 5):
price_five.rename(columns={i: 'date'}, inplace=True)
tmp = pd.merge(
tmp, price_five.iloc[:, 2 * i:2 * i + 2], on='date', how='outer')
tmp = tmp.sort_values(axis=0, ascending=True,
by='date').reset_index(drop=True)
tmp = tmp.iloc[:len(source), :]
tmp = tmp.dropna(how='all')
data_date_tmp = list(tmp['date']).copy()
for i in range(0, len(data_date_tmp)):
if(type(data_date_tmp[i]) != type("aha")):
break
tempt = datetime.datetime.strptime(data_date_tmp[i], "%Y/%m/%d")
y = tempt.year
m = tempt.month
d = tempt.day
data_date_tmp[i] = y * 365 + m * 30 + d
tmp['trans'] = data_date_tmp
tmp = tmp.sort_values(axis=0, ascending=True,
by='trans').reset_index(drop=True)
tmp = tmp.iloc[:len(source), :6]
filter1 = tmp.set_index('date')
return filter1
# filter1.to_csv("filter1.csv")
# print(filter1)
# df1 = pd.DataFrame({'d': ['2018/1/1', np.nan,'2019/8/3'], 'd1': [1,2,np.nan]})
# df2 = pd.DataFrame({'d': ['2018/1/1', '2019/1/3'], 'd2': [1,3]})
# df=pd.merge(df1,df2, on='d', how='outer')
# df=df.sort_values(axis=0, ascending=True, by='d').reset_index(drop=True)
# print(df)
def seven(data):
#data = pd.read_csv("filter1.csv",header = 0,index_col=[0])
#print(data)
data_fund = pd.read_csv("newfund.csv", header=0, encoding="UTF-8")
fund_1 = data_fund.iloc[:, 7:9]
fund_2 = pd.concat([data_fund.iloc[:, 0], data_fund.iloc[:, 4]], axis=1)
max_date = data.shape[0]
fund_tmp = np.array([range(0, max_date+1000), range(0, max_date+1000)])
fund_tmp = fund_tmp.transpose()
fund_tmp = fund_tmp * -1.0
date_tmp = np.array([range(0, max_date+1000), range(0, max_date+1000)])
date_tmp = date_tmp.transpose()
date_tmp = date_tmp * -1.0
'''
fund_list_tmp = [[]for i in range(2)]
for i in range(0,-max_date,-1):
fund_list_tmp[0].append(i)
for i in range(0,-max_date,-1):
fund_list_tmp[1].append(i)
date_list_tmp = [[]for i in range(2)]
for i in range(0,-max_date,-1):
date_list_tmp[0].append(i)
for i in range(0,-max_date,-1):
date_list_tmp[1].append(i)
'''
fund_1_date = 1
fund_2_date = 1
while (type(fund_1.iloc[fund_1_date, 0]) == type("aha") or (not np.isnan(fund_1.iloc[fund_1_date, 0]))):
fund_1_date = fund_1_date+1
while (type(fund_2.iloc[fund_2_date, 0]) == type("aha") or (not np.isnan(fund_2.iloc[fund_2_date, 0]))):
fund_2_date = fund_2_date+1
if (fund_2_date == fund_2.shape[0]):
break;
data_date_tmp = list(data.index).copy()
for i in range(0, len(data_date_tmp)):
tmp = datetime.datetime.strptime(data_date_tmp[i], "%Y/%m/%d")
y = tmp.year
m = tmp.month
d = tmp.day
data_date_tmp[i] = y*365+m*30+d
'''
#print(fund_1.iloc[1564,0])
#print(type(fund_1.iloc[1564,0]))
#print(np.isnan(int(fund_1.iloc[1564,0])))
'''
fund_tmp[0:fund_1_date, 0] = fund_1.iloc[0:fund_1_date, 1]
fund_tmp[0:fund_2_date, 1] = fund_2.iloc[0:fund_2_date, 1]
for i in range(0, fund_1_date):
tmp = datetime.datetime.strptime(fund_1.iloc[i, 0], "%Y/%m/%d")
y = tmp.year
m = tmp.month
d = tmp.day
date_tmp[i, 0] = y*365+m*30+d
for i in range(0, fund_2_date):
tmp = datetime.datetime.strptime(fund_2.iloc[i, 0], "%Y/%m/%d")
y = tmp.year
m = tmp.month
d = tmp.day
date_tmp[i, 1] = y*365+m*30+d
# print(fund_tmp[-100:,:])
# print(date_tmp[-100:,:])
# print(max_date)
i = 0
while i <= max_date - 1:
if date_tmp[i, 0] < 0:
date_tmp[i, 0] = data_date_tmp[i]
fund_tmp[i, 0] = -1000000 # NaN
fund_1_date = fund_1_date + 1
elif date_tmp[i, 0] > data_date_tmp[i]:
if i < max_date - 1:
# print(date_tmp[i,0],data_date_tmp[i])
# print(i,fund_1_date)
# print(fund_1_date)
fund_tmp[(i+1):(fund_1_date+1),
0] = fund_tmp[(i):(fund_1_date), 0]
date_tmp[(i+1):(fund_1_date+1),
0] = date_tmp[(i):(fund_1_date), 0]
date_tmp[i, 0] = data_date_tmp[i]
fund_tmp[i, 0] = -1000000 # NaN
elif i == max_date - 1:
date_tmp[i, 0] = data_date_tmp[i]
fund_tmp[i, 0] = -1000000 # NaN
fund_1_date = fund_1_date + 1
elif date_tmp[i, 0] < data_date_tmp[i]:
# print(date_tmp[i,0],data_date_tmp[i])
# print(i,fund_1_date)
# print(fund_1_date)
fund_tmp[(i):(fund_1_date), 0] = fund_tmp[(i+1):(fund_1_date+1), 0]
date_tmp[(i):(fund_1_date), 0] = date_tmp[(i+1):(fund_1_date+1), 0]
fund_tmp[fund_1_date-1, 0] = -1000000 # NaN
date_tmp[fund_1_date-1, 0] = -1000000 # NaN
fund_1_date = fund_1_date - 1
i = i - 1
i = i + 1
i = 0
while i <= max_date - 1:
if date_tmp[i, 1] < 0:
date_tmp[i, 1] = data_date_tmp[i]
fund_tmp[i, 1] = -1000000 # NaN
fund_2_date = fund_2_date + 1
elif date_tmp[i, 1] > data_date_tmp[i]:
if i < max_date - 1:
# print(date_tmp[i,1],data_date_tmp[i])
# print(i,fund_2_date)
# print(fund_1_date)
fund_tmp[(i+1):(fund_2_date+1),
1] = fund_tmp[(i):(fund_2_date), 1]
date_tmp[(i+1):(fund_2_date+1),
1] = date_tmp[(i):(fund_2_date), 1]
                date_tmp[i, 1] = data_date_tmp[i]
fund_tmp[i, 1] = -1000000 # NaN
elif i == max_date - 1:
                date_tmp[i, 1] = data_date_tmp[i]
fund_tmp[i, 1] = -1000000 # NaN
fund_2_date = fund_2_date + 1
elif date_tmp[i, 1] < data_date_tmp[i]:
fund_tmp[(i):(fund_2_date), 1] = fund_tmp[(i+1):(fund_2_date+1), 1]
date_tmp[(i):(fund_2_date), 1] = date_tmp[(i+1):(fund_2_date+1), 1]
fund_tmp[fund_2_date-1, 1] = -1000000 # NaN
date_tmp[fund_2_date-1, 1] = -1000000 # NaN
fund_2_date = fund_2_date - 1
i = i - 1
i = i + 1
fund_tmp = fund_tmp[:max_date, :]
# print(fund_tmp[-60:,:])
fund_new = pd.DataFrame(
        fund_tmp, columns=["CSI 300", "S&P 500"], index=data.index)
data = pd.concat([data, fund_new], axis=1)
# print(fund_new.iloc[-60:,:])
# print(data.iloc[-60:,:])
return data
def fund_completion(data):
data = seven(data)
date = data.shape[0]
fund_num = data.shape[1]
# print(rows,fund_num)
for j in range(0, fund_num):
for i in range(0, date):
if (data.iloc[i, j] == -1000000 or np.isnan(data.iloc[i, j])):
if i != date - 1:
i_tmp = i + 1
while ((data.iloc[i_tmp, j] == -1000000 or np.isnan(data.iloc[i_tmp, j])) and i_tmp <= date - 1):
i_tmp = i_tmp + 1
if i == 0:
data.iloc[i, j] = data.iloc[i_tmp, j]
elif i_tmp == date - 1:
                        data.iloc[i, j] = data.iloc[i-1, j]
else:
data.iloc[i, j] = (
data.iloc[i-1, j] + data.iloc[i_tmp, j]) / 2
else:
data.iloc[i, j] = data.iloc[i-1, j]
return data
def weights(data):
#df = pd.read_csv("complete_new.csv",parse_dates=[0], index_col=0,infer_datetime_format=True)
df = data
fund = df.iloc[0:, 0:5]
#print(fund.columns)
mu = expected_returns.mean_historical_return(fund)
# print(mu)
cov_matrix = risk_models.sample_cov(fund)
# print(S)
'''
# Method 1: Markowitz Mean-Variance Model
ef = EfficientFrontier(mu, cov_matrix, weight_bounds=(0.05, 0.4))
raw_weights = ef.max_sharpe()
cleaned_weights = ef.clean_weights()
# print(cleaned_weights)
# ef.save_weights_to_file("weights.csv") # saves to file
ef.portfolio_performance(verbose=True)
weights = pd.DataFrame(cleaned_weights.values(),
index=cleaned_weights.keys(), columns=["weights"])
weights_T = pd.DataFrame(
weights.values.T, index=weights.columns, columns=weights.index)
# print(weights_T)
'''
# Method 2: Black-litterman Model
# Calculate Prior Return
spy_prices = df.iloc[0:, 6]
risk_pre = black_litterman.market_implied_risk_aversion(spy_prices)
mcaps = dict(zip(fund.columns,[1.0,1.0,1.0,1.0,1.0]))
prior = black_litterman.market_implied_prior_returns(mcaps, risk_pre, cov_matrix)
print(mu)
print(prior)
# Generate Absolute View by ARIMA
view_generated = [0.0, 0.0, 0.0, 0.0,0.0]
for fund_index in range (0,5):
model = ARIMA(df.iloc[:, fund_index],order=(5,1,0))
model_fit = model.fit(disp=-1, maxiter = 1000)
view_generated[fund_index] = (model_fit.forecast()[0] / df.iloc[-1,fund_index])- 1
#print("Predicted = {}" .format(model_fit.forecast()))
#print("Last = {}".format(df.iloc[-1, fund_index]))
viewdict = dict(zip(fund.columns,view_generated))
print(viewdict)
#print(mu)
bl_model = BlackLittermanModel(cov_matrix, absolute_views = viewdict, pi = prior)
rets = bl_model.bl_returns()
#print(rets)
# Generate Efficient Frontier and Optimize the model
ef = EfficientFrontier(rets, cov_matrix)
raw_weights = ef.max_sharpe()
cleaned_weights = ef.clean_weights()
ef.portfolio_performance(verbose=True)
# Output
df = df.append(bl_model.weights, sort=False)
# print(df[-10:])
data_index = pd.DataFrame(df.index, index=df.index)
return_data = | pd.concat([data_index, df], axis=1) | pandas.concat |
# The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import warnings
from datetime import datetime
from typing import Tuple, Union, Optional, Sequence, Dict, Any
import pandas as pd
from xcube.util.assertions import assert_given, assert_true, assert_in
from .constants import CRS_ID_TO_URI
from .constants import CRS_URI_TO_ID
from .constants import DEFAULT_CRS
from .constants import DEFAULT_TILE_SIZE
from .constants import DEFAULT_TIME_TOLERANCE
from .constants import SH_MAX_IMAGE_SIZE
from .constants import RESAMPLINGS
from .constants import DEFAULT_RESAMPLING
from .constants import DEFAULT_MOSAICKING_ORDER
from .constants import MOSAICKING_ORDERS
def _safe_int_div(x: int, y: int) -> int:
return (x + y - 1) // y
Bbox = Tuple[float, float, float, float]
TimeRange = Union[str,
pd.Timestamp,
Tuple[Union[None, str, pd.Timestamp],
Union[None, str, pd.Timestamp]]]
class CubeConfig:
"""
Sentinel Hub cube configuration.
:param dataset_name: Dataset name. If *collection_id* is given,
*dataset_name* must be omitted or set to "CUSTOM".
:param band_names: Optional sequence of band names. If omitted (=None)
all bands are included.
:param band_units: Band units. Optional.
:param band_sample_types: Band sample types. Optional.
:param tile_size: Tile size as tuple (width, height). Optional.
:param chunk_size: Deprecated. Use *tile_size*.
:param bbox: tuple of 4 numbers: (x1, y1, x2, y2)
:param geometry: Deprecated. Use *bbox*.
:param spatial_res: Spatial resolution. Must be > 0.
:param crs: Coordinate reference system. If None, original source
CRS will be used.
:param upsampling: Spatial upsampling method.
Must be one of 'NEAREST', 'BILINEAR', 'BICUBIC'.
Defaults to 'NEAREST'.
:param downsampling: Spatial downsampling method.
Must be one of 'NEAREST', 'BILINEAR', 'BICUBIC'.
Defaults to 'NEAREST'.
:param mosaicking_order: Order in which observations are
        temporally aggregated.
Must be one of 'mostRecent', 'leastRecent', 'leastCC'.
Defaults to 'mostRecent'.
:param time_range: Time range tuple; (start time, end time).
    :param time_period: A string denoting the temporal aggregation period,
such as "8D", "1W", "2W".
If None, all observations are included.
:param time_tolerance: The tolerance used to identify whether a dataset
should still be included within a time period.
    :param collection_id: Extra identifier used to identify a BYOC dataset.
If given, *dataset_name* must be omitted or set to "CUSTOM".
    :param four_d: Whether variables should appear as a fourth dimension
        rather than as separate arrays.
:param exception_type: The type of exception to be raised on error
"""
def __init__(self,
dataset_name: str = None,
band_names: Sequence[str] = None,
band_units: Union[str, Sequence[str]] = None,
band_sample_types: Union[str, Sequence[str]] = None,
tile_size: Union[str, Tuple[int, int]] = None,
chunk_size: Union[str, Tuple[int, int]] = None,
bbox: Bbox = None,
geometry: Union[str, Tuple[float, float, float, float]] = None,
spatial_res: float = None,
crs: str = None,
upsampling: str = None,
downsampling: str = None,
mosaicking_order: str = None,
time_range: TimeRange = None,
time_period: Union[str, pd.Timedelta] = None,
time_tolerance: Union[str, pd.Timedelta] = None,
collection_id: str = None,
four_d: bool = False,
exception_type=ValueError):
crs = crs or DEFAULT_CRS
if crs in CRS_URI_TO_ID:
crs = CRS_URI_TO_ID[crs]
assert_true(crs in CRS_ID_TO_URI, 'invalid crs')
upsampling = upsampling or DEFAULT_RESAMPLING
assert_in(upsampling, RESAMPLINGS, 'upsampling')
downsampling = downsampling or DEFAULT_RESAMPLING
assert_in(downsampling, RESAMPLINGS, 'downsampling')
mosaicking_order = mosaicking_order or DEFAULT_MOSAICKING_ORDER
assert_in(mosaicking_order, MOSAICKING_ORDERS, 'mosaicking_order')
if not dataset_name:
assert_given(collection_id, 'collection_id')
dataset_name = 'CUSTOM'
if collection_id:
assert_true(dataset_name == 'CUSTOM',
'dataset_name must be "CUSTOM"')
assert_given(spatial_res, 'spatial_res')
assert_true(spatial_res > 0.0,
'spatial_res must be a positive number')
assert_true(not (geometry and bbox),
'geometry and bbox cannot both be given')
if geometry is not None:
warnings.warn('the geometry parameter is no longer '
'supported, use bbox instead')
if bbox is None:
bbox = geometry
geometry = None
assert_given(bbox, 'bbox')
assert_given(time_range, 'time_range')
start_date, end_date = time_range \
if time_range is not None else (None, None)
start_date = start_date \
if start_date is not None else '1970-01-01'
end_date = end_date \
if end_date is not None else datetime.now().strftime("%Y-%m-%d")
time_range = start_date, end_date
time_period = time_period or None
time_tolerance = time_tolerance or None
if time_period is None and time_tolerance is None:
time_tolerance = DEFAULT_TIME_TOLERANCE
try:
if isinstance(bbox, str):
x1, y1, x2, y2 = tuple(map(float, bbox.split(',', maxsplit=3)))
else:
x1, y1, x2, y2 = bbox
except (TypeError, ValueError):
raise ValueError('bbox must be a tuple of 4 numbers')
if chunk_size is not None:
warnings.warn('the chunk_size parameter is no '
'longer supported, use tile_size instead')
if tile_size is None:
tile_size = chunk_size
width, height = (max(1, round((x2 - x1) / spatial_res)),
max(1, round((y2 - y1) / spatial_res)))
if tile_size is None:
tile_width, tile_height = None, None
elif isinstance(tile_size, str):
            parsed = tuple(map(int, tile_size.split(',', maxsplit=1)))
if len(parsed) == 1:
tile_width, tile_height = parsed[0], parsed[0]
elif len(parsed) == 2:
tile_width, tile_height = parsed
else:
raise exception_type(f'invalid tile size: {tile_size}')
else:
tile_width, tile_height = tile_size
if tile_width is None and tile_height is None:
num_pixels_per_tile = DEFAULT_TILE_SIZE * DEFAULT_TILE_SIZE
tile_width = math.ceil(math.sqrt(width * num_pixels_per_tile
/ height))
tile_height = (num_pixels_per_tile + tile_width - 1) // tile_width
elif tile_width is None:
tile_width = tile_height
elif tile_height is None:
tile_height = tile_width
if tile_width > SH_MAX_IMAGE_SIZE:
tile_width = SH_MAX_IMAGE_SIZE
if tile_height > SH_MAX_IMAGE_SIZE:
tile_height = SH_MAX_IMAGE_SIZE
if width < 1.5 * tile_width:
tile_width = width
else:
width = self._adjust_size(width, tile_width)
if height < 1.5 * tile_height:
tile_height = height
else:
height = self._adjust_size(height, tile_height)
x2, y2 = x1 + width * spatial_res, y1 + height * spatial_res
bbox = x1, y1, x2, y2
if isinstance(time_range, str):
time_range = tuple(map(lambda s: s.strip(),
time_range.split(',', maxsplit=1)
if ',' in time_range else (
time_range, time_range)))
time_range = tuple(time_range)
if len(time_range) == 1:
time_range = time_range + time_range
if len(time_range) != 2:
            raise exception_type('Time range must have two elements')
start_time, end_time = tuple(time_range)
if isinstance(start_time, str) or isinstance(end_time, str):
def convert_time(time_str):
return pd.to_datetime(time_str, utc=True)
start_time, end_time = tuple(map(convert_time, time_range))
time_range = start_time, end_time
if isinstance(time_period, str):
time_period = pd.to_timedelta(time_period)
if isinstance(time_tolerance, str):
time_tolerance = | pd.to_timedelta(time_tolerance) | pandas.to_timedelta |
"""
Solve Portfolio problem for one-year simulation
"""
import os
import numpy as np
from utils.general import make_sure_path_exists
import pandas as pd
from problem_classes.portfolio import PortfolioExample
# import osqppurepy as osqp
import osqp
class PortfolioParametric(object):
def __init__(self,
osqp_settings,
n_factors=100,
n_assets=3000,
n_months_per_risk_model_update=3,
n_years=4):
"""
Generate Portfolio problem as parametric QP
Args:
osqp_settings: osqp solver settings
n_factors: number of factors in risk model
n_assets: number of assets to be optimized
n_months_per_risk_model_update: number of months for every risk
model update
n_years: number of years to run the simulation
"""
self.osqp_settings = osqp_settings
self.n_factors = n_factors
self.n_assets = n_assets
self.n_qp_per_month = 20 # Number of trading days
self.n_qp_per_update = self.n_qp_per_month * \
n_months_per_risk_model_update
self.n_problems = n_years * 240
        self.alpha = 0.1  # Relaxation parameter between new data and old ones
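        # Illustrative only (not from the original benchmark): osqp_settings is
        # the plain keyword dict forwarded to osqp.OSQP().setup(), e.g.
        #   osqp_settings = dict(verbose=False, eps_abs=1e-03, eps_rel=1e-03)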
def solve(self):
"""
Solve Portfolio problem
"""
print("Solve Portfolio problem for dimension %i" % self.n_factors)
# Create example instance
instance = PortfolioExample(self.n_factors, n=self.n_assets)
# Store number of nonzeros in F and D for updates
nnzF = instance.F.nnz
# Store alpha
alpha = self.alpha
'''
Solve problem without warm start
'''
# print("Solving without warm start")
# Solution directory
no_ws_path = os.path.join('.', 'results', 'parametric_problems',
'OSQP no warmstart',
'Portfolio',
)
# Create directory for the results
make_sure_path_exists(no_ws_path)
# Check if solution already exists
n_file_name = os.path.join(no_ws_path, 'n%i.csv' % self.n_factors)
if not os.path.isfile(n_file_name):
res_list_no_ws = [] # Initialize results
for i in range(self.n_problems):
qp = instance.qp_problem
# Solve problem
m = osqp.OSQP()
m.setup(qp['P'], qp['q'], qp['A'], qp['l'], qp['u'],
**self.osqp_settings)
r = m.solve()
# DEBUG
# print("niter = %d" % r.info.iter)
solution_dict = {'status': [r.info.status],
'run_time': [r.info.run_time],
'iter': [r.info.iter],
'obj_val': [r.info.obj_val]}
if r.info.status != "solved":
print("OSQP no warmstart did not solve the problem")
import ipdb; ipdb.set_trace()
res_list_no_ws.append(pd.DataFrame(solution_dict))
# Update model
current_mu = instance.mu
current_F_data = instance.F.data
current_D_data = instance.D.data
if i % self.n_qp_per_update == 0:
# print("Update everything: mu, F, D")
# Update everything
new_mu = alpha * np.random.randn(instance.n) + (1 - alpha) * current_mu
new_F = instance.F.copy()
new_F.data = alpha * np.random.randn(nnzF) + (1 - alpha) * current_F_data
new_D = instance.D.copy()
new_D.data = alpha * np.random.rand(instance.n) * \
np.sqrt(instance.k) + (1 - alpha) * current_D_data
instance.update_parameters(new_mu, new_F, new_D)
else:
# print("Update only mu")
# Update only mu
new_mu = alpha * np.random.randn(instance.n) + (1 - alpha) * current_mu
instance.update_parameters(new_mu)
# Get full warm-start
res_no_ws = | pd.concat(res_list_no_ws) | pandas.concat |
import unittest
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
import numpy as np
from ITMO_FS.embedded import *
np.random.seed(42)
class TestCases(unittest.TestCase):
data, target = np.random.randint(10, size=(100, 20)), np.random.randint(10, size=(100,))
feature_names = [''.join(['f', str(i)]) for i in range(data.shape[1])]
feature_names_override = [''.join(['g', str(i)]) for i in range(data.shape[1])]
def test_MOSS(self):
# MOSS
res = MOS().fit_transform(self.data, self.target, sampling=True)
assert self.data.shape[0] == res.shape[0]
print("MOSS:", self.data.shape, '--->', res.shape)
def test_MOSNS(self):
# MOSNS
res = MOS().fit_transform(self.data, self.target, sampling=False)
assert self.data.shape[0] == res.shape[0]
print("MOSNS:", self.data.shape, '--->', res.shape)
def test_losses(self):
for loss in ['log', 'hinge']:
res = MOS(loss=loss).fit_transform(self.data, self.target)
assert self.data.shape[0] == res.shape[0]
def test_df(self):
f = MOS()
df = f.fit_transform(pd.DataFrame(self.data), pd.DataFrame(self.target), sampling=True)
arr = f.fit_transform(self.data, self.target, sampling=True)
np.testing.assert_array_equal(df, arr)
df = f.fit_transform(pd.DataFrame(self.data), pd.DataFrame(self.target), sampling=False)
arr = f.fit_transform(self.data, self.target, sampling=False)
np.testing.assert_array_equal(df, arr)
def test_pipeline(self):
# FS
p = Pipeline([('FS1', MOS())])
p.fit(self.data, self.target)
res = p.transform(self.data)
assert self.data.shape[0] == res.shape[0]
# FS - estim
p = Pipeline([('FS1', MOS()), ('E1', LogisticRegression())])
p.fit(self.data, self.target)
assert 0 <= p.score(self.data, self.target) <= 1
# FS - FS
p = Pipeline([('FS1', MOS(loss='log')), ('FS2', MOS(loss='hinge'))])
p.fit(self.data, self.target)
res = p.transform(self.data)
assert self.data.shape[0] == res.shape[0]
# FS - FS - estim
p = Pipeline([('FS1', MOS(loss='log')), ('FS2', MOS(loss='hinge')), ('E1', LogisticRegression())])
p.fit(self.data, self.target)
assert 0 <= p.score(self.data, self.target) <= 1
def test_feature_names_np(self):
f = MOS()
arr = f.fit_transform(self.data, self.target, feature_names=self.feature_names, sampling=True)
assert np.all([feature in self.feature_names for feature in f.get_feature_names()])
arr = f.fit_transform(self.data, self.target, feature_names=self.feature_names, sampling=False)
assert np.all([feature in self.feature_names for feature in f.get_feature_names()])
def test_feature_names_df(self):
f = MOS()
arr = f.fit_transform(pd.DataFrame(self.data), pd.DataFrame(self.target), feature_names=self.feature_names, sampling=True)
assert np.all([feature in self.feature_names for feature in f.get_feature_names()])
arr = f.fit_transform(pd.DataFrame(self.data), pd.DataFrame(self.target), feature_names=self.feature_names, sampling=False)
assert np.all([feature in self.feature_names for feature in f.get_feature_names()])
def test_feature_names_df_defined(self):
dfX = pd.DataFrame(self.data)
dfX.columns = self.feature_names
f = MOS()
arr = f.fit_transform(dfX, pd.DataFrame(self.target), sampling=True)
assert np.all([feature in self.feature_names for feature in f.get_feature_names()])
arr = f.fit_transform(dfX, pd.DataFrame(self.target), sampling=False)
assert np.all([feature in self.feature_names for feature in f.get_feature_names()])
def test_feature_names_df_defined_override(self):
dfX = | pd.DataFrame(self.data) | pandas.DataFrame |
from itertools import groupby
from typing import Sequence, Tuple, Union, NamedTuple
from enum import Enum
import numpy as np
import pandas as pd
from .interface import csv_series
from .tags import get_common_tag, get_tag, drop_duplicated_instances
from .utils import Series, Instance, ORIENTATION, extract_dims, split_floats, zip_equal, contains_info, collect
from .exceptions import *
from .misc import get_image
__all__ = [
'get_orientation_matrix', 'get_slice_plane', 'get_slices_plane', 'Plane', 'order_series',
'get_slice_orientation', 'get_slices_orientation', 'SlicesOrientation',
'get_slice_locations', 'locations_to_spacing', 'get_slice_spacing', 'get_pixel_spacing',
'get_voxel_spacing', 'get_image_position_patient', 'drop_duplicated_slices',
# deprecated
'get_axes_permutation', 'get_flipped_axes', 'get_image_plane',
'restore_orientation_matrix'
]
class Plane(Enum):
Sagittal, Coronal, Axial = 0, 1, 2
class SlicesOrientation(NamedTuple):
transpose: bool
flip_axes: tuple
def _get_image_position_patient(instance: Instance):
return np.array(list(map(float, get_tag(instance, 'ImagePositionPatient'))))
@csv_series
def get_image_position_patient(series: Series):
"""Returns ImagePositionPatient stacked into array."""
return np.stack(list(map(_get_image_position_patient, series)))
def _get_image_orientation_patient(instance: Instance):
return np.array(list(map(float, get_tag(instance, 'ImageOrientationPatient'))))
def _get_orientation_matrix(instance: Instance):
row, col = _get_image_orientation_patient(instance).reshape(2, 3)
return np.stack([row, col, np.cross(row, col)])
@csv_series
def get_orientation_matrix(series: Series) -> np.ndarray:
"""
Returns a 3 x 3 orthogonal transition matrix from the image-based basis to the patient-based basis.
    Rows are coordinates of image-based basis vectors in the patient-based basis, while columns are
    coordinates of patient-based basis vectors in the image-based basis.
"""
om = _get_orientation_matrix(series[0])
if not np.all([np.allclose(om, _get_orientation_matrix(i)) for i in series]):
raise ConsistencyError('Orientation matrix varies across slices.')
return om
def _get_image_planes(orientation_matrix):
return tuple(Plane(i) for i in np.argmax(np.abs(orientation_matrix), axis=1))
def get_slice_plane(instance: Instance) -> Plane:
return _get_image_planes(_get_orientation_matrix(instance))[2]
@csv_series
def get_slices_plane(series: Series) -> Plane:
unique_planes = set(map(get_slice_plane, series))
if len(unique_planes) > 1:
raise ConsistencyError('Slice plane varies across slices.')
plane, = unique_planes
return plane
def get_slice_orientation(instance: Instance) -> SlicesOrientation:
om = _get_orientation_matrix(instance)
planes = _get_image_planes(om)
if set(planes) != {Plane.Sagittal, Plane.Coronal, Plane.Axial}:
        raise ValueError('Main image planes cannot be treated as sagittal, coronal and axial.')
if planes[2] != Plane.Axial:
        raise NotImplementedError('We do not know what the normal orientation is for a non-axial slice.')
transpose = planes[0] == Plane.Coronal
if transpose:
om = om[[1, 0, 2]]
flip_axes = []
if om[1, 1] < 0:
flip_axes.append(0)
if om[0, 0] < 0:
flip_axes.append(1)
return SlicesOrientation(transpose=transpose, flip_axes=tuple(flip_axes))
@csv_series
def get_slices_orientation(series: Series) -> SlicesOrientation:
orientations = set(map(get_slice_orientation, series))
if len(orientations) > 1:
raise ConsistencyError('Slice orientation varies across slices.')
orientation, = orientations
return orientation
@csv_series
def order_series(series: Series, decreasing=True) -> Series:
index = get_slices_plane(series).value
return sorted(series, key=lambda s: _get_image_position_patient(s)[index], reverse=decreasing)
@csv_series
def get_slice_locations(series: Series) -> np.ndarray:
"""
    Computes slice locations from ImagePositionPatient.
    WARNING: the order of slice locations can be either increasing or decreasing for an ordered series
(see order_series).
"""
om = get_orientation_matrix(series)
return np.array([_get_image_position_patient(i) @ om[-1] for i in series])
@csv_series
def _get_slices_deltas(series: Series) -> np.ndarray:
"""Returns distances between slices."""
slice_locations = get_slice_locations(series)
deltas = np.abs(np.diff(sorted(slice_locations)))
return deltas
@csv_series
def get_slice_spacing(series: Series, max_delta: float = 0.1, errors: bool = True) -> float:
"""
Returns constant distance between slices of a series.
If the series doesn't have constant spacing - raises ValueError if ``errors`` is True,
returns ``np.nan`` otherwise.
"""
try:
locations = get_slice_locations(series)
except ConsistencyError:
if errors:
raise
return np.nan
return locations_to_spacing(sorted(locations), max_delta, errors)
def locations_to_spacing(locations: Sequence[float], max_delta: float = 0.1, errors: bool = True):
def throw(err):
if errors:
raise err
return np.nan
if len(locations) <= 1:
return throw(ValueError('Need at least 2 locations to calculate spacing.'))
deltas = np.diff(locations)
if len(np.unique(np.sign(deltas))) != 1:
return throw(ConsistencyError('The locations are not strictly monotonic.'))
deltas = np.abs(deltas)
min_, max_ = deltas.min(), deltas.max()
diff = max_ - min_
if diff > max_delta:
return throw(ConsistencyError(f'Non-constant spacing, ranging from {min_} to {max_} (delta: {diff}).'))
return deltas.mean()
@csv_series
def get_pixel_spacing(series: Series) -> Tuple[float, float]:
"""Returns pixel spacing (two numbers) in mm."""
pixel_spacings = np.stack([s.PixelSpacing for s in series])
if (pixel_spacings.max(axis=0) - pixel_spacings.min(axis=0)).max() > 0.01:
raise ConsistencyError('Pixel spacing varies across slices.')
return pixel_spacings[0]
@csv_series
def get_voxel_spacing(series: Series):
"""Returns voxel spacing: pixel spacing and distance between slices' centers."""
dx, dy = get_pixel_spacing(series)
dz = get_slice_spacing(series)
return dx, dy, dz
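# Minimal composition sketch (hypothetical, not part of the original API): given
# an already-loaded series of pydicom datasets, the helpers above combine as
# follows; variable names are illustrative.
def _example_voxel_spacing(series: Series):
    ordered = order_series(series)           # sort slices along the slice axis
    dx, dy, dz = get_voxel_spacing(ordered)  # in-plane spacing + inter-slice distance
    return ordered, (dx, dy, dz)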
@csv_series
def get_image_size(series: Series):
rows = get_common_tag(series, 'Rows')
columns = get_common_tag(series, 'Columns')
slices = len(series)
return rows, columns, slices
def drop_duplicated_slices(series: Series, tolerance_hu=1) -> Series:
series = drop_duplicated_instances(series)
indices = list(range(len(series)))
slice_locations = get_slice_locations(series)
try:
instance_numbers = [get_tag(i, 'InstanceNumber') for i in series]
indices = sorted(indices, key=lambda i: (slice_locations[i], instance_numbers[i]))
except TagMissingError:
indices = sorted(indices, key=lambda i: slice_locations[i])
new_indices = []
for _, group in groupby(indices, key=lambda i: slice_locations[i]):
group = list(group)
image = get_image(series[group[0]])
if not all(np.allclose(get_image(series[i]), image, atol=tolerance_hu) for i in group):
raise ValueError(
'Slices have same locations, but different pixel arrays.')
new_indices.append(group[0])
return [series[i] for i in sorted(new_indices)]
# ------------------ DEPRECATED ------------------------
get_image_plane = np.deprecate(get_slices_plane, old_name='get_image_plane')
get_xyz_spacing = np.deprecate(get_voxel_spacing, old_name='get_xyz_spacing')
@np.deprecate
def get_axes_permutation(row: pd.Series):
return np.abs(get_orientation_matrix(row)).argmax(axis=0)
@np.deprecate
@csv_series
@collect
def get_flipped_axes(series: Series):
m = get_orientation_matrix(series)
for axis, j in enumerate(np.abs(m).argmax(axis=1)[:2]):
if m[axis, j] < 0:
yield axis
@np.deprecate
def order_slice_locations(dicom_metadata: pd.Series):
locations = split_floats(dicom_metadata.SliceLocations)
if np.any([np.isnan(loc) for loc in locations]):
raise ValueError("Some SliceLocations are missing")
# Do not put `restore_slice_location` here,
# since `should_flip` has unexpected behaviour in that case.
return np.array(sorted(zip_equal(
split_floats(dicom_metadata.InstanceNumbers),
locations
))).T
# TODO: something must return transpose order, so we can apply it to all important metadata
# TODO: take PatientPosition into account
# def transpose_series(series: Series, plane: Union[Plane, int] = Plane.Axial):
# pass
# TODO: rewrite based on deployment code, specifically use transpose based on Plane
@np.deprecate
def normalize_orientation(image: np.ndarray, row: pd.Series):
"""
Transposes and flips the ``image`` to standard (Coronal, Sagittal, Axial) orientation.
Warnings
--------
Changing image orientation. New image orientation will not coincide with metadata!
"""
if not contains_info(row, *ORIENTATION):
raise ValueError('There is no enough metadata to standardize the image orientation.')
if np.isnan(get_orientation_matrix(row)).any():
raise ValueError('There is no enough metadata to standardize the image orientation.')
image = np.flip(image, axis=get_flipped_axes(row))
return image.transpose(*get_axes_permutation(row))
# TODO: legacy?
def restore_orientation_matrix(metadata: Union[pd.Series, pd.DataFrame]):
"""
Fills nan values (if possible) in ``metadata``'s ImageOrientationPatient* rows.
Required columns: ImageOrientationPatient[0-5]
Notes
-----
The input dataframe will be mutated.
"""
def restore(vector):
null = | pd.isnull(vector) | pandas.isnull |
"""
This is a simplified script for the True-voice Intent classification problem, using a dataset from https://github.com/PyThaiNLP/truevoice-intent
The script is adapted from https://github.com/PyThaiNLP/truevoice-intent/blob/master/classification.ipynb
"""
import pandas as pd
import deepcut
from deepcut import DeepcutTokenizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_recall_fscore_support, accuracy_score
if __name__ == '__main__':
df = | pd.read_csv('mari-intent/mari_train.csv') | pandas.read_csv |
import pytest
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_code_map
from pandas.errors import OutOfBoundsDatetime
from pandas import Period, Timestamp, offsets
class TestFreqConversion:
"""Test frequency conversion of date objects"""
@pytest.mark.parametrize("freq", ["A", "Q", "M", "W", "B", "D"])
def test_asfreq_near_zero(self, freq):
# GH#19643, GH#19650
per = Period("0001-01-01", freq=freq)
tup1 = (per.year, per.hour, per.day)
prev = per - 1
assert prev.ordinal == per.ordinal - 1
tup2 = (prev.year, prev.month, prev.day)
assert tup2 < tup1
def test_asfreq_near_zero_weekly(self):
# GH#19834
per1 = Period("0001-01-01", "D") + 6
per2 = Period("0001-01-01", "D") - 6
week1 = per1.asfreq("W")
week2 = per2.asfreq("W")
assert week1 != week2
assert week1.asfreq("D", "E") >= per1
assert week2.asfreq("D", "S") <= per2
def test_to_timestamp_out_of_bounds(self):
# GH#19643, used to incorrectly give Timestamp in 1754
per = Period("0001-01-01", freq="B")
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
per.to_timestamp()
def test_asfreq_corner(self):
val = Period(freq="A", year=2007)
result1 = val.asfreq("5t")
result2 = val.asfreq("t")
expected = Period("2007-12-31 23:59", freq="t")
assert result1.ordinal == expected.ordinal
assert result1.freqstr == "5T"
assert result2.ordinal == expected.ordinal
assert result2.freqstr == "T"
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq="A", year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1)
ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4)
ival_A_to_M_start = Period(freq="M", year=2007, month=1)
ival_A_to_M_end = Period(freq="M", year=2007, month=12)
ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_A_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_A_to_T_end = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_A_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_A_to_S_end = Period(
freq="S", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1)
assert ival_A.asfreq("Q", "S") == ival_A_to_Q_start
assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end
assert ival_A.asfreq("M", "s") == ival_A_to_M_start
assert ival_A.asfreq("M", "E") == ival_A_to_M_end
assert ival_A.asfreq("W", "S") == ival_A_to_W_start
assert ival_A.asfreq("W", "E") == ival_A_to_W_end
assert ival_A.asfreq("B", "S") == ival_A_to_B_start
assert ival_A.asfreq("B", "E") == ival_A_to_B_end
assert ival_A.asfreq("D", "S") == ival_A_to_D_start
assert ival_A.asfreq("D", "E") == ival_A_to_D_end
assert ival_A.asfreq("H", "S") == ival_A_to_H_start
assert ival_A.asfreq("H", "E") == ival_A_to_H_end
assert ival_A.asfreq("min", "S") == ival_A_to_T_start
assert ival_A.asfreq("min", "E") == ival_A_to_T_end
assert ival_A.asfreq("T", "S") == ival_A_to_T_start
assert ival_A.asfreq("T", "E") == ival_A_to_T_end
assert ival_A.asfreq("S", "S") == ival_A_to_S_start
assert ival_A.asfreq("S", "E") == ival_A_to_S_end
assert ival_AJAN.asfreq("D", "S") == ival_AJAN_to_D_start
assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end
assert ival_AJUN.asfreq("D", "S") == ival_AJUN_to_D_start
assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end
assert ival_ANOV.asfreq("D", "S") == ival_ANOV_to_D_start
assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end
assert ival_A.asfreq("A") == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq="Q", year=2007, quarter=1)
ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq="A", year=2007)
ival_Q_to_M_start = Period(freq="M", year=2007, month=1)
ival_Q_to_M_end = Period(freq="M", year=2007, month=3)
ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_Q_to_T_end = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_Q_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_Q_to_S_end = Period(
freq="S", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30)
assert ival_Q.asfreq("A") == ival_Q_to_A
assert ival_Q_end_of_year.asfreq("A") == ival_Q_to_A
assert ival_Q.asfreq("M", "S") == ival_Q_to_M_start
assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end
assert ival_Q.asfreq("W", "S") == ival_Q_to_W_start
assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end
assert ival_Q.asfreq("B", "S") == ival_Q_to_B_start
assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end
assert ival_Q.asfreq("D", "S") == ival_Q_to_D_start
assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end
assert ival_Q.asfreq("H", "S") == ival_Q_to_H_start
assert ival_Q.asfreq("H", "E") == ival_Q_to_H_end
assert ival_Q.asfreq("Min", "S") == ival_Q_to_T_start
assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end
assert ival_Q.asfreq("S", "S") == ival_Q_to_S_start
assert ival_Q.asfreq("S", "E") == ival_Q_to_S_end
assert ival_QEJAN.asfreq("D", "S") == ival_QEJAN_to_D_start
assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end
assert ival_QEJUN.asfreq("D", "S") == ival_QEJUN_to_D_start
assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end
assert ival_Q.asfreq("Q") == ival_Q
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq="M", year=2007, month=1)
ival_M_end_of_year = Period(freq="M", year=2007, month=12)
ival_M_end_of_quarter = Period(freq="M", year=2007, month=3)
ival_M_to_A = Period(freq="A", year=2007)
ival_M_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq="B", year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_M_to_T_end = Period(
freq="Min", year=2007, month=1, day=31, hour=23, minute=59
)
ival_M_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_M_to_S_end = Period(
freq="S", year=2007, month=1, day=31, hour=23, minute=59, second=59
)
assert ival_M.asfreq("A") == ival_M_to_A
assert ival_M_end_of_year.asfreq("A") == ival_M_to_A
assert ival_M.asfreq("Q") == ival_M_to_Q
assert ival_M_end_of_quarter.asfreq("Q") == ival_M_to_Q
assert ival_M.asfreq("W", "S") == ival_M_to_W_start
assert ival_M.asfreq("W", "E") == ival_M_to_W_end
assert ival_M.asfreq("B", "S") == ival_M_to_B_start
assert ival_M.asfreq("B", "E") == ival_M_to_B_end
assert ival_M.asfreq("D", "S") == ival_M_to_D_start
assert ival_M.asfreq("D", "E") == ival_M_to_D_end
assert ival_M.asfreq("H", "S") == ival_M_to_H_start
assert ival_M.asfreq("H", "E") == ival_M_to_H_end
assert ival_M.asfreq("Min", "S") == ival_M_to_T_start
assert ival_M.asfreq("Min", "E") == ival_M_to_T_end
assert ival_M.asfreq("S", "S") == ival_M_to_S_start
assert ival_M.asfreq("S", "E") == ival_M_to_S_end
assert ival_M.asfreq("M") == ival_M
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq="W", year=2007, month=1, day=1)
ival_WSUN = Period(freq="W", year=2007, month=1, day=7)
ival_WSAT = Period(freq="W-SAT", year=2007, month=1, day=6)
ival_WFRI = Period(freq="W-FRI", year=2007, month=1, day=5)
ival_WTHU = Period(freq="W-THU", year=2007, month=1, day=4)
ival_WWED = Period(freq="W-WED", year=2007, month=1, day=3)
ival_WTUE = Period(freq="W-TUE", year=2007, month=1, day=2)
ival_WMON = Period(freq="W-MON", year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq="D", year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq="D", year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq="D", year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq="D", year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq="D", year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq="D", year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq="D", year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq="D", year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq="D", year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq="D", year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq="D", year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq="D", year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq="W", year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq="W", year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq="W", year=2007, month=1, day=31)
ival_W_to_A = Period(freq="A", year=2007)
ival_W_to_Q = | Period(freq="Q", year=2007, quarter=1) | pandas.Period |
################################################################################
# Copyright (C) 2016-2019 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
import os
import sys
import argparse
import re
import pandas as pd
def RunMain():
userArgs = sys.argv[1:]
argParser = argparse.ArgumentParser()
argParser.add_argument("current_file", help="path where the current results are located")
argParser.add_argument("new_file", help="path where the new files are located")
argParser.add_argument("combined_file", help="path where the combined results are located")
args = argParser.parse_args(userArgs)
currentFileName = args.current_file
newFileName = args.new_file
combinedFileName = args.combined_file
current_data = pd.read_csv(currentFileName)
headers = current_data.columns.values.tolist()
keys = headers[0:len(headers)-4]
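    # everything before the last four columns identifies a problem/configuration;
    # the trailing four columns are presumably the measured results being compared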
new_data = | pd.read_csv(newFileName) | pandas.read_csv |
import time
import numpy as np
import pandas as pd
from numpy import dtype
dtypes = {'timestamp': np.int64, 'Asset_ID': np.int8,
'Count': np.int32, 'Open': np.float64,
'High': np.float64, 'Low': np.float64,
'Close': np.float64, 'Volume': np.float64,
'VWAP': np.float64, 'Target': np.float64}
def datestring_to_timestamp(ts):
return int( | pd.Timestamp(ts) | pandas.Timestamp |
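# Illustrative completion sketch (an assumption, not part of the original snippet):
#     return int(pd.Timestamp(ts).timestamp())
# e.g. datestring_to_timestamp("2021-01-01") would give 1609459200 (UTC).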
#!/usr/bin/python
print('4_recent_OwnEa_q_last_8 - initiating.')
import os
import pandas as pd
pd.options.mode.chained_assignment = None
| pd.set_option('use_inf_as_na', True) | pandas.set_option |
import statsmodels.api as sml
import numpy as np
import pandas as pd
# TODO: Implement Meta Labeling
def t_value_lin(sample):
"""
    t-value of the slope coefficient from a linear time-trend regression
    :param sample: 1-D array of observations (e.g. prices) in time order
    :type sample: np.ndarray or pd.Series
    :return: t-statistic of the linear trend coefficient
    :rtype: float
    """
    x = np.ones((sample.shape[0], 2))
x[:, 1] = np.arange(sample.shape[0])
ols = sml.OLS(sample, x).fit()
return ols.tvalues[1]
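# Quick sanity-check sketch (illustrative only): a noisy upward trend should give a
# large positive t-value, while pure noise should give a t-value near zero on average.
#     t_value_lin(np.arange(50.) + np.random.normal(size=50))   # large positive
#     t_value_lin(np.random.normal(size=50))                    # close to zero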
def getDailyVol(close, span0=100):
# daily vol, reindexed to close
df0 = close.index.searchsorted(close.index - pd.Timedelta(days=1))
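    # map each timestamp to the position of the closest prior bar roughly one day earlier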
df0 = df0[df0 > 0]
df0 = pd.Series(close.index[df0 - 1],
index=close.index[close.shape[0] - df0.shape[0]:])
# daily returns
df0 = close.loc[df0.index] / close.loc[df0.values].values - 1
df0 = df0.ewm(span=span0).std()
return df0
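# Illustrative usage sketch (assumes `close` is a pd.Series of prices indexed by a
# DatetimeIndex); the daily volatility is a natural `target` for the TripleBarrier
# class defined below:
#     daily_vol = getDailyVol(close, span0=100)
#     events = TripleBarrier(barrier_up=2, barrier_down=2).get_events(
#         prices=close, time_events=daily_vol.index, target=daily_vol, tl=False)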
class TripleBarrier:
def __init__(self, barrier_up=0, barrier_down=0, min_return=-999):
"""
:param min_return: The minimum target return required for running
a triple barrier search.
:type min_return: float
:param barrier_up: non-negative float value that is used for setting
the upper barrier. If 0 there will be no upper barrier
:type barrier_up: float
:param barrier_down: non-negative float value that is used for setting
the inferior barrier. If 0 there will be no inferior barrier
:type barrier_down: float
"""
self.barrier_up = barrier_up
self.barrier_down = barrier_down
self.min_return = min_return
def get_events(self, prices, time_events, target, tl=False):
"""
:param prices: A pandas series of prices
:type prices: pd.Series
:param time_events: The pandas timeindex containing the timestamps
that will seed every triple barrier.
These are the timestamps selected by the sampling procedures
discussed in Chapter 2, Section 2.5.
        :type time_events: pd.DatetimeIndex
:param target: A pandas series of targets, expressed in terms of
absolute returns.
:type target: pd.Series
:param tl: A pandas series with the timestamps of the vertical
barriers. We pass a False when we want to disable vertical barriers.
:type tl: pd.Series or Boolean
:return: pd.DataFrame with the time index and a tl and target columns
and the next date when one of the triple barriers is hit
:rtype: pd.DataFrame
"""
# Get target
target_filtered = target.loc[time_events]
target_filtered = target_filtered[
target_filtered > self.min_return] # minRet
# Get tl (max holding period)
if tl is False:
tl = pd.Series(pd.NaT, index=time_events)
# Form events object, apply stop loss on tl
side_ = pd.Series(1., index=target_filtered.index)
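        # every event is labelled as a long position (side = 1) for now;
        # see the meta-labelling TODO at the top of this module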
events = pd.concat({'tl': tl,
'target': target_filtered,
'side': side_},
axis=1).dropna(subset=['target'])
df0 = self.simulate(prices=prices, events=events,
molecule=events.index)
events['tl'] = df0.dropna(how='all').min(axis=1) # pd.min ignores nan
events = events.drop('side', axis=1)
return events
def simulate(self, prices, events, molecule):
"""
Apply stop loss/profit taking, if it takes place before tl
(end of event)
:param prices: Prices
:type prices: pd.Series
:param events: A pandas dataframe, with columns:
tl: The timestamp of vertical barrier. When the value is np.nan,
there will not be a vertical barrier.
target: The unit width of the horizontal barriers
:type events: pd.DataFrame
:param molecule: A list with the subset of event indices
that will be processed by a single thread.
:type molecule: pd.DataFrame
:return: pd.DataFrame with the time index and 3 columns of
when one of the barriers is hit
:rtype: pd.DataFrame
"""
#
events_ = events.loc[molecule]
out = events_[['tl']].copy(deep=True)
if self.barrier_up > 0:
pt = self.barrier_up * events_['target']
else:
pt = pd.Series(index=events.index) # NaNs
if self.barrier_down > 0:
sl = -self.barrier_down * events_['target']
else:
sl = | pd.Series(index=events.index) | pandas.Series |
#
# (c) FFRI Security, Inc., 2020-2021 / Author: FFRI Security, Inc.
#
import glob
import json
import os
import pickle
import lightgbm as lgb
import pandas as pd
from sklearn.metrics import auc, roc_curve
from sklearn.model_selection import train_test_split
from fexrd import AllFeaturesExtractor
def classify(df: pd.DataFrame) -> None:
X = df.drop("labels", axis=1).to_numpy()
y = df["labels"].to_numpy()
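    # hold out 20% for testing, then 25% of the remainder for validation,
    # i.e. a 60/20/20 train/validation/test split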
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train, X_val, y_train, y_val = train_test_split(
X_train, y_train, test_size=0.25
)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_test = lgb.Dataset(X_val, y_val, reference=lgb_train)
lgbm_param = {"objective": "binary", "verbose": -1}
model = lgb.train(lgbm_param, lgb_train, valid_sets=lgb_test)
y_pred = model.predict(X_test, num_iteration=model.best_iteration)
fpr, tpr, thresholds = roc_curve(y_test, y_pred)
print(f"AUC: {auc(fpr, tpr)}")
def save_as_pickle(df: pd.DataFrame, out_name: str) -> None:
with open(out_name, "wb") as fout:
pickle.dump(df, fout)
def _main() -> None:
data_dir_in = "./data"
# NOTE: Choose one of the following feature extractor class.
# If you want to use strings feature only,
# please comment out the "fe = AllFeaturesExtractor()" line and uncomment the "fe = StringsFeatureExtractor()" line.
# Don't forget to import a new feature extractor class when uncommenting.
fe = AllFeaturesExtractor("v2020")
# fe = StringsFeatureExtractor("v2020")
# fe = LiefFeatureExtractor("v2020")
cache_fname = f"{fe.feature_name}_cache.pickle"
vecs = list()
row_names = list()
column_names = None
labels = list()
print(
f"Now load file from {data_dir_in}/non_packed.jsonl and {data_dir_in}/packed/*.jsonl"
)
print("It takes 3 or 5 minutes.")
# non-packed binaries
with open(os.path.join(data_dir_in, "non_packed.jsonl"), "r") as fin:
for line in fin:
obj = json.loads(line)
column_names, vec = fe.get_features(
obj[fe.feature_name] if fe.feature_name != "all" else obj
)
vecs.append(vec)
row_names.append(obj["hashes"]["sha256"])
labels.append(0)
# packed binaries
for json_in in glob.glob(os.path.join(data_dir_in, "packed", "*.jsonl")):
with open(json_in, "r") as fin:
for line in fin:
obj = json.loads(line)
_, vec = fe.get_features(
obj[fe.feature_name] if fe.feature_name != "all" else obj
)
vecs.append(vec)
row_names.append(obj["hashes"]["sha256"])
labels.append(1)
df = | pd.DataFrame(data=vecs, columns=column_names) | pandas.DataFrame |
import json
from datetime import datetime
from io import BytesIO
import matplotlib as mpl
import numpy as np
import pandas as pd
import requests
from matplotlib import pyplot as plt
def get_weather_df(username, password, port, url, zipcode):
line = 'https://'+username+':'+password+'@<EMAIL>:'+port+'/api/weather/v1/location/'+zipcode+'%3A4%3AUS/forecast/hourly/48hour.json?units=m&language=en-US'
raw = requests.get(line)
weather = json.loads(raw.text)
df = pd.DataFrame.from_dict(weather['forecasts'][0],orient='index').transpose()
for forecast in weather['forecasts'][1:]:
df = pd.concat([df, pd.DataFrame.from_dict(forecast,orient='index').transpose()])
time = np.array(df['fcst_valid_local'])
for row in range(len(time)):
time[row] = datetime.strptime(time[row], '%Y-%m-%dT%H:%M:%S%z')
df = df.set_index(time)
return df
def get_weather_plots(df):
plt.ioff()
df['rain'] = df['pop'].as_matrix()
tmean = pd.rolling_mean(df['temp'], window=4, center=True)
rhmean = pd.rolling_mean(df['rh'], window=4, center=True)
cldsmean = | pd.rolling_mean(df['clds'], window=4, center=True) | pandas.rolling_mean |
import pytest
import pandas as pd
from collections import ChainMap
import datetime
import pyarrow as pa
from arize.pandas.logger import Schema
from arize.utils.types import Environments, ModelTypes
from arize.pandas.validation.validator import Validator
import arize.pandas.validation.errors as err
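# Each test below overrides only the pyarrow schema under test via ChainMap,
# falling back to the shared `kwargs` dict defined at the bottom of this file.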
def test_zero_errors():
errors = Validator.validate_types(**kwargs)
assert len(errors) == 0
# may need to revisit this case
def test_reject_all_nones():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"A": pd.Series([None])})
),
},
kwargs,
)
)
assert len(errors) == 1
assert type(errors[0]) is err.InvalidTypeFeatures
def test_invalid_type_prediction_id():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame(
{"prediction_id": pd.Series([datetime.datetime.now()])}
)
)
},
kwargs,
)
)
assert len(errors) == 1
assert type(errors[0]) is err.InvalidType
def test_invalid_type_prediction_id_float():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"prediction_id": pd.Series([3.14])})
)
},
kwargs,
)
)
assert len(errors) == 1
assert type(errors[0]) is err.InvalidType
def test_invalid_type_timestamp():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"prediction_timestamp": pd.Series(["now"])})
)
},
kwargs,
)
)
assert len(errors) == 1
assert type(errors[0]) is err.InvalidType
def test_valid_timestamp_datetime():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame(
{"prediction_timestamp": pd.Series([datetime.datetime.now()])}
)
)
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_timestamp_date():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame(
{
"prediction_timestamp": pd.Series(
[datetime.datetime.now().date()]
)
}
)
)
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_timestamp_float():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame(
{
"prediction_timestamp": pd.Series(
[datetime.datetime.now().timestamp()]
)
}
)
)
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_timestamp_int():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame(
{
"prediction_timestamp": pd.Series(
[int(datetime.datetime.now().timestamp())]
)
}
)
)
},
kwargs,
)
)
assert len(errors) == 0
def test_invalid_type_features():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"A": pd.Series([list()])})
)
},
kwargs,
)
)
assert len(errors) == 1
assert type(errors[0]) is err.InvalidTypeFeatures
def test_invalid_type_shap_values():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"a": pd.Series([list()])})
)
},
kwargs,
)
)
assert len(errors) == 1
assert type(errors[0]) is err.InvalidTypeShapValues
def test_invalid_label():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"prediction_label": pd.Categorical([None])})
),
},
kwargs,
)
)
assert len(errors) == 1
assert type(errors[0]) is err.InvalidType
def test_valid_label_int():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"prediction_label": pd.Series([int(1)])})
),
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_label_str():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"prediction_label": pd.Series(["0"])})
),
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_label_float():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"prediction_label": pd.Series([3.14])})
),
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_label_bool():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"prediction_label": pd.Series([True])})
),
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_feature_int():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"A": pd.Series([int(1)])})
),
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_feature_str():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"A": pd.Series(["0"])})
),
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_feature_float():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"A": pd.Series([3.14])})
),
},
kwargs,
)
)
assert len(errors) == 0
def test_valid_feature_bool():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"A": pd.Series([True])})
),
},
kwargs,
)
)
assert len(errors) == 0
def test_invalid_score():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame({"prediction_score": pd.Series(["fraud"])})
),
},
kwargs,
)
)
assert len(errors) == 1
assert type(errors[0]) is err.InvalidType
def test_multiple():
errors = Validator.validate_types(
**ChainMap(
{
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame(
{
"prediction_id": pd.Series([datetime.datetime.now()]),
"prediction_timestamp": pd.Series(["now"]),
"A": pd.Series([list()]),
"a": pd.Series([list()]),
"prediction_label": pd.Categorical([None]),
"prediction_score": pd.Series(["fraud"]),
}
)
)
},
kwargs,
)
)
assert len(errors) == 6
assert any(type(e) is err.InvalidType for e in errors)
assert any(type(e) is err.InvalidTypeFeatures for e in errors)
assert any(type(e) is err.InvalidTypeShapValues for e in errors)
kwargs = {
"model_type": ModelTypes.SCORE_CATEGORICAL,
"schema": Schema(
prediction_id_column_name="prediction_id",
timestamp_column_name="prediction_timestamp",
prediction_label_column_name="prediction_label",
actual_label_column_name="actual_label",
prediction_score_column_name="prediction_score",
actual_score_column_name="actual_score",
feature_column_names=list("ABCDEFG"),
shap_values_column_names=dict(zip("ABCDEF", "abcdef")),
),
"pyarrow_schema": pa.Schema.from_pandas(
pd.DataFrame(
{
"prediction_id": | pd.Series(["0", "1", "2"]) | pandas.Series |
import glob
import os
import sys
# these imports and sys.path insertions need to stay in this order
sys.path.insert(0, "../")
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_functions import *
from Reff_constants import *
from sys import argv
from datetime import timedelta, datetime
from scipy.special import expit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
def forecast_TP(data_date):
from scenarios import scenarios, scenario_dates
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
mob_samples,
)
data_date = pd.to_datetime(data_date)
# Define inputs
sim_start_date = pd.to_datetime(sim_start_date)
# Add 3 days buffer to mobility forecast
num_forecast_days = num_forecast_days + 3
# data_date = pd.to_datetime('2022-01-25')
print("============")
print("Generating forecasts using data from", data_date)
print("============")
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
"NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# Load in vaccination data by state and date which should have the same date as the
    # NNDSS/linelist data; use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# fill in date range
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
always = always.fillna(method="bfill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
prop_all = survey_X
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest mask wearing survey is {}".format(mask_wearing.date.values[-1]))
# mask_wearing['state'] = mask_wearing['state'].map(states_initials).fillna(mask_wearing['state'])
mask_wearing.loc[mask_wearing.state != "ACT", "state"] = (
mask_wearing.loc[mask_wearing.state != "ACT", "state"]
.map(states_initials)
.fillna(mask_wearing.loc[mask_wearing.state != "ACT", "state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# fill back to earlier and between weeks.
# Assume survey on day x applies for all days up to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_all = mask_wearing_X
# Get posterior
df_samples = read_in_posterior(
date=data_date.strftime("%Y-%m-%d"),
)
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
plot_states = states.copy()
one_month = data_date + timedelta(days=num_forecast_days)
days_from_March = (one_month - pd.to_datetime(start_date)).days
# filter out future info
prop = prop_all.loc[:data_date]
masks = mask_wearing_all.loc[:data_date]
df_google = df_google_all.loc[df_google_all.date <= data_date]
# use this trick of saving the google data and then reloading it to kill
# the date time values
df_google.to_csv("results/test_google_data.csv")
df_google = pd.read_csv("results/test_google_data.csv")
# remove the temporary file
# os.remove("results/test_google_data.csv")
    # Simple interpolation for missing values in Google data
df_google = df_google.interpolate(method="linear", axis=0)
df_google.date = pd.to_datetime(df_google.date)
# forecast time parameters
today = data_date.strftime("%Y-%m-%d")
# add days to forecast if we are missing data
if df_google.date.values[-1] < data_date:
n_forecast = num_forecast_days + (data_date - df_google.date.values[-1]).days
else:
n_forecast = num_forecast_days
training_start_date = datetime(2020, 3, 1, 0, 0)
print(
"Forecast ends at {} days after 1st March".format(
(pd.to_datetime(today) - pd.to_datetime(training_start_date)).days
+ num_forecast_days
)
)
print(
"Final date is {}".format(pd.to_datetime(today) + timedelta(days=num_forecast_days))
)
df_google = df_google.loc[df_google.date >= training_start_date]
outdata = {"date": [], "type": [], "state": [], "mean": [], "std": []}
predictors = mov_values.copy()
# predictors.remove("residential_7days")
# Setup Figures
axes = []
figs = []
for var in predictors:
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
# fig.suptitle(var)
figs.append(fig)
# extra fig for microdistancing
var = "Proportion people always microdistancing"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# # extra fig for mask wearing
var = "Proportion people always wearing masks"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# Forecasting Params
n_training = 21 # Period to examine trend
n_baseline = 150 # Period to create baseline
n_training_vaccination = 30 # period to create trend for vaccination
# since this can be useful, predictor ordering is:
# [
# 'retail_and_recreation_7days',
# 'grocery_and_pharmacy_7days',
# 'parks_7days',
# 'transit_stations_7days',
# 'workplaces_7days'
# ]
# Loop through states and run forecasting.
print("============")
print("Forecasting macro, micro and vaccination")
print("============")
state_Rmed = {}
state_sims = {}
for i, state in enumerate(states):
rownum = int(i / 2)
colnum = np.mod(i, 2)
rows = df_google.loc[df_google.state == state].shape[0]
# Rmed currently a list, needs to be a matrix
Rmed_array = np.zeros(shape=(rows, len(predictors), mob_samples))
for j, var in enumerate(predictors):
for n in range(mob_samples):
# historically we want a little more noise. In the actual forecasting of trends
# we don't want this to be quite that prominent.
Rmed_array[:, j, n] = df_google[df_google["state"] == state][
var
].values.T + np.random.normal(
loc=0, scale=df_google[df_google["state"] == state][var + "_std"]
)
dates = df_google[df_google["state"] == state]["date"]
# cap min and max at historical or (-50,0)
# 1 by predictors by mob_samples size
minRmed_array = np.minimum(-50, np.amin(Rmed_array, axis=0))
maxRmed_array = np.maximum(10, np.amax(Rmed_array, axis=0))
# days by predictors by samples
sims = np.zeros(shape=(n_forecast, len(predictors), mob_samples))
for n in range(mob_samples): # Loop through simulations
Rmed = Rmed_array[:, :, n]
minRmed = minRmed_array[:, n]
maxRmed = maxRmed_array[:, n]
if maxRmed[1] < 20:
maxRmed[1] = 50
R_baseline_mean = np.mean(Rmed[-n_baseline:, :], axis=0)
if state not in {"WA"}:
R_baseline_mean[-1] = 0
R_diffs = np.diff(Rmed[-n_training:, :], axis=0)
mu = np.mean(R_diffs, axis=0)
cov = np.cov(R_diffs, rowvar=False) # columns are vars, rows are obs
# Forecast mobility forward sequentially by day.
# current = np.mean(Rmed[-9:-2, :], axis=0) # Start from last valid days
# current = np.mean(Rmed[-1, :], axis=0) # Start from last valid days
current = Rmed[-1, :] # Start from last valid days
for i in range(n_forecast):
# ## SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast - i) / (n_forecast)
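                    # p_force decays linearly from ~1 towards 0 across the horizon, so early
                    # steps follow the recent trend and later steps are pulled back to baseline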
# Generate a single forward realisation of trend
trend_force = np.random.multivariate_normal(mu, cov)
# Generate a single forward realisation of baseline regression
# regression to baseline force stronger in standard forecasting
regression_to_baseline_force = np.random.multivariate_normal(
0.05 * (R_baseline_mean - current), cov
)
new_forcast_points = (
current + p_force * trend_force + (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] != "":
# Make baseline cov for generating points
cov_baseline = np.cov(Rmed[-42:-28, :], rowvar=False)
mu_current = Rmed[-1, :]
mu_victoria = np.array(
[
-55.35057887,
-22.80891056,
-46.59531636,
-75.99942378,
-44.71119293,
]
)
mu_baseline = np.mean(Rmed[-42:-28, :], axis=0)
# mu_baseline = 0*np.mean(Rmed[-42:-28, :], axis=0)
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + (n_forecast - 42)
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# take a continuous median to account for noise in recent observations (such as sunny days)
# mu_current = np.mean(Rmed[-7:, :], axis=0)
# cov_baseline = np.cov(Rmed[-28:, :], rowvar=False)
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
elif scenarios[state] == "no_reversion_continuous_lockdown":
# add the new scenario here
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
# No Lockdown
elif scenarios[state] == "full_reversion":
# a full reversion scenario changes the social mobility and microdistancing
# behaviours at the scenario change date and then applies a return to baseline force
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
R_baseline_0 = mu_baseline
                            # set adjusted baselines by eye for now; this needs to be automated
# R_baseline_0[1] = 10 # baseline of +10% for Grocery based on other jurisdictions
# # apply specific baselines to the jurisdictions progressing towards normal restrictions
# if state == 'NSW':
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'ACT':
# R_baseline_0[1] = 20 # baseline of +20% for Grocery based on other jurisdictions
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'VIC':
# R_baseline_0[0] = -15 # baseline of -15% for R&R based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[3] = -30 # baseline of -30% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[4] = -15 # baseline of -15% for workplaces based on 2021-April to 2021-July (pre-third-wave lockdowns)
# the force we trend towards the baseline above with
p_force = (n_forecast - i) / (n_forecast)
trend_force = np.random.multivariate_normal(
mu, cov
) # Generate a single forward realisation of trend
# baseline scalar is smaller for this as we want slow returns
adjusted_baseline_drift_mean = R_baseline_0 - current
# we purposely scale the transit measure so that we increase a little more quickly
# tmp = 0.05 * adjusted_baseline_drift_mean[3]
adjusted_baseline_drift_mean *= 0.005
# adjusted_baseline_drift_mean[3] = tmp
regression_to_baseline_force = np.random.multivariate_normal(
adjusted_baseline_drift_mean, cov
) # Generate a single forward realisation of baseline regression
new_forcast_points = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# new_forcast_points = current + regression_to_baseline_force # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] == "immediately_baseline":
# this scenario is used to return instantly to the baseline levels
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
# jump immediately to baseline
new_forcast_points = np.random.multivariate_normal(
R_baseline_0, cov_baseline
)
# Temporary Lockdown
elif scenarios[state] == "half_reversion":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
(mu_current + mu_baseline) / 2, cov_baseline
)
# Stage 4
elif scenarios[state] == "stage4":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
mu_victoria, cov_baseline
)
# Set this day in this simulation to the forecast realisation
sims[i, :, n] = new_forcast_points
dd = [dates.tolist()[-1] + timedelta(days=x) for x in range(1, n_forecast + 1)]
sims_med = np.median(sims, axis=2) # N by predictors
sims_q25 = np.percentile(sims, 25, axis=2)
sims_q75 = np.percentile(sims, 75, axis=2)
# forecast mircodistancing
# Get a baseline value of microdistancing
mu_overall = np.mean(prop[state].values[-n_baseline:])
md_diffs = np.diff(prop[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_md = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(prop[state].index.values[-1])
).days
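        # gap (in days) between the last micro-distancing survey and the last mobility
        # observation; the forecast below also has to cover this gap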
# Set all values to current value.
current = [prop[state].values[-1]] * mob_samples
new_md_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_md):
# SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (n_forecast + extra_days_md)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.05 * (mu_overall - current), std_diffs
)
current = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Balance forces
# current = current+p_force*trend_force # Balance forces
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(prop[state].values[-42:-28])
mu_baseline = np.mean(prop[state].values[-42:-28], axis=0)
mu_current = prop[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_md
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(prop[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (
n_forecast + extra_days_md
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(prop[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_md_forecast.append(current)
md_sims = np.vstack(new_md_forecast) # Put forecast days together
md_sims = np.minimum(1, md_sims)
md_sims = np.maximum(0, md_sims)
dd_md = [
prop[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_md + 1)
]
        ## currently not forecasting masks; this may return in the future but will need to be assessed.
# forecast mask wearing compliance
# Get a baseline value of microdistancing
mu_overall = np.mean(masks[state].values[-n_baseline:])
md_diffs = np.diff(masks[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_masks = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(masks[state].index.values[-1])
).days
# Set all values to current value.
current = [masks[state].values[-1]] * mob_samples
new_masks_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_masks):
# SCENARIO MODELLING
# This code chunk will allow you manually set the distancing params for a state to allow for modelling.
if scenarios[state] == "":
                # Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
# regression_to_baseline_force = np.random.normal(0.05*(mu_overall - current), std_diffs)
# current = current + p_force*trend_force + (1-p_force)*regression_to_baseline_force # Balance forces
current = current + trend_force
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(masks[state].values[-42:-28])
mu_baseline = np.mean(masks[state].values[-42:-28], axis=0)
mu_current = masks[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_masks
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(masks[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
                        # Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(masks[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_masks_forecast.append(current)
masks_sims = np.vstack(new_masks_forecast) # Put forecast days together
masks_sims = np.minimum(1, masks_sims)
masks_sims = np.maximum(0, masks_sims)
dd_masks = [
masks[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_masks + 1)
]
# Forecasting vaccine effect
# if state == "WA":
# last_fit_date = pd.to_datetime(third_end_date)
# else:
last_fit_date = pd.to_datetime(third_date_range[state][-1])
extra_days_vacc = (pd.to_datetime(df_google.date.values[-1]) - last_fit_date).days
total_forecasting_days = n_forecast + extra_days_vacc
# get the VE on the last day
mean_delta = vaccination_by_state_delta.loc[state][last_fit_date + timedelta(1)]
mean_omicron = vaccination_by_state_omicron.loc[state][last_fit_date + timedelta(1)]
current = np.zeros_like(mob_samples)
new_delta = []
new_omicron = []
# variance on the vaccine forecasts is equivalent to what we use in the fitting
var_vax = 0.00005
a_vax = np.zeros_like(mob_samples)
b_vax = np.zeros_like(mob_samples)
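        # inside the loop, a_vax and b_vax are the method-of-moments parameters of a
        # Beta distribution whose mean is the VE estimate and whose variance is var_vax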
for d in pd.date_range(
last_fit_date + timedelta(1),
pd.to_datetime(today) + timedelta(days=num_forecast_days),
):
mean_delta = vaccination_by_state_delta.loc[state][d]
a_vax = mean_delta * (mean_delta * (1 - mean_delta) / var_vax - 1)
b_vax = (1 - mean_delta) * (mean_delta * (1 - mean_delta) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_delta.append(current.tolist())
mean_omicron = vaccination_by_state_omicron.loc[state][d]
a_vax = mean_omicron * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
b_vax = (1 - mean_omicron) * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_omicron.append(current.tolist())
vacc_sims_delta = np.vstack(new_delta)
vacc_sims_omicron = np.vstack(new_omicron)
dd_vacc = [
last_fit_date + timedelta(days=x)
for x in range(1, n_forecast + extra_days_vacc + 1)
]
for j, var in enumerate(
predictors
+ ["md_prop"]
+ ["masks_prop"]
+ ["vaccination_delta"]
+ ["vaccination_omicron"]
):
# Record data
axs = axes[j]
if (state == "AUS") and (var == "md_prop"):
continue
if var == "md_prop":
outdata["type"].extend([var] * len(dd_md))
outdata["state"].extend([state] * len(dd_md))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_md])
outdata["mean"].extend(np.mean(md_sims, axis=1))
outdata["std"].extend(np.std(md_sims, axis=1))
elif var == "masks_prop":
outdata["type"].extend([var] * len(dd_masks))
outdata["state"].extend([state] * len(dd_masks))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_masks])
outdata["mean"].extend(np.mean(masks_sims, axis=1))
outdata["std"].extend(np.std(masks_sims, axis=1))
elif var == "vaccination_delta":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_delta, axis=1))
outdata["std"].extend(np.std(vacc_sims_delta, axis=1))
elif var == "vaccination_omicron":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_omicron, axis=1))
outdata["std"].extend(np.std(vacc_sims_omicron, axis=1))
else:
outdata["type"].extend([var] * len(dd))
outdata["state"].extend([state] * len(dd))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd])
outdata["mean"].extend(np.mean(sims[:, j, :], axis=1))
outdata["std"].extend(np.std(sims[:, j, :], axis=1))
if state in plot_states:
if var == "md_prop":
# md plot
axs[rownum, colnum].plot(prop[state].index, prop[state].values, lw=1)
axs[rownum, colnum].plot(dd_md, np.median(md_sims, axis=1), "k", lw=1)
axs[rownum, colnum].fill_between(
dd_md,
np.quantile(md_sims, 0.25, axis=1),
np.quantile(md_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "masks_prop":
# masks plot
axs[rownum, colnum].plot(masks[state].index, masks[state].values, lw=1)
axs[rownum, colnum].plot(
dd_masks, np.median(masks_sims, axis=1), "k", lw=1
)
axs[rownum, colnum].fill_between(
dd_masks,
np.quantile(masks_sims, 0.25, axis=1),
np.quantile(masks_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "vaccination_delta":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].index,
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_delta, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_delta, 0.25, axis=1),
np.quantile(vacc_sims_delta, 0.75, axis=1),
color="C1",
alpha=0.1,
)
elif var == "vaccination_omicron":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].index,
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_omicron, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_omicron, 0.25, axis=1),
np.quantile(vacc_sims_omicron, 0.75, axis=1),
color="C1",
alpha=0.1,
)
else:
# all other predictors
axs[rownum, colnum].plot(
dates, df_google[df_google["state"] == state][var].values, lw=1
)
axs[rownum, colnum].fill_between(
dates,
np.percentile(Rmed_array[:, j, :], 25, axis=1),
np.percentile(Rmed_array[:, j, :], 75, axis=1),
alpha=0.5,
)
axs[rownum, colnum].plot(dd, sims_med[:, j], color="C1", lw=1)
axs[rownum, colnum].fill_between(
dd, sims_q25[:, j], sims_q75[:, j], color="C1", alpha=0.1
)
# axs[rownum,colnum].axvline(dd[-num_forecast_days], ls = '--', color = 'black', lw=1) # plotting a vertical line at the end of the data date
# axs[rownum,colnum].axvline(dd[-(num_forecast_days+truncation_days)], ls = '-.', color='grey', lw=1) # plotting a vertical line at the forecast date
axs[rownum, colnum].set_title(state)
# plotting horizontal line at 1
axs[rownum, colnum].axhline(1, ls="--", c="k", lw=1)
axs[rownum, colnum].set_title(state)
axs[rownum, colnum].tick_params("x", rotation=90)
axs[rownum, colnum].tick_params("both", labelsize=8)
# plot the start date of the data and indicators of the data we are actually fitting to (in grey)
axs[rownum, colnum].axvline(data_date, ls="-.", color="black", lw=1)
if j < len(predictors):
axs[rownum, colnum].set_ylabel(
predictors[j].replace("_", " ")[:-5], fontsize=7
)
elif var == "md_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n micro-distancing", fontsize=7
)
elif var == "masks_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n wearing masks", fontsize=7
)
elif var == "vaccination_delta" or var == "vaccination_omicron":
axs[rownum, colnum].set_ylabel(
"Reduction in TP \n from vaccination", fontsize=7
)
# historically we want to store the higher variance mobilities
state_Rmed[state] = Rmed_array
state_sims[state] = sims
os.makedirs(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts",
exist_ok=True,
)
for i, fig in enumerate(figs):
fig.text(0.5, 0.02, "Date", ha="center", va="center", fontsize=15)
if i < len(predictors): # this plots the google mobility forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/"
+ str(predictors[i])
+ ".png",
dpi=400,
)
elif i == len(predictors): # this plots the microdistancing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/micro_dist.png",
dpi=400,
)
elif i == len(predictors) + 1: # this plots the microdistancing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing.png",
dpi=400,
)
elif i == len(predictors) + 2: # finally this plots the delta VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/delta_vaccination.png",
dpi=400,
)
else: # finally this plots the omicron VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/omicron_vaccination.png",
dpi=400,
)
df_out = pd.DataFrame.from_dict(outdata)
df_md = df_out.loc[df_out.type == "md_prop"]
df_masks = df_out.loc[df_out.type == "masks_prop"]
df_out = df_out.loc[df_out.type != "vaccination_delta"]
df_out = df_out.loc[df_out.type != "vaccination_omicron"]
df_out = df_out.loc[df_out.type != "md_prop"]
df_out = df_out.loc[df_out.type != "masks_prop"]
df_forecast = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["mean"]
)
df_std = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["std"]
)
df_forecast_md = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_md_std = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["std"]
)
df_forecast_masks = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_masks_std = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["std"]
)
# align with google order in columns
df_forecast = df_forecast.reindex([("mean", val) for val in predictors], axis=1)
df_std = df_std.reindex([("std", val) for val in predictors], axis=1)
df_forecast.columns = predictors # remove the tuple name of columns
df_std.columns = predictors
df_forecast = df_forecast.reset_index()
df_std = df_std.reset_index()
df_forecast.date = pd.to_datetime(df_forecast.date)
df_std.date = pd.to_datetime(df_std.date)
df_forecast_md = df_forecast_md.reindex([("mean", state) for state in states], axis=1)
df_forecast_md_std = df_forecast_md_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_md.columns = states
df_forecast_md_std.columns = states
df_forecast_md = df_forecast_md.reset_index()
df_forecast_md_std = df_forecast_md_std.reset_index()
df_forecast_md.date = pd.to_datetime(df_forecast_md.date)
df_forecast_md_std.date = pd.to_datetime(df_forecast_md_std.date)
df_forecast_masks = df_forecast_masks.reindex(
[("mean", state) for state in states], axis=1
)
df_forecast_masks_std = df_forecast_masks_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_masks.columns = states
df_forecast_masks_std.columns = states
df_forecast_masks = df_forecast_masks.reset_index()
df_forecast_masks_std = df_forecast_masks_std.reset_index()
df_forecast_masks.date = pd.to_datetime(df_forecast_masks.date)
df_forecast_masks_std.date = pd.to_datetime(df_forecast_masks_std.date)
df_R = df_google[["date", "state"] + mov_values + [val + "_std" for val in mov_values]]
df_R = pd.concat([df_R, df_forecast], ignore_index=True, sort=False)
df_R["policy"] = (df_R.date >= "2020-03-20").astype("int8")
df_md = pd.concat([prop, df_forecast_md.set_index("date")])
df_masks = pd.concat([masks, df_forecast_masks.set_index("date")])
    # now we read in the VE time series and create an adjusted time series from March 1st
    # that includes no vaccination effect prior to the rollout
vaccination_by_state = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
    # there are a couple of NAs early in the time series, but this is likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_delta = pd.DataFrame()
    # loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, vaccination_by_state.columns[0] - timedelta(days=1), freq="d"
)
    # this is just a single-row df of ones, with the missing (pre-vaccination) dates as its columns
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
        before_vacc_Reff_reduction.index = [state]
# merge the vaccine data and the 1's dataframes
df_ve_delta[state] = pd.concat(
[before_vacc_Reff_reduction.loc[state].T, vaccination_by_state.loc[state].T]
)
# clip off extra days
df_ve_delta = df_ve_delta[
df_ve_delta.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_delta.to_csv(
results_dir
+ "forecasted_vaccination_delta"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
vaccination_by_state = pd.read_csv(
results_dir
+ "adjusted_vaccine_ts_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date"],
)
    # there are a couple of NAs early in the time series, but this is likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_omicron = pd.DataFrame()
    # loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, pd.to_datetime(omicron_start_date) - timedelta(days=1), freq="d"
)
    # this is just a single-row df of ones, with the missing (pre-Omicron) dates as its columns
before_vacc_Reff_reduction = pd.DataFrame(np.ones(((1, len(before_vacc_dates)))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
        before_vacc_Reff_reduction.index = [state]
# merge the vaccine data and the 1's dataframes
df_ve_omicron[state] = pd.concat(
[
before_vacc_Reff_reduction.loc[state].T,
vaccination_by_state.loc[state][
vaccination_by_state.loc[state].index
>= pd.to_datetime(omicron_start_date)
],
]
)
df_ve_omicron = df_ve_omicron[
df_ve_omicron.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_omicron.to_csv(
results_dir
+ "forecasted_vaccination_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
print("============")
print("Plotting forecasted estimates")
print("============")
expo_decay = True
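    # expo_decay selects the functional form mapping a survey proportion p to a
    # multiplicative TP factor: (1 + theta)^(-p) when True, otherwise a scaled
    # logistic 2 * expit(-theta * p) (both forms appear in the branches below).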
theta_md = np.tile(df_samples["theta_md"].values, (df_md["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
prop_sim = df_md[state].values
if expo_decay:
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
else:
md = 2 * expit(-1 * theta_md * prop_sim[:, np.newaxis])
row = i // 2
col = i % 2
ax[row, col].plot(
df_md[state].index, np.median(md, axis=1), label="Microdistancing"
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.25, axis=1),
np.quantile(md, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.05, axis=1),
np.quantile(md, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_md[state].index.values[-n_forecast - extra_days_md]],
minor=True,
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of micro-distancing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/md_factor.png",
dpi=144,
)
theta_masks = np.tile(df_samples["theta_masks"].values, (df_masks["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
masks_prop_sim = df_masks[state].values
if expo_decay:
mask_wearing_factor = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
else:
mask_wearing_factor = 2 * expit(
-1 * theta_masks * masks_prop_sim[:, np.newaxis]
)
row = i // 2
col = i % 2
ax[row, col].plot(
df_masks[state].index,
np.median(mask_wearing_factor, axis=1),
label="Microdistancing",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.25, axis=1),
np.quantile(mask_wearing_factor, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.05, axis=1),
np.quantile(mask_wearing_factor, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_masks[state].index.values[-n_forecast - extra_days_masks]], minor=True
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of mask-wearing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing_factor.png",
dpi=144,
)
df_R = df_R.sort_values("date")
# samples = df_samples.sample(n_samples) # test on sample of 2
# keep all samples
samples = df_samples.iloc[:mob_samples, :]
# for strain in ("Delta", "Omicron"):
# samples = df_samples
# flags for advanced scenario modelling
advanced_scenario_modelling = False
save_for_SA = False
# since this can be useful, predictor ordering is:
# ['retail_and_recreation_7days', 'grocery_and_pharmacy_7days', 'parks_7days', 'transit_stations_7days', 'workplaces_7days']
typ = "R_L"
forecast_type = ["R_L"]
for strain in ("Delta", "Omicron"):
print("============")
print("Calculating", strain, "TP")
print("============")
state_Rs = {
"state": [],
"date": [],
"type": [],
"median": [],
"lower": [],
"upper": [],
"bottom": [],
"top": [],
"mean": [],
"std": [],
}
ban = "2020-03-20"
# VIC and NSW allow gatherings of up to 20 people, other jurisdictions allow for
new_pol = "2020-06-01"
expo_decay = True
# start and end date for the third wave
# Subtract 10 days to avoid right truncation
third_end_date = data_date - pd.Timedelta(days=truncation_days)
typ_state_R = {}
mob_forecast_date = df_forecast.date.min()
state_key = {
"ACT": "1",
"NSW": "2",
"NT": "3",
"QLD": "4",
"SA": "5",
"TAS": "6",
"VIC": "7",
"WA": "8",
}
total_N_p_third_omicron = 0
for v in third_date_range.values():
tmp = sum(v >= pd.to_datetime(omicron_start_date))
# add a plus one for inclusion of end date (the else 0 is due to QLD having no Omicron potential)
total_N_p_third_omicron += tmp if tmp > 0 else 0
state_R = {}
for (kk, state) in enumerate(states):
            # df_R is sorted by date, so rows are dates and columns are predictors
df_state = df_R.loc[df_R.state == state]
dd = df_state.date
post_values = samples[predictors].values.T
prop_sim = df_md[state].values
# grab vaccination data
vacc_ts_delta = df_ve_delta[state]
vacc_ts_omicron = df_ve_omicron[state]
# take right size of md to be N by N
theta_md = np.tile(samples["theta_md"].values, (df_state.shape[0], 1))
theta_masks = np.tile(samples["theta_masks"].values, (df_state.shape[0], 1))
r = samples["r[" + str(kk + 1) + "]"].values
tau = samples["tau[" + str(kk + 1) + "]"].values
m0 = samples["m0[" + str(kk + 1) + "]"].values
m1 = samples["m1[" + str(kk + 1) + "]"].values
# m1 = 1.0
            md = ((1 + theta_md).T ** (-1 * prop_sim)).T
            # mask-wearing proportions for this state
            masks_prop_sim = df_masks[state].values
            masks = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
third_days = {k: v.shape[0] for (k, v) in third_date_range.items()}
third_days_cumulative = np.append(
[0], np.cumsum([v for v in third_days.values()])
)
vax_idx_ranges = {
k: range(third_days_cumulative[i], third_days_cumulative[i + 1])
for (i, k) in enumerate(third_days.keys())
}
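            # The fitted VE samples are stored as one stacked vector over every state's
            # third-wave days, so these cumulative ranges pick out the slice of that
            # vector belonging to each state.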
third_days_tot = sum(v for v in third_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = samples[
["ve_delta[" + str(j + 1) + "]" for j in range(third_days_tot)]
].T
vacc_tmp = sampled_vax_effects_all.iloc[vax_idx_ranges[state], :]
            # now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index < third_date_range[state][0]]]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index > third_date_range[state][-1]]]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_delta = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# construct a range of dates for omicron which starts at the maximum of the start date for that state or the Omicron start date
third_omicron_date_range = {
k: pd.date_range(
start=max(v[0], pd.to_datetime(omicron_start_date)), end=v[-1]
).values
for (k, v) in third_date_range.items()
}
third_omicron_days = {
k: v.shape[0] for (k, v) in third_omicron_date_range.items()
}
third_omicron_days_cumulative = np.append(
[0], np.cumsum([v for v in third_omicron_days.values()])
)
omicron_ve_idx_ranges = {
k: range(
third_omicron_days_cumulative[i],
third_omicron_days_cumulative[i + 1],
)
for (i, k) in enumerate(third_omicron_days.keys())
}
third_omicron_days_tot = sum(v for v in third_omicron_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = (
samples[
["ve_omicron[" + str(j + 1) + "]" for j in range(third_omicron_days_tot)]
].T
)
vacc_tmp = sampled_vax_effects_all.iloc[omicron_ve_idx_ranges[state], :]
            # now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index < third_omicron_date_range[state][0]
]
]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index > third_date_range[state][-1]
]
]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_omicron = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# setup some variables for handling the omicron starts
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
omicron_start_day = (
pd.to_datetime(omicron_start_date) - pd.to_datetime(start_date)
).days
days_into_omicron = np.cumsum(
np.append(
[0],
[
(v >= pd.to_datetime(omicron_start_date)).sum()
for v in third_date_range.values()
],
)
)
idx = {}
kk = 0
for k in third_date_range.keys():
idx[k] = range(days_into_omicron[kk], days_into_omicron[kk + 1])
kk += 1
# tile the reduction in vaccination effect for omicron (i.e. VE is (1+r)*VE)
voc_vacc_product = np.zeros_like(vacc_ts_delta)
# calculate the voc effects
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
# sample the right R_L
sim_R = samples["R_Li[" + state_key[state] + "]"].values
for n in range(mob_samples):
# add gaussian noise to predictors before forecast
df_state.loc[df_state.date < mob_forecast_date, predictors] = (
state_Rmed[state][:, :, n] / 100
)
# add gaussian noise to predictors after forecast
df_state.loc[df_state.date >= mob_forecast_date, predictors] = (
state_sims[state][:, :, n] / 100
)
## ADVANCED SCENARIO MODELLING - USE ONLY FOR POINT ESTIMATES
# set non-grocery values to 0
if advanced_scenario_modelling:
df_state.loc[:, predictors[0]] = 0
df_state.loc[:, predictors[2]] = 0
df_state.loc[:, predictors[3]] = 0
df_state.loc[:, predictors[4]] = 0
df1 = df_state.loc[df_state.date <= ban]
X1 = df1[predictors] # N by K
md[: X1.shape[0], :] = 1
if n == 0:
# initialise arrays (loggodds)
# N by K times (Nsamples by K )^T = Ndate by Nsamples
logodds = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
logodds = np.append(logodds, X2 @ post_values[:, n], axis=0)
logodds = np.append(logodds, X3 @ post_values[:, n], axis=0)
else:
                    # concatenate to the pre-existing logodds matrix
logodds1 = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
prop2 = df_md.loc[ban:new_pol, state].values
prop3 = df_md.loc[new_pol:, state].values
logodds2 = X2 @ post_values[:, n]
logodds3 = X3 @ post_values[:, n]
logodds_sample = np.append(logodds1, logodds2, axis=0)
logodds_sample = np.append(logodds_sample, logodds3, axis=0)
# concatenate to previous
logodds = np.vstack((logodds, logodds_sample))
            # create a matrix of mob_samples realisations which is an indicator of the voc (delta right now)
# which will be 1 up until the voc_start_date and then it will be values from the posterior sample
voc_multiplier_alpha = samples["voc_effect_alpha"].values
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
# number of days into omicron forecast
tt = 0
# loop over days in third wave and apply the appropriate form (i.e. decay or not)
# note that in here we apply the entire sample to the vaccination data to create a days by samples array
tmp_date = pd.to_datetime("2020-03-01")
# get the correct Omicron start date
# omicron_start_date_tmp = np.maximum(
# pd.to_datetime(omicron_start_date),
# pd.to_datetime(third_date_range[state][0]),
# )
omicron_start_date_tmp = pd.to_datetime(omicron_start_date)
omicron_start_day_tmp = (
pd.to_datetime(omicron_start_date_tmp) - pd.to_datetime(start_date)
).days
for ii in range(mob_samples):
# if before omicron introduced in a jurisdiction, we consider what period we're at:
# 1. Wildtype
# 2. Alpha
# 3. Delta
voc_vacc_product[:, ii] = vacc_ts_delta[:, ii]
idx_start = df_state.loc[df_state.date < alpha_start_date].shape[0]
idx_end = df_state.loc[df_state.date < delta_start_date].shape[0]
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_alpha[ii]
idx_start = idx_end
idx_end = df_state.loc[df_state.date < omicron_start_date_tmp].shape[0]
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_delta[ii]
idx_start = idx_end
idx_end = np.shape(voc_vacc_product)[0]
if strain == "Delta":
voc_vacc_product[idx_start:idx_end, ii] *= voc_multiplier_delta[ii]
elif strain == "Omicron":
# if omicron we need to account for the Omicron VE prior to the introduction of
# omicron in mid November
voc_vacc_product[idx_start:idx_end, ii] = (
vacc_ts_omicron[idx_start:idx_end, ii] * voc_multiplier_omicron[ii]
)
# save the components of the TP
pd.DataFrame(sim_R).to_csv(results_dir + "baseline_R_L_" + strain + ".csv")
pd.DataFrame(md).to_csv(results_dir + "md_" + strain + ".csv")
pd.DataFrame(masks).to_csv(results_dir + "masks_" + strain + ".csv")
macro = 2 * expit(logodds.T)
pd.DataFrame(macro).to_csv(results_dir + "macro_" + strain + ".csv")
pd.DataFrame(voc_vacc_product).to_csv(results_dir + "voc_vacc_product_" + strain + ".csv")
# calculate TP
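            # TP is the product of the macro-distancing (mobility) term 2*expit(X @ beta),
            # the micro-distancing and mask-wearing factors, the baseline R_Li for this
            # state, and the combined VoC x vaccination term.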
R_L = (
2 * expit(logodds.T)
* md
* masks
* sim_R
* voc_vacc_product
)
# now we increase TP by 15% based on school reopening (this code can probably be reused
# but inferring it would be pretty difficult
# due to lockdowns and various interruptions since March 2020)
if scenarios[state] == "school_opening_2022":
R_L[dd.values >= pd.to_datetime(scenario_dates[state]), :] = (
1.15 * R_L[dd.values >= pd.to_datetime(scenario_dates[state]), :]
)
# calculate summary stats
R_L_med = np.median(R_L, axis=1)
R_L_lower = np.percentile(R_L, 25, axis=1)
R_L_upper = np.percentile(R_L, 75, axis=1)
R_L_bottom = np.percentile(R_L, 5, axis=1)
R_L_top = np.percentile(R_L, 95, axis=1)
# R_L
state_Rs["state"].extend([state] * df_state.shape[0])
state_Rs["type"].extend([typ] * df_state.shape[0])
state_Rs["date"].extend(dd.values) # repeat mob_samples times?
state_Rs["lower"].extend(R_L_lower)
state_Rs["median"].extend(R_L_med)
state_Rs["upper"].extend(R_L_upper)
state_Rs["top"].extend(R_L_top)
state_Rs["bottom"].extend(R_L_bottom)
state_Rs["mean"].extend(np.mean(R_L, axis=1))
state_Rs["std"].extend(np.std(R_L, axis=1))
state_R[state] = R_L
# generate a summary for the R_I
for state in states:
# R_I
if strain == "Delta":
R_I = samples["R_I"].values[:df_state.shape[0]]
elif strain == "Omicron":
# if Omicron period, then we need to multiply in the VoC effect as there's a period
# in the fitting where Delta and Omicron overlap (i.e. R_I = R_I * P(t) where P(t) is
# a product term).
R_I = samples["R_I_omicron"].values[:df_state.shape[0]]
state_Rs["state"].extend([state] * df_state.shape[0])
state_Rs["type"].extend(["R_I"] * df_state.shape[0])
state_Rs["date"].extend(dd.values)
state_Rs["lower"].extend(np.repeat(np.percentile(R_I, 25), df_state.shape[0]))
state_Rs["median"].extend(np.repeat(np.median(R_I), df_state.shape[0]))
state_Rs["upper"].extend(np.repeat(np.percentile(R_I, 75), df_state.shape[0]))
state_Rs["top"].extend(np.repeat(np.percentile(R_I, 95), df_state.shape[0]))
state_Rs["bottom"].extend(np.repeat(np.percentile(R_I, 5), df_state.shape[0]))
state_Rs["mean"].extend(np.repeat(np.mean(R_I), df_state.shape[0]))
state_Rs["std"].extend(np.repeat(np.std(R_I), df_state.shape[0]))
df_Rhats = pd.DataFrame().from_dict(state_Rs)
df_Rhats = df_Rhats.set_index(["state", "date", "type"])
d = pd.DataFrame()
for state in states:
for i, typ in enumerate(forecast_type):
if i == 0:
t = pd.DataFrame.from_dict(state_R[state])
t["date"] = dd.values
t["state"] = state
t["type"] = typ
else:
temp = pd.DataFrame.from_dict(state_R[state])
temp["date"] = dd.values
temp["state"] = state
temp["type"] = typ
t = t.append(temp)
# R_I
if strain == "Delta":
# use the Delta import reproduction number before Omicron starts
i = pd.DataFrame(np.tile(samples["R_I"].values, (len(dd.values), 1)))
elif strain == "Omicron":
# use the Omicron import reproduction number after Omicron starts
i = pd.DataFrame(np.tile(samples["R_I_omicron"].values, (len(dd.values), 1)))
i["date"] = dd.values
i["type"] = "R_I"
i["state"] = state
t = t.append(i)
d = d.append(t)
d = d.set_index(["state", "date", "type"])
df_Rhats = df_Rhats.join(d)
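        # The join attaches the raw sample trajectories (integer columns 0..mob_samples-1)
        # to the summary statistics so individual TP paths can be written out below.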
df_Rhats = df_Rhats.reset_index()
df_Rhats.state = df_Rhats.state.astype(str)
df_Rhats.type = df_Rhats.type.astype(str)
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
row = i // 2
col = i % 2
plot_df = df_Rhats.loc[(df_Rhats.state == state) & (df_Rhats.type == "R_L")].copy()
# split the TP into pre data date and after
plot_df_backcast = plot_df.loc[plot_df["date"] <= data_date].copy()
plot_df_forecast = plot_df.loc[plot_df["date"] > data_date].copy()
# plot the backcast TP
ax[row, col].plot(plot_df_backcast.date, plot_df_backcast["median"], color="C0")
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["lower"],
plot_df_backcast["upper"],
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["bottom"],
plot_df_backcast["top"],
alpha=0.4,
color="C0",
)
# plot the forecast TP
ax[row, col].plot(plot_df_forecast.date, plot_df_forecast["median"], color="C1")
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["lower"],
plot_df_forecast["upper"],
alpha=0.4,
color="C1",
)
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["bottom"],
plot_df_forecast["top"],
alpha=0.4,
color="C1",
)
ax[row, col].tick_params("x", rotation=90)
ax[row, col].set_title(state)
ax[row, col].set_yticks(
[1],
minor=True,
)
ax[row, col].set_yticks([0, 2, 4, 6], minor=False)
ax[row, col].set_yticklabels([0, 2, 4, 6], minor=False)
ax[row, col].yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
ax[row, col].set_ylim((0, 6))
# ax[row, col].set_xticks([plot_df.date.values[-n_forecast]], minor=True)
ax[row, col].axvline(data_date, ls="-.", color="black", lw=1)
# plot window start date
plot_window_start_date = min(
pd.to_datetime(today) - timedelta(days=6 * 30),
sim_start_date - timedelta(days=truncation_days),
)
# create a plot window over the last six months
ax[row, col].set_xlim(
plot_window_start_date,
pd.to_datetime(today) + timedelta(days=num_forecast_days),
)
# plot the start date
ax[row, col].axvline(sim_start_date, ls="--", color="green", lw=2)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=2)
fig.text(
0.03,
0.5,
"Transmission potential",
va="center",
ha="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.525, 0.02, "Date", va="center", ha="center", fontsize=20)
plt.tight_layout(rect=[0.04, 0.04, 1, 1])
plt.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/TP_6_month_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".png",
dpi=144,
)
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
row = i // 2
col = i % 2
plot_df = df_Rhats.loc[(df_Rhats.state == state) & (df_Rhats.type == "R_L")].copy()
# split the TP into pre data date and after
plot_df_backcast = plot_df.loc[plot_df["date"] <= data_date].copy()
plot_df_forecast = plot_df.loc[plot_df["date"] > data_date].copy()
# plot the backcast TP
ax[row, col].plot(plot_df_backcast.date, plot_df_backcast["median"], color="C0")
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["lower"],
plot_df_backcast["upper"],
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
plot_df_backcast.date,
plot_df_backcast["bottom"],
plot_df_backcast["top"],
alpha=0.4,
color="C0",
)
# plot the forecast TP
ax[row, col].plot(plot_df_forecast.date, plot_df_forecast["median"], color="C1")
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["lower"],
plot_df_forecast["upper"],
alpha=0.4,
color="C1",
)
ax[row, col].fill_between(
plot_df_forecast.date,
plot_df_forecast["bottom"],
plot_df_forecast["top"],
alpha=0.4,
color="C1",
)
ax[row, col].tick_params("x", rotation=90)
ax[row, col].set_title(state)
ax[row, col].set_yticks(
[1],
minor=True,
)
ax[row, col].set_yticks([0, 2, 4, 6], minor=False)
ax[row, col].set_yticklabels([0, 2, 4, 6], minor=False)
ax[row, col].yaxis.grid(which="minor", linestyle="--", color="black", linewidth=2)
ax[row, col].set_ylim((0, 6))
# ax[row, col].set_xticks([plot_df.date.values[-n_forecast]], minor=True)
ax[row, col].axvline(data_date, ls="-.", color="black", lw=1)
# plot window start date
plot_window_start_date = min(
pd.to_datetime(today) - timedelta(days=12 * 30),
sim_start_date - timedelta(days=truncation_days),
)
# create a plot window over the last six months
ax[row, col].set_xlim(
plot_window_start_date,
pd.to_datetime(today) + timedelta(days=num_forecast_days),
)
# plot the start date
ax[row, col].axvline(sim_start_date, ls="--", color="green", lw=2)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=2)
fig.text(
0.03,
0.5,
"Transmission potential",
va="center",
ha="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.525, 0.02, "Date", va="center", ha="center", fontsize=20)
plt.tight_layout(rect=[0.04, 0.04, 1, 1])
print("============")
print("Saving results")
print("============")
plt.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/TP_12_month_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".png",
dpi=144,
)
# save values for the functional omicron related proportions for each state
prop_omicron_vars = ("r", "tau", "m0", "m1")
for (kk, state) in enumerate(states):
            # df_R is sorted by date, so rows are dates and columns are predictors
df_state = df_R.loc[df_R.state == state].copy()
for v in prop_omicron_vars:
# take right size of the values to be N by N
y = samples[v + "[" + str(kk + 1) + "]"].values
pd.DataFrame(y[:mob_samples]).to_csv(
results_dir
+ v
+ "_"
+ state
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
# now we save the sampled TP paths
# convert the appropriate sampled susceptible depletion factors to a csv and save them for simulation
# NOTE: this will not save an updated median, mean etc for the R_I's. We don't use it so it's not
# really important but it should be noted for later if we are comparing things. The step function
# R_I -> R_I_omicron, is noticeable and shouldn't be overlooked.
df_Rhats = df_Rhats[
["state", "date", "type", "median", "bottom", "lower", "upper", "top"]
+ [i for i in range(mob_samples)]
]
# # save the file as a csv (easier to handle in Julia for now)
df_Rhats.to_csv(
results_dir
+ "soc_mob_R_"
+ strain
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
return None
def calculate_Reff_local(
Reff,
R_I,
R_I_omicron,
voc_effect,
prop_import,
omicron_start_day,
):
"""
Apply the same mixture model idea as per the TP model to get
R_eff^L = (R_eff - rho * RI)/(1 - rho)
and use this to weight the TP historically.
"""
    # calculate this all in one step. Note that we set the local Reff to 0 if
    # prop_import = 1, as in that instance the relationship breaks down due to division by 0.
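    # Illustrative example: with Reff = 1.2, prop_import = 0.5 and R_I = 0.3 (pre-Omicron),
    # the local component is (1.2 - 0.5 * 0.3) / (1 - 0.5) = 2.1.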
Reff_local = np.zeros(shape=Reff.shape[0])
for n in range(len(Reff_local)):
# adjust the Reff based on the time period of interest
if n < omicron_start_day:
R_I_tmp = R_I
else:
R_I_tmp = R_I_omicron * voc_effect
if prop_import[n] < 1:
Reff_local[n] = (Reff[n] - prop_import[n] * R_I_tmp) / (1 - prop_import[n])
else:
Reff_local[n] = 0
# Reff_local = [
# (Reff[t] - prop_import[t] * R_I) / (1 - prop_import[t])
# if prop_import[t] < 1 else -1 for t in range(Reff.shape[0])
# ]
return Reff_local
def adjust_TP(data_date):
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
n_days_nowcast_TP_adjustment,
mob_samples,
)
print("============")
print("Adjusting TP forecasts using data from", data_date)
print("============")
data_date = pd.to_datetime(data_date)
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
sim_start_date = pd.to_datetime(sim_start_date)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start=third_start_date, end=third_end_date).values,
"NT": pd.date_range(start="2021-12-01", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-11-25", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-08-01", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# Load in vaccination data by state and date which should have the same date as the
# NNDSS/linelist data use the inferred VE
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
    surveys.date = pd.to_datetime(surveys.date)
from collections import OrderedDict, defaultdict
import re
import operator
import os
import pickle
import numpy as np
import pandas as pd
from sklearn.metrics import auc, precision_recall_curve, roc_curve, f1_score
from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold
from tqdm import tqdm_notebook
import torch
from snorkel.labeling import LabelModel
from .dataframe_helper import generate_results_df
def indexed_combination(seq, n):
# obtained from
# https://stackoverflow.com/questions/47234958/generate-a-random-equally-probable-combination-from-a-list-in-python
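    # Each set bit of n selects the corresponding element of seq; for example,
    # seq = ['a', 'b', 'c'] with n = 0b101 (= 5) gives ['a', 'c'].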
result = []
for u in seq:
if n & 1:
result.append(u)
n >>= 1
if not n:
break
return result
def sample_lfs(lf_list, size_of_sample_pool, size_per_sample, number_of_samples, random_state=100):
    np.random.seed(random_state)
bit_list = [1 if i < size_per_sample else 0 for i in range(size_of_sample_pool)]
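    # Sampling scheme: shuffle a bit mask containing exactly `size_per_sample` ones, pack
    # the bits into an integer, then decode that integer back into a subset of lf_list.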
already_seen = set({})
lf_combinations = []
for sample in range(number_of_samples):
# sample with replacement
        np.random.shuffle(bit_list)
# obtained from
# https://stackoverflow.com/questions/12461361/bits-list-to-integer-in-python
out=0
for bit in bit_list:
out = (out << 1) | bit
lf_combinations.append(indexed_combination(lf_list, out))
return lf_combinations
def train_model_random_lfs(randomly_sampled_lfs, train_matrix, dev_matrix, dev_labels, test_matrix, regularization_grid):
hyper_grid_results = defaultdict(dict)
train_grid_results = defaultdict(dict)
dev_grid_results = defaultdict(dict)
test_grid_results = defaultdict(dict)
models = defaultdict(dict)
for lf_sample in tqdm_notebook(enumerate(randomly_sampled_lfs)):
for param in regularization_grid:
label_model=LabelModel(cardinality=2)
label_model.fit(
train_matrix[:,lf_sample[1]], n_epochs=1000,
seed=100, lr=0.01, l2=param,
)
# Get marginals for each parameter
hyper_grid_results[str(param)] = roc_curve(
dev_labels,
label_model.predict_proba(dev_matrix[:,lf_sample[1]])[:,1]
)
# Convert marginals into AUROCs
hyper_grid_results = {
param:auc(hyper_grid_results[param][0], hyper_grid_results[param][1])
for param in hyper_grid_results
}
# Select the parameter with the highest AUROC
best_param = float(max(hyper_grid_results.items(), key=operator.itemgetter(1))[0])
# Re-fit the model
label_model.fit(
train_matrix[:,lf_sample[1]], n_epochs=1000,
seed=100, lr=0.01, l2=best_param,
)
# Save marginals for output
key = f'{lf_sample[0]}:{",".join(map(str, lf_sample[1]))}'
train_grid_results[key] = label_model.predict_proba(train_matrix[:,lf_sample[1]])
dev_grid_results[key] = label_model.predict_proba(dev_matrix[:,lf_sample[1]])
test_grid_results[key] = label_model.predict_proba(test_matrix[:,lf_sample[1]])
models[key] = label_model
return train_grid_results, dev_grid_results, test_grid_results, models
def get_model_performance(gold_labels, result_grid, num_lfs=1):
records = []
for key in result_grid:
precision, recall, _ = precision_recall_curve(
gold_labels,
result_grid[key][:,1]
)
fp, tp, _ = roc_curve(
gold_labels,
result_grid[key][:,1]
)
records.append({
"aupr":auc(recall, precision),
"auroc":auc(fp,tp),
"lf_sample": key,
"lf_num":num_lfs
})
    return pd.DataFrame.from_records(records)
#!/usr/bin/env python
import os
import csv
import sys
import time
import glob
import logging
import warnings
import argparse
import traceback
import multiprocessing
import Bio
import numpy as np
import pandas as pd
from tqdm import tqdm
from Bio import SeqIO
import concurrent.futures
from concurrent import futures
from inStrain import SNVprofile
from collections import defaultdict
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from Bio.codonalign.codonalphabet import default_codon_table
import inStrain.SNVprofile
import inStrain.controller
#import inStrain.profileUtilities
import inStrain.logUtils
class Controller():
'''
The command line access point to the program
'''
def main(self, args):
'''
The main method when run on the command line
'''
# Parse arguments
args = self.validate_input(args)
vargs = vars(args)
IS = vargs.pop('IS')
GF = vargs.pop('gene_file')
# Read the genes file
logging.debug('Loading genes')
scaff_2_gene_database, scaff2gene2sequence = parse_genes(GF, **vargs)
GdbP = pd.concat([x for x in scaff_2_gene_database.values()])
# Calculate all your parallelized gene-level stuff
name2result = calculate_gene_metrics(IS, GdbP, scaff2gene2sequence, **vargs)
# Store information
IS.store('genes_fileloc', GF, 'value', 'Location of genes file that was used to call genes')
IS.store('genes_table', GdbP, 'pandas', 'Location of genes in the associated genes_file')
IS.store('genes_coverage', name2result['coverage'], 'pandas', 'Coverage of individual genes')
IS.store('genes_clonality', name2result['clonality'], 'pandas', 'Clonality of individual genes')
IS.store('genes_SNP_count', name2result['SNP_density'], 'pandas', 'SNP density and counts of individual genes')
IS.store('SNP_mutation_types', name2result['SNP_mutation_types'], 'pandas', 'The mutation types of SNPs')
if vargs.get('store_everything', False):
            IS.store('scaff2gene2sequence', scaff2gene2sequence, 'pickle', 'Dictionary of scaff -> gene -> nucleotide sequence')
# Store the output
IS.generate('gene_info', **vargs)
IS.generate("SNVs", **vargs)
def validate_input(self, args):
'''
Validate and mess with the arguments a bit
'''
# Make sure the IS object is OK and load it
assert os.path.exists(args.IS)
args.IS = inStrain.SNVprofile.SNVprofile(args.IS)
# Set up the logger
log_loc = args.IS.get_location('log') + 'log.log'
inStrain.controller.setup_logger(log_loc)
return args
def gene_profile_worker(gene_cmd_queue, gene_result_queue, single_thread=False):
'''
Worker to profile splits
'''
while True:
# Get command
if not single_thread:
cmds = gene_cmd_queue.get(True)
else:
try:
cmds = gene_cmd_queue.get(timeout=5)
except:
return
# Process cmd
GPs = profile_genes_wrapper(cmds)
gene_result_queue.put(GPs)
def profile_genes_wrapper(cmds):
'''
Take a group of commands and run geneprofile
'''
results = []
for cmd in cmds:
try:
results.append(profile_genes(cmd.scaffold, **cmd.arguments))
except Exception as e:
print(e)
traceback.print_exc()
logging.error("FAILURE GeneException {0}".format(str(cmd.scaffold)))
results.append(None)
return results
def calculate_gene_metrics(IS, GdbP, scaff2gene2sequenceP, **kwargs):
'''
Calculate the metrics of all genes on a parallelized scaffold-level basis
IS = Initialized inStrain.SNVprofile
GdbP = List of gene locations
    scaff2gene2sequenceP = Dictionary of scaffold -> gene -> nucleotide sequence
'''
inStrain.logUtils.log_checkpoint("GeneProfile", "calculate_gene_metrics", "start")
# Get key word arguments for the wrapper
p = int(kwargs.get('processes', 6))
# Make a list of scaffolds to profile the genes of
scaffolds_with_genes = set(GdbP['scaffold'].unique())
scaffolds_in_IS = set(IS._get_covt_keys())
scaffolds_to_profile = scaffolds_with_genes.intersection(scaffolds_in_IS)
logging.info("{0} scaffolds with genes in the input; {1} scaffolds in the IS, {2} to compare".format(
len(scaffolds_with_genes), len(scaffolds_in_IS), len(scaffolds_to_profile)))
# Calculate scaffold -> number of genes to profile
s2g = GdbP['scaffold'].value_counts().to_dict()
kwargs['s2g'] = s2g
# Make global objects for the profiling
inStrain.logUtils.log_checkpoint("GeneProfile", "make_globals", "start")
global CumulativeSNVtable
CumulativeSNVtable = IS.get('cumulative_snv_table')
if len(CumulativeSNVtable) > 0:
CumulativeSNVtable = CumulativeSNVtable.sort_values('mm')
else:
CumulativeSNVtable = pd.DataFrame(columns=['scaffold'])
global covTs
covTs = IS.get('covT', scaffolds=scaffolds_to_profile)
global clonTs
clonTs = IS.get('clonT', scaffolds=scaffolds_to_profile)
global scaff2gene2sequence
scaff2gene2sequence = scaff2gene2sequenceP
global Gdb
Gdb = GdbP
inStrain.logUtils.log_checkpoint("GeneProfile", "make_globals", "end")
# Generate commands and queue them
logging.debug('Creating commands')
cmd_groups = [x for x in iterate_commands(scaffolds_to_profile, Gdb, kwargs)]
logging.debug('There are {0} cmd groups'.format(len(cmd_groups)))
inStrain.logUtils.log_checkpoint("GeneProfile", "create_queue", "start")
gene_cmd_queue = multiprocessing.Queue()
gene_result_queue = multiprocessing.Queue()
GeneProfiles = []
for cmd_group in cmd_groups:
gene_cmd_queue.put(cmd_group)
inStrain.logUtils.log_checkpoint("GeneProfile", "create_queue", "end")
if p > 1:
logging.debug('Establishing processes')
processes = []
for i in range(0, p):
processes.append(multiprocessing.Process(target=gene_profile_worker, args=(gene_cmd_queue, gene_result_queue)))
for proc in processes:
proc.start()
# Set up progress bar
pbar = tqdm(desc='Profiling genes: ', total=len(cmd_groups))
# Get the results
recieved_profiles = 0
while recieved_profiles < len(cmd_groups):
GPs = gene_result_queue.get()
recieved_profiles += 1
pbar.update(1)
for GP in GPs:
if GP is not None:
logging.debug(GP[4])
GeneProfiles.append(GP)
# Close multi-processing
for proc in processes:
proc.terminate()
# Close progress bar
pbar.close()
else:
gene_profile_worker(gene_cmd_queue, gene_result_queue, single_thread=True)
logging.info("Done profiling genes")
# Get the genes
recieved_profiles = 0
while recieved_profiles < len(cmd_groups):
logging.debug('going to grab at {0}'.format(recieved_profiles))
GPs = gene_result_queue.get(timeout=5)
logging.debug('did a grab at {0}'.format(recieved_profiles))
recieved_profiles += 1
for GP in GPs:
if GP is not None:
logging.debug(GP[4])
GeneProfiles.append(GP)
inStrain.logUtils.log_checkpoint("GeneProfile", "return_results", "start")
name2result = {}
for i, name in enumerate(['coverage', 'clonality', 'SNP_density', 'SNP_mutation_types']):
name2result[name] = pd.concat([G[i] for G in GeneProfiles])
inStrain.logUtils.log_checkpoint("GeneProfile", "return_results", "end")
inStrain.logUtils.log_checkpoint("GeneProfile", "calculate_gene_metrics", "end")
return name2result
def profile_genes(scaffold, **kwargs):
'''
This is the money that gets multiprocessed
Relies on having a global "Gdb", "gene2sequence", "CumulativeSNVtable", "covTs", and "clonTs"
* Calculate the clonality, coverage, linkage, and SNV_density for each gene
* Determine whether each SNP is synynomous or nonsynonymous
'''
# Log
pid = os.getpid()
log_message = "\nSpecialPoint_genes {0} PID {1} whole start {2}".format(scaffold, pid, time.time())
# For testing purposes
if ((scaffold == 'FailureScaffoldHeaderTesting')):
assert False
# Get the list of genes for this scaffold
gdb = Gdb[Gdb['scaffold'] == scaffold]
# Calculate gene-level coverage
log_message += "\nSpecialPoint_genes {0} PID {1} coverage start {2}".format(scaffold, pid, time.time())
if scaffold not in covTs:
logging.info("{0} isnt in covT!".format(scaffold))
cdb = pd.DataFrame()
else:
covT = covTs[scaffold]
cdb = calc_gene_coverage(gdb, covT)
del covT
log_message += "\nSpecialPoint_genes {0} PID {1} coverage end {2}".format(scaffold, pid, time.time())
# Calculate gene-level clonality
log_message += "\nSpecialPoint_genes {0} PID {1} clonality start {2}".format(scaffold, pid, time.time())
if scaffold not in clonTs:
logging.info("{0} isnt in clovT!".format(scaffold))
cldb = pd.DataFrame()
else:
clonT = clonTs[scaffold]
cldb = calc_gene_clonality(gdb, clonT)
del clonT
log_message += "\nSpecialPoint_genes {0} PID {1} clonality end {2}".format(scaffold, pid, time.time())
# Determine whether SNPs are synonmous or non-synonmous
log_message += "\nSpecialPoint_genes {0} PID {1} SNP_character start {2}".format(scaffold, pid, time.time())
Ldb = CumulativeSNVtable[CumulativeSNVtable['scaffold'] == scaffold]
if len(Ldb) == 0:
sdb = pd.DataFrame()
else:
sdb = Characterize_SNPs_wrapper(Ldb, gdb, scaff2gene2sequence[scaffold])
log_message += "\nSpecialPoint_genes {0} PID {1} SNP_character end {2}".format(scaffold, pid, time.time())
# Calculate gene-level SNP counts
log_message += "\nSpecialPoint_genes {0} PID {1} SNP_counts start {2}".format(scaffold, pid, time.time())
if len(Ldb) == 0:
ldb = pd.DataFrame()
sublog = ''
else:
#ldb = calc_gene_snp_density(gdb, Ldb)
ldb, sublog = calc_gene_snp_counts(gdb, Ldb, sdb, scaff2gene2sequence[scaffold], scaffold=scaffold)
log_message += "\nSpecialPoint_genes {0} PID {1} SNP_counts end {2}".format(scaffold, pid, time.time())
log_message += sublog
log_message += "\nSpecialPoint_genes {0} PID {1} whole end {2}".format(scaffold, pid, time.time())
results = (cdb, cldb, ldb, sdb, log_message)
return results
def profile_genes_from_profile(scaffold, gdb, covT, clonT, Ldb, gene2sequence):
'''
Call profile genes from elsewhere
Arguments:
scaffold = name of scaffold
gdb = gene_datatable
covT = covT for this scaffold
clonT = clonT for this scaffold
Ldb = cumulative SNP table for this scaffold
    * Calculate the clonality, coverage, and SNV density for each gene
* Determine whether each SNP is synynomous or nonsynonymous
'''
# Log
log = inStrain.logUtils.get_worker_log('ProfileGenes', scaffold, 'start')
# For testing purposes
if ((scaffold == 'FailureScaffoldHeaderTesting')):
assert False
# Calculate gene-level coverage
#log_message += "\nSpecialPoint_genes {0} PID {1} coverage start {2}".format(scaffold, pid, time.time())
cdb = calc_gene_coverage(gdb, covT)
#log_message += "\nSpecialPoint_genes {0} PID {1} coverage end {2}".format(scaffold, pid, time.time())
# Calculate gene-level clonality
# log_message += "\nSpecialPoint_genes {0} PID {1} clonality start {2}".format(scaffold, pid, time.time())
cldb = calc_gene_clonality(gdb, clonT)
# log_message += "\nSpecialPoint_genes {0} PID {1} clonality end {2}".format(scaffold, pid, time.time())
# Determine whether SNPs are synonmous or non-synonmous
#log_message += "\nSpecialPoint_genes {0} PID {1} SNP_character start {2}".format(scaffold, pid, time.time())
sdb = Characterize_SNPs_wrapper(Ldb, gdb, gene2sequence)
#log_message += "\nSpecialPoint_genes {0} PID {1} SNP_character end {2}".format(scaffold, pid, time.time())
# Calculate gene-level SNP counts
#log_message += "\nSpecialPoint_genes {0} PID {1} SNP_counts start {2}".format(scaffold, pid, time.time())
ldb, sublog = calc_gene_snp_counts(gdb, Ldb, sdb, gene2sequence, scaffold=scaffold)
#log_message += "\nSpecialPoint_genes {0} PID {1} SNP_counts end {2}".format(scaffold, pid, time.time())
#log_message += sublog
log += inStrain.logUtils.get_worker_log('ProfileGenes', scaffold, 'end')
results = (cdb, cldb, ldb, sdb, log)
return results
def calc_gene_coverage(gdb, covT):
'''
Gene-level and mm-level coverage
'''
table = defaultdict(list)
for mm, cov in iterate_covT_mms(covT):
if len(cov) == 0:
continue
for i, row in gdb.iterrows():
gcov = cov.loc[int(row['start']):int(row['end'])]
gLen = abs(row['end'] - row['start']) + 1
table['gene'].append(row['gene'])
table['coverage'].append(gcov.sum() / gLen)
table['breadth'].append(len(gcov) / gLen)
table['mm'].append(mm)
return pd.DataFrame(table)
def iterate_clonT_mms(clonT):
p2c = {}
mms = sorted([int(mm) for mm in list(clonT.keys())])
for mm in mms:
for pos, val in clonT[mm].items():
p2c[pos] = val
inds = []
vals = []
for ind in sorted(p2c.keys()):
inds.append(ind)
vals.append(p2c[ind])
yield mm, pd.Series(data = vals, index = np.array(inds).astype('int'))
def iterate_covT_mms(clonT):
counts = pd.Series()
mms = sorted([int(mm) for mm in list(clonT.keys())])
for mm in mms:
count = clonT[mm]
counts = counts.add(count, fill_value=0)
yield mm, counts
def calc_gene_clonality(gdb, clonT):
'''
Gene-level and mm-level clonality
'''
table = defaultdict(list)
for mm, cov in iterate_clonT_mms(clonT):
if len(cov) == 0:
continue
for i, row in gdb.iterrows():
gcov = cov.loc[int(row['start']):int(row['end'])]
gLen = abs(row['end'] - row['start']) + 1
table['gene'].append(row['gene'])
try:
microdiversity = 1 - gcov.mean()
except :
microdiversity = np.nan
#table['clonality'].append(gcov.mean())
table['nucl_diversity'].append(microdiversity)
table['breadth_minCov'].append(len(gcov) / gLen)
table['mm'].append(mm)
return pd.DataFrame(table)
#
# def calc_gene_snp_density(gdb, ldb):
# '''
# Gene-level and mm-level clonality
# '''
# table = defaultdict(list)
#
# for mm in sorted(ldb['mm'].unique()):
# db = ldb[ldb['mm'] <= mm].drop_duplicates(subset=['scaffold', 'position'], keep='last')
# cov = db.set_index('position')['ref_base'].sort_index()
# if len(cov) == 0:
# continue
#
# for i, row in gdb.iterrows():
# gcov = cov.loc[int(row['start']):int(row['end'])]
# gLen = abs(row['end'] - row['start']) + 1
#
# table['gene'].append(row['gene'])
# table['SNPs_per_bp'].append(len(gcov) / gLen)
# table['mm'].append(mm)
#
# return pd.DataFrame(table)
def count_sites(seq, k=1, codon_table=None):
'''
From a nucleotide sequence and codon table, calculate S and N sites
'''
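    # Each codon's single-nucleotide neighbours are classified as synonymous or
    # non-synonymous (transversions weighted by k), then normalised so every codon
    # contributes exactly 3 sites split between S_site and N_site.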
codon_lst = convert_to_codons(seq)
if codon_table is None:
codon_table = default_codon_table
S_site = 0.0 # synonymous sites
N_site = 0.0 # non-synonymous sites
purine = ('A', 'G')
pyrimidine = ('T', 'C')
base_tuple = ('A', 'T', 'C', 'G')
for codon in codon_lst:
neighbor_codon = {'transition': [], 'transversion': []}
# classify neighbor codons
codon = codon.replace('U', 'T')
if codon == '---':
continue
if 'N' in codon:
continue
for n, i in enumerate(codon):
for j in base_tuple:
if i == j:
pass
elif i in purine and j in purine:
codon_chars = [c for c in codon]
codon_chars[n] = j
this_codon = ''.join(codon_chars)
neighbor_codon['transition'].append(this_codon)
elif i in pyrimidine and j in pyrimidine:
codon_chars = [c for c in codon]
codon_chars[n] = j
this_codon = ''.join(codon_chars)
neighbor_codon['transition'].append(this_codon)
else:
codon_chars = [c for c in codon]
codon_chars[n] = j
this_codon = ''.join(codon_chars)
neighbor_codon['transversion'].append(this_codon)
# count synonymous and non-synonymous sites
#codon = codon.replace('T', 'U')
if (codon in ['TAG', 'TAA', 'TGA']):
#print("STOP DETECTED")
continue
aa = codon_table.forward_table[codon]
this_codon_N_site = this_codon_S_site = 0
for neighbor in neighbor_codon['transition']:
if neighbor in codon_table.stop_codons:
this_codon_N_site += 1
elif codon_table.forward_table[neighbor] == aa:
this_codon_S_site += 1
else:
this_codon_N_site += 1
for neighbor in neighbor_codon['transversion']:
if neighbor in codon_table.stop_codons:
this_codon_N_site += k
elif codon_table.forward_table[neighbor] == aa:
this_codon_S_site += k
else:
this_codon_N_site += k
norm_const = (this_codon_N_site + this_codon_S_site)/3
S_site += float(this_codon_S_site) / float(norm_const)
N_site += float(this_codon_N_site) / float(norm_const)
return (S_site, N_site)
def convert_to_codons(seq):
codons = []
for c in zip(*(iter(seq),) * 3):
co = ''.join(c)
assert len(co) == 3
codons.append(co)
return codons
def calc_gene_snp_counts(gdb, ldb, sdb, gene2sequence, scaffold=None):
'''
Count the number of SNPs in each gene, as well as N and S sites
RELIES ON HAVING gene2sequence AS A GLOBAL (needed for multiprocessing speed)
    Arguments:
gdb = table of genes
ldb = Raw cumulative snp table for a single scaffold (mm-level)
sdb = SNP table with N and S and I annotated
'''
if len(ldb) == 0:
return pd.DataFrame(), ''
pid = os.getpid()
# Merge ldb and sdb
xdb = pd.merge(ldb, sdb[['position', 'mutation_type', 'gene']],
on=['position'], how='left').reset_index(drop=True)
# Calculate counts of N and S sites
log_message = "\nSpecialPoint_genes {0} PID {1} SNP_counts_SiteCalc start {2}".format(scaffold, pid, time.time())
table = defaultdict(list)
for i, row in gdb.iterrows():
try:
S_site, N_site = count_sites(gene2sequence[row['gene']])
except:
S_site = np.nan
N_site = np.nan
table['gene'].append(row['gene'])
table['S_sites'].append(S_site)
table['N_sites'].append(N_site)
SiteDb = pd.DataFrame(table)
log_message += "\nSpecialPoint_genes {0} PID {1} SNP_counts_SiteCalc end {2}".format(scaffold, pid, time.time())
log_message += "\nSpecialPoint_genes {0} PID {1} SNP_counts_geneCalc start {2}".format(scaffold, pid, time.time())
table = defaultdict(list)
for mm in sorted(xdb['mm'].unique()):
# Filter to this mm level and set up for quick indexing
fdb = xdb[xdb['mm'] <= mm].sort_values('mm').drop_duplicates(subset=['scaffold', 'position'], keep='last').sort_values('position').set_index('position')
for i, row in gdb.iterrows():
# Calculate gene length
gLen = abs(row['end'] - row['start']) + 1
# Subset to this gene
db = fdb.loc[int(row['start']):int(row['end'])]
# Report summary stuff
table['mm'].append(mm)
table['gene'].append(row['gene'])
table['gene_length'].append(gLen)
table['divergent_site_count'].append(len(db))
# Report type counts
for allele_count, name in zip([1, 2], ['SNS', 'SNV']):
table['{0}_count'.format(name)].append(len(db[db['allele_count'] == allele_count]))
for snp_type in ['N', 'S']:
table["{0}_{1}_count".format(name, snp_type)].append(
len(db[(db['allele_count'] == allele_count) & (db['mutation_type'] == snp_type)]))
GGdb = pd.DataFrame(table).merge(SiteDb, on='gene', how='left').reset_index(drop=True)
log_message += "\nSpecialPoint_genes {0} PID {1} SNP_counts_geneCalc end {2}".format(scaffold, pid, time.time())
# Calculate dn/ds
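    # dN/dS (and pN/pS) = (non-synonymous changes per N site) / (synonymous changes per
    # S site); left as NaN when there are no synonymous counts or sites to divide by.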
GGdb.loc[:, 'dNdS_substitutions'] = [((nC/nS) / (sC/sS)) if ((sC > 0) & (sS > 0)) else np.nan for nC, nS, sC, sS in zip(
GGdb['SNS_N_count'], GGdb['N_sites'],
GGdb['SNS_S_count'], GGdb['S_sites'])]
GGdb.loc[:, 'pNpS_variants'] = [((nC/nS) / (sC/sS)) if ((sC > 0) & (sS > 0)) else np.nan for nC, nS, sC, sS in zip(
GGdb['SNV_N_count'], GGdb['N_sites'],
GGdb['SNV_S_count'], GGdb['S_sites'])]
# GGdb['SNPs_per_bp'] = [x/y if y > 0 else np.nan for x, y in \
# zip(GGdb['divergent_site_count'], GGdb['gene_length'])]
return GGdb, log_message
def Characterize_SNPs_wrapper(Ldb, gdb, gene2sequence):
'''
A wrapper for characterizing SNPs
RELIES ON HAVING gene2sequence AS A GLOBAL (needed for multiprocessing speed)
Arguments:
Ldb = CumulativeSNVtable for a single scaffold
gdb = table of genes
Returns:
Sdb = The Cumulative SNV table with extra information added
'''
if len(Ldb) == 0:
return pd.DataFrame()
# Get a non-nonredundant list of SNPs
Sdb = Ldb.drop_duplicates(subset=['scaffold', 'position'], keep='last')\
.sort_index().drop(columns=['mm'])
Sdb.loc[:, 'position'] = Sdb['position'].astype(int)
# Filter out SNPs that shouldn't be profiled like this
# Sdb = Sdb[Sdb['cryptic'] == False]
# Sdb = Sdb.drop(columns="cryptic")
if 'morphia' in Sdb.columns:
col = 'morphia'
else:
col = 'allele_count'
Sdb[col] = Sdb[col].astype(int)
Sdb = Sdb[(Sdb[col] > 0) & (Sdb[col] <= 2)]
# Make sure some SNPs to profile remain
if len(Sdb) == 0:
return pd.DataFrame()
# Characterize
sdb = characterize_SNPs(gdb, Sdb, gene2sequence)
assert len(Sdb) == len(sdb)
sdb = pd.merge(Sdb, sdb, on=['position'], how='left').reset_index(drop=True)
# Return
return sdb
def characterize_SNPs(gdb, Sdb, gene2sequence):
'''
    Determine the type of SNP (synonymous, non-synonymous, or intergenic)
RELIES ON HAVING gene2sequence AS A GLOBAL (needed for multiprocessing speed)
'''
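    # mutation_type codes: 'I' = intergenic, 'M' = position falls in multiple genes,
    # 'S' = synonymous, 'N' = non-synonymous.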
table = defaultdict(list)
for i, row in Sdb.iterrows():
db = gdb[(gdb['start'] <= row['position']) & (gdb['end'] >= row['position'])]
if len(db) == 0:
table['position'].append(row['position'])
table['mutation_type'].append('I')
table['mutation'].append('')
table['gene'].append('')
elif len(db) > 1:
table['position'].append(row['position'])
table['mutation_type'].append('M')
table['mutation'].append('')
table['gene'].append(','.join(db['gene'].tolist()))
else:
# Get the original sequence
            original_sequence = gene2sequence[db['gene'].tolist()[0]]
if db['direction'].tolist()[0] == '-1':
original_sequence = original_sequence.reverse_complement()
# Make the new sequence
snp_start = row['position'] - db['start'].tolist()[0]
new_sequence = original_sequence.tomutable()
new_sequence[snp_start] = row['con_base']
if new_sequence[snp_start] == original_sequence[snp_start]:
new_sequence[snp_start] = row['var_base']
new_sequence = new_sequence.toseq()
# Translate
if db['direction'].tolist()[0] == '-1':
old_aa_sequence = original_sequence.reverse_complement().translate()
new_aa_sequence = new_sequence.reverse_complement().translate()
else:
old_aa_sequence = original_sequence.translate()
new_aa_sequence = new_sequence.translate()
# old_aa_sequence = original_sequence.translate()
# new_aa_sequence = new_sequence.translate()
# Find mutation
mut_type = 'S'
mut = 'S:' + str(snp_start)
for aa in range(0, len(old_aa_sequence)):
if new_aa_sequence[aa] != old_aa_sequence[aa]:
mut_type = 'N'
mut = 'N:' + str(old_aa_sequence[aa]) + str(snp_start) + str(new_aa_sequence[aa])
break
# Store
table['position'].append(row['position'])
table['mutation_type'].append(mut_type)
table['mutation'].append(mut)
table['gene'].append(db['gene'].tolist()[0])
return pd.DataFrame(table)
def iterate_commands(scaffolds_to_profile, Gdb, kwargs):
'''
Break into individual scaffolds
'''
processes = kwargs.get('processes', 6)
s2g = kwargs.get('s2g', None)
SECONDS = min(60, sum(calc_estimated_runtime(s2g[scaffold]) for scaffold in scaffolds_to_profile)/(processes+1))
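    # Batch commands so that each group represents roughly SECONDS of estimated runtime,
    # giving worker processes similarly sized chunks of work.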
cmds = []
seconds = 0
for scaffold, gdb in Gdb.groupby('scaffold'):
if scaffold not in scaffolds_to_profile:
continue
        # make this command
cmd = Command()
cmd.scaffold = scaffold
cmd.arguments = kwargs
# Add estimated seconds
seconds += calc_estimated_runtime(s2g[scaffold])
cmds.append(cmd)
# See if you're done
if seconds >= SECONDS:
yield cmds
seconds = 0
cmds = []
yield cmds
def calc_estimated_runtime(pairs):
SLOPE_CONSTANT = 0.01
return pairs * SLOPE_CONSTANT
class Command():
def __init__(self):
pass
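# Scheduling note (inferred from the helpers above): iterate_commands builds one
# Command per scaffold and yields a batch once the summed calc_estimated_runtime of
# its scaffolds reaches SECONDS, so worker processes receive roughly equal chunks of
# work instead of a single scaffold at a time.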
def parse_genes(gene_file_loc, **kwargs):
'''
Parse a file of genes based on the file extension.
Currently supported extensions are:
.fna / .fa (prodigal)
.gb / .gbk (genbank)
Methods return a table of genes (Gdb) and a dictionary of gene -> sequence
'''
if ((gene_file_loc[-4:] == '.fna') | (gene_file_loc[-3:] == '.fa')):
return parse_prodigal_genes(gene_file_loc)
elif ((gene_file_loc[-3:] == '.gb') | (gene_file_loc[-4:] == '.gbk')):
return parse_genbank_genes(gene_file_loc)
else:
print("I dont know how to process {0}".format(gene_file_loc))
raise Exception
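# Hypothetical usage sketch (file names are illustrative; both branches are expected
# to return the gene table and gene -> sequence mapping described in the docstring):
# Gdb, gene2sequence = parse_genes("genes.fna")   # prodigal FASTA -> parse_prodigal_genes
# Gdb, gene2sequence = parse_genes("genome.gbk")  # GenBank file   -> parse_genbank_genes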
def parse_prodigal_genes(gene_fasta):
'''
Parse the prodigal .fna file
Return a datatable with gene info and a dictionary of gene -> sequence
'''
scaff2gene2sequence = {}
scaff2geneinfo = {}
for record in SeqIO.parse(gene_fasta, 'fasta'):
gene = str(record.id)
scaff = "_".join(gene.split("_")[:-1])
if scaff not in scaff2geneinfo:
scaff2geneinfo[scaff] = defaultdict(list)
table = scaff2geneinfo[scaff]
table['gene'].append(gene)
table['scaffold'].append(scaff)
table['direction'].append(record.description.split("#")[3].strip())
table['partial'].append('partial=01' in record.description)
# NOTE: PRODIGAL USES A 1-BASED INDEX AND WE USE 0, SO CONVERT TO 0 HERE
table['start'].append(int(record.description.split("#")[1].strip())-1)
table['end'].append(int(record.description.split("#")[2].strip())-1)
if scaff not in scaff2gene2sequence:
scaff2gene2sequence[scaff] = {}
scaff2gene2sequence[scaff][gene] = record.seq
scaffs = list(scaff2geneinfo.keys())
for scaff in scaffs:
scaff2geneinfo[scaff] = | pd.DataFrame(scaff2geneinfo[scaff]) | pandas.DataFrame |
from typing import List
from functools import partial
import warnings
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pandas.api.types import is_numeric_dtype
from tqdm import tqdm
def plot_feature(
feat: str,
y_col: str = None, # janky, making kwarg for easier functools.partial...
df: pd.DataFrame = None,
is_binary_outcome: bool = None,
coerce_cat_thresh: float = 0.001,
graph_sample: int = 10_000,
cat_freq_thresh: float = 0.01,
do_scatter: bool = False,
ylabel: str = None,
) -> None:
try:
this_df = df.copy().dropna(subset=[feat, y_col])
is_cat = (this_df[feat].dtype == "object") or (hasattr(this_df[feat], "cat"))
cardinality = this_df[feat].nunique()
rel_cardinality = cardinality / len(this_df)
is_dt = "datetime" in this_df[feat].dtype.name # HACK
is_numeric = is_numeric_dtype(this_df[feat])
is_binary = cardinality == 2
plot_lowess = not is_binary_outcome
graph_as_cat = (
(is_cat or (rel_cardinality < coerce_cat_thresh))
and not is_dt
and not is_numeric
) or is_binary
if graph_as_cat:
freqs = this_df[feat].value_counts(normalize=True)
# Keep only levels whose relative frequency is >= cat_freq_thresh; value_counts already sorts by frequency
freqs = freqs.loc[freqs >= cat_freq_thresh]
levels_to_eval = freqs.index
if not list(levels_to_eval):
return None # very high cardinality, skip
plt.figure(figsize=(12, 8))
sns.catplot(
x=feat,
y=y_col,
data=this_df,
kind="point",
join=False,
order=levels_to_eval,
)
plt.xticks(rotation=90)
plt.title(f"{feat} -> {y_col}?")
if ylabel:
plt.ylabel(ylabel)
plt.show()
# consider dt to be days since minimum TS
elif is_dt and not is_numeric:
min_ts = this_df[feat].min()
days_since_min = (
pd.to_datetime(this_df[feat]) - pd.to_datetime(min_ts)
).dt.days
import json
import sys
import pprint
import pandas as pd
from .KeyWordsSearch import search_phrases
from .Preprocessing_tools import full_preprocess_text, prepare_files, open_text
from .constnats import key_phrase, key_meal, key_category, greeting_key, farewell_key, pay_key, \
additional_key, new_key, order_key, operator_id, client_id, vacancy_key, key_answer
from ..dirs import STAGES_FILE, DATA_NORMALIZED_SELLER_DIR, DATA_NORMALIZED_CLIENT_DIR
STAGE_NAMING = {'приветствие': 'Greeting', 'новинка': 'OfferOfNewProductsOrPromotions', 'заказ': 'Order',
'доп': 'AdditionalSales', 'оплата': 'Payment', 'прощание': 'CompletionSale'}
ADDITION_NAMING = {'соус': 'Sauces', 'напитк': 'Drinks', 'пив': 'Drinks', 'пепс кол': 'Drinks',
'десерт': 'Desserts', 'основн': 'Garnishes', 'гарнир': 'Garnishes', 'завтрак': 'Garnishes',
'салат': 'Garnishes', 'закуск': 'Snacks', 'букет и снекбокс': 'Snacks', 'проч': 'Etc',
'проч товар': 'Etc', 'игрушк': 'Etc',
'доп': 'Etc', 'холодн напитк': 'Drinks', 'горяч напитк': 'Drinks'
}
def format_result(final_stages_time, vacancy_response, additional_sell, additional_sell_response,
additional_sale_adequacy, missed_addition, missed_addition_list):
result = {}
result['Vacancy'] = {'VacancyAvailability': int(len(final_stages_time[vacancy_key]) > 0),
'Result': 0}
if len(vacancy_response) > 0:
result['Vacancy']['Result'] = int(vacancy_response[1] == 'положительн')
final_stages_time.pop(vacancy_key)
result['Script'] = {STAGE_NAMING[x]: int(len(final_stages_time[x]) != 0) for x in final_stages_time.keys()}
for k in final_stages_time.keys():
if result['Script'][STAGE_NAMING[k]] != 0:
result['Script'][f'{STAGE_NAMING[k]}Time'] = final_stages_time[k][1] - final_stages_time[k][0]
else:
result['Script'][f'{STAGE_NAMING[k]}Time'] = 0.0
result['AdditionalSales'] = {}
for group in ADDITION_NAMING.keys():
result['AdditionalSales'][ADDITION_NAMING[group]] = {'Proposed': 0, 'Successful': 0, 'Adequate': 0, 'Missed': 0}
if len(additional_sell) > 0:
for group in additional_sell[1]:
result['AdditionalSales'][ADDITION_NAMING[group]]['Proposed'] = 1
if len(additional_sell_response) > 0 and result['AdditionalSales'][ADDITION_NAMING[group]]['Proposed']:
result['AdditionalSales'][ADDITION_NAMING[group]]['Successful'] = int(
additional_sell_response[1] == 'agree')
if len(additional_sale_adequacy) > 0 and result['AdditionalSales'][ADDITION_NAMING[group]]['Proposed']:
result['AdditionalSales'][ADDITION_NAMING[group]]['Adequate'] = int(
additional_sale_adequacy[0] == 'ok')
result['AdditionalSales'][ADDITION_NAMING[group]]['Missed'] = 0
else:
for group in ADDITION_NAMING.values():
result['AdditionalSales'][group]['Successful'] = 0
result['AdditionalSales'][group]['Adequate'] = 0
result['AdditionalSales'][group]['Missed'] = int(
any([ADDITION_NAMING[miss] == group for miss in missed_addition_list]))
return result
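# Shape of the dictionary returned above (reconstructed from the code, not a spec):
# {
#   'Vacancy':         {'VacancyAvailability': 0/1, 'Result': 0/1},
#   'Script':          {<stage>: 0/1, '<stage>Time': seconds, ...},
#   'AdditionalSales': {<group>: {'Proposed': 0/1, 'Successful': 0/1,
#                                 'Adequate': 0/1, 'Missed': 0/1}, ...}
# }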
def parse_diarization_file(file_name):
all_phrases = open_text(file_name).split('\n')
operator_name = all_phrases[0].strip()
res = {client_id: [], operator_id: []}
max_time = 0.0
for seconds_text in all_phrases[1:]:
if seconds_text == '':
continue
start_end_text = seconds_text.split(', ')
max_time = float(start_end_text[1])
res[int(start_end_text[2])].append([float(start_end_text[0]), max_time,
full_preprocess_text(start_end_text[3])])
return res, max_time
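# Assumed diarization file layout, inferred from the parsing above (not a format spec):
# line 1:  operator name
# line 2+: "<start seconds>, <end seconds>, <speaker id>, <phrase text>"
# where <speaker id> matches the imported client_id / operator_id constants.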
def search_stage(full_text, file_stage):
all_info = pd.read_excel(file_stage, engine='openpyxl')
key_words = list(all_info[key_phrase].dropna().values)
return search_phrases(full_text, key_words)
def update_stages_id(stages_id, files, full_text, key):
for file in files:
found_ids = search_stage(full_text, DATA_NORMALIZED_SELLER_DIR / file)
if len(found_ids) > 0:
stages_id[key] += found_ids
if len(stages_id[key]) > 0:
new_ids = list(stages_id[key][0])
for i in range(1, len(stages_id[key])):
new_ids[1] = stages_id[key][i][1]
stages_id[key] = new_ids
def check_missed_addition(file, order):
all_info = pd.read_excel(file, engine='openpyxl')
order_category = find_order_category(DATA_NORMALIZED_SELLER_DIR / 'category_menu.xlsx', order)
res = set()
for index, row in all_info.iterrows():
if row['Order_category'] == order_category[0]:
if type(row['Ok']) == str:
res.add(row['Ok'])
return res
def check_adequacy(file, order, additional):
all_info = pd.read_excel(file, engine='openpyxl')
order_category = find_order_category(DATA_NORMALIZED_SELLER_DIR / 'category_menu.xlsx', order)
additional_category = find_order_category(DATA_NORMALIZED_SELLER_DIR / 'category_menu.xlsx', additional)
ok = []
not_ok = []
for index, row in all_info.iterrows():
if row['Order_category'] == order_category[0]:
if type(row['Ok']) == str:
ok.append(row['Ok'])
if type(row['Not_ok']) == str:
not_ok.append(row['Not_ok'])
res = ['UNK'] * len(additional_category)
for i, cat in enumerate(additional_category):
if cat in ok:
res[i] = 'ok'
if cat in not_ok:
res[i] = 'not_ok'
return res
def find_order_category(file, order):
all_info = pd.read_excel(file, engine='openpyxl')
meal_cat = {}
for index, row in all_info.iterrows():
meal_cat[row['Key_phrase']] = row['Category']
ids = search_phrases(order, list(meal_cat.keys()), interfere_thresh=0)
res = []
for id in ids:
res.append(order[id[0]:id[1]].strip())
return [meal_cat[x] for x in res]
def find_additional_response(files, stage_text):
all_info = pd.DataFrame(columns=['Key_phrase', 'category'])
for file in files:
tmp = pd.read_excel(DATA_NORMALIZED_CLIENT_DIR / file, engine='openpyxl')
tmp['category'] = pd.Series([file.split('.')[0].split('_')[1]] * len(tmp))
all_info = pd.concat([all_info, tmp], axis=0)
all_info = all_info.dropna()
response_cat = dict()
for index, row in all_info.iterrows():
response_cat[row['Key_phrase']] = row['category']
ids = search_phrases(stage_text, list(response_cat.keys()), interfere_thresh=0)
res = []
for id in ids:
res.append(stage_text[id[0]:id[1]].strip())
return res, [response_cat[x] for x in res]
def find_vacancy_response(file, stage_text):
all_info = pd.read_excel(DATA_NORMALIZED_SELLER_DIR / file, engine='openpyxl')
all_info = all_info.drop([key_phrase], axis=1).dropna()
response_cat = dict()
for index, row in all_info.iterrows():
response_cat[row[key_answer]] = row[key_category]
ids = search_phrases(stage_text, list(response_cat.keys()))
res = []
for id in ids:
res.append(stage_text[id[0]:id[1]].strip())
return res, [response_cat[x] for x in res]
def find_additional_cat(file, stage_text):
all_info = | pd.read_excel(DATA_NORMALIZED_SELLER_DIR / file, engine='openpyxl') | pandas.read_excel |
import sys
import json
import time
import numpy as np
import pandas as pd
import logging
import requests
def get_logger(logname):
# Create and configure logger
logging.basicConfig(filename='app.log',
filemode='a',
format='%(name)s: %(levelname)s - %(asctime)s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
# Creating an object
logger = logging.getLogger(logname)
# Setting the threshold of logger to DEBUG
logger.setLevel(logging.DEBUG)
return logger
def generate(host, port, token, data, interval_ms, verbose):
interval_secs = interval_ms / 1000.0
def iter_json(x):
payload = x.to_json()
# payload = json.dumps(x)
headers = {'Content-Type': 'application/json', }
if verbose:
logger.debug(payload)
url_post = 'http://{0}:{1}/api/v1/{2}/telemetry'.format(host, port, token)
requests.post(url_post, headers=headers, data=payload)
time.sleep(interval_secs)
print(payload)
while True:
data.apply(iter_json, axis=1)
# time.sleep(interval_secs)
# grouped_data = data.groupby("Timestamp")
# for timestamp, values in grouped_data:
# df = grouped_data.get_group(timestamp)
# df.apply(iter_json,axis=1)
# time.sleep(interval_secs)
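# Hypothetical usage sketch (host, port and token are illustrative placeholders):
# data = loaddata(<sensor file>, <labels file>, keys=[...])  # see loaddata below
# generate("localhost", 8080, "DEVICE_TOKEN", data, interval_ms=1000, verbose=True)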
def loaddata(filepath1, filepath2, keys):
drop_cols = True
cols_drop = ['op_setting3', 's1','s5','s6','s10','s14','s16','s18','s19']
data = | pd.read_csv(filepath1, sep=" ", header=None) | pandas.read_csv |
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import tqdm
import data_loader
import model
from config import DBGConfig
dbg_config = DBGConfig()
checkpoint_dir = dbg_config.checkpoint_dir
result_dir = dbg_config.result_dir
tscale = dbg_config.tscale
feature_dim = dbg_config.feature_dim
# test batch size
batch_size = dbg_config.test_batch_size
test_mode = dbg_config.test_mode
mask = np.zeros([tscale, tscale], np.float32)
for i in range(tscale):
for j in range(i, tscale):
mask[i, j] = 1
tf_mask = tf.convert_to_tensor(mask, tf.float32)
tf_mask = tf.reshape(tf_mask, [1, tscale, tscale, 1])
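# Note: mask is upper-triangular over (start index i, end index j), so multiplying the
# proposal maps by tf_mask below keeps only entries with j >= i, i.e. proposals whose
# end boundary does not precede their start boundary.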
if __name__ == "__main__":
X_feature = tf.placeholder(tf.float32, shape=(batch_size, tscale, feature_dim))
scores, iou_mat, x1, x2, xc, prop_start, prop_end = model.model(X_feature, training=False)
prop_start = prop_start * tf_mask
prop_end = prop_end * tf_mask
# boundary map fusion
pstart = tf.reduce_sum(prop_start, 2) / tf.maximum(tf.reduce_sum(tf_mask, 2), 1)
pend = tf.reduce_sum(prop_end, 1) / tf.maximum(tf.reduce_sum(tf_mask, 1), 1)
model_saver = tf.train.Saver()
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=tf_config)
tf.global_variables_initializer().run()
model_saver.restore(sess, os.path.join(checkpoint_dir, 'dbg_model_best'))
train_dict, val_dict, test_dict = data_loader.getDatasetDict()
if test_mode == 'validation':
video_dict = val_dict
else:
video_dict = test_dict
batch_video_list = data_loader.getBatchListTest(video_dict, batch_size, shuffle=False)
batch_result_xmin = []
batch_result_xmax = []
batch_result_iou = []
batch_result_pstart = []
batch_result_pend = []
print('Running DBG model ...')
for idx in tqdm.tqdm(range(len(batch_video_list))):
batch_anchor_xmin, batch_anchor_xmax, batch_anchor_feature = data_loader.getProposalDataTest(
batch_video_list[idx], video_dict)
out_iou, out_start, out_end = sess.run([iou_mat, pstart, pend],
feed_dict={X_feature: batch_anchor_feature})
batch_result_xmin.append(batch_anchor_xmin)
batch_result_xmax.append(batch_anchor_xmax)
batch_result_iou.append(out_iou[:, :, :, 0])
batch_result_pstart.append(out_start[:, :, 0])
batch_result_pend.append(out_end[:, :, 0])
print('Saving results ...')
columns = ["iou", "start", "end", "xmin", "xmax"]
for idx in tqdm.tqdm(range(len(batch_video_list))):
b_video = batch_video_list[idx]
b_xmin = batch_result_xmin[idx]
b_xmax = batch_result_xmax[idx]
b_iou = batch_result_iou[idx]
b_pstart = batch_result_pstart[idx]
b_pend = batch_result_pend[idx]
for j in range(len(b_video)):
tmp_video = b_video[j]
tmp_xmin = b_xmin[j]
tmp_xmax = b_xmax[j]
tmp_iou = b_iou[j]
tmp_pstart = b_pstart[j]
tmp_pend = b_pend[j]
res = []
# save all proposal results
for i in range(tscale):
for j in range(i, tscale):
start = tmp_pstart[i]
end = tmp_pend[j]
iou = tmp_iou[i, j]
res.append([iou, start, end, tmp_xmin[i], tmp_xmax[j]])
tmp_result = np.stack(res)
tmp_df = | pd.DataFrame(tmp_result, columns=columns) | pandas.DataFrame |
import numpy as np
import pytest
from pandas.compat import StringIO, lrange, product as cart_product
from pandas import DataFrame, Index, MultiIndex, concat, read_csv
import pandas.core.common as com
from pandas.util import testing as tm
class TestMultiIndexXs(object):
def test_xs_multiindex(self):
# GH2903
columns = MultiIndex.from_tuples(
[('a', 'foo'), ('a', 'bar'), ('b', 'hello'),
('b', 'world')], names=['lvl0', 'lvl1'])
df = DataFrame(np.random.randn(4, 4), columns=columns)
df.sort_index(axis=1, inplace=True)
result = df.xs('a', level='lvl0', axis=1)
expected = df.iloc[:, 0:2].loc[:, 'a']
tm.assert_frame_equal(result, expected)
result = df.xs('foo', level='lvl1', axis=1)
expected = df.iloc[:, 1:2].copy()
expected.columns = expected.columns.droplevel('lvl1')
tm.assert_frame_equal(result, expected)
def test_xs(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
xs = frame.xs(('bar', 'two'))
xs2 = frame.loc[('bar', 'two')]
| tm.assert_series_equal(xs, xs2) | pandas.util.testing.assert_series_equal |
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import (
classification_report,
confusion_matrix,
accuracy_score,
f1_score,
precision_score,
recall_score,
plot_confusion_matrix,
)
from typing import List
import pandas as pd
import matplotlib.pyplot as plt
class Model:
@staticmethod
def fit_and_predict(
X_train: pd.DataFrame,
X_test: pd.DataFrame,
y_train: pd.DataFrame,
y_test: pd.DataFrame,
list_model: List = None
) -> List:
models_base_predict = []
for result in list_model:
name, model = result
model.fit(X_train, y_train)
predict = model.predict(X_test)
accuracy = round(accuracy_score(y_test, predict), 4)
f1 = round(f1_score(y_test, predict, average="macro"), 4)
precision = round(precision_score(y_test, predict, average="macro"), 4)
recall = round(recall_score(y_test, predict, average="macro"), 4)
models_base_predict.append(
{
"name": name,
"model": model,
"predict": predict,
"accuracy": accuracy,
"f1": f1,
"precision": precision,
"recall": recall,
}
)
return models_base_predict
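# Hypothetical usage sketch (the model list is illustrative; any estimator with
# fit/predict works):
# results = Model.fit_and_predict(
#     X_train, X_test, y_train, y_test,
#     list_model=[("MLP", MLPClassifier(max_iter=500))],
# )
# Model.plot_results(results, X_test, y_test)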
@staticmethod
def plot_results(list_predict, X_test, y_test, export_files=True):
for result in list_predict:
print(f"Model: {result['name']}")
metrics = {
"Accuracy": [result["accuracy"]],
"F1": [result["f1"]],
"Precision": [result["precision"]],
"Recall": [result["recall"]],
}
metrics_df = pd.DataFrame.from_dict(
metrics, orient="index", columns=["Valor"],
)
print(metrics_df)
print()
if result["name"] == "MLP":
print(f"Alpha: {result['model'].alpha}")
print(f"Hidden Layers Sizes: {result['model'].hidden_layer_sizes}")
elif result["name"] == "SVM":
print(f"C: {result['model'].C}")
# Sigma and gamma are the same thing, but
# if gamma='scale' (default) is passed then it uses
# 1 / (n_features * X.var()) as value of gamma,
# https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html#sklearn.svm.SVC
print(f"Sigma: {result['model'].gamma}")
print()
report = classification_report(y_test, result["predict"], output_dict=True)
report_df = | pd.DataFrame(report) | pandas.DataFrame |
# %%
'''
'''
## Se importan las librerias necesarias
import pandas as pd
import numpy as np
import datetime as dt
from datetime import timedelta
pd.options.display.max_columns = None
pd.options.display.max_rows = None
import glob as glob
import datetime
import re
import jenkspy
import tkinter as tk
root= tk.Tk()
canvas1 = tk.Canvas(root, width = 300, height = 300)
canvas1.pack()
# %%
def profiling():
#### Read Databases
datas=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/data_con_drop.csv',sep=';',encoding='utf-8',dtype='str')
salida=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/salida_limpia.csv',sep=';',encoding='utf-8',dtype='str')
seguimiento=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/seguimiento.csv',sep=';',encoding='utf-8',dtype='str')
virtuales=pd.read_csv('C:/Users/scadacat/Desktop/TIGO (Cliente)/Cobranzas/Notebooks/Bds/virtuales.csv',encoding='utf-8',sep=';')
df=datas.copy()
out=salida.copy()
seg=seguimiento.copy()
vir=virtuales.copy()
out.sort_values(['Identificacion Del Cliente','Fecha_Gestion'],inplace=True)
out=out[out['Repetido CC']=='0']
out=out[~out.duplicated(keep='last')]
## Cleaning
df['Marca Score']=df['Marca Score'].str.strip().fillna('NO REGISTRA')
df['Marca Score'][df['Marca Score']==''] ='NO REGISTRA'
df['Analisis De Habito']=df['Analisis De Habito'].fillna('NO DEFINE')
df['Analisis De Habito'][df['Analisis De Habito']==' '] ='NO DEFINE'
df['Tipo de Cliente'][df['Tipo de Cliente']==' '] ='NO DEFINE'
df['Marca Funcional']=df['Marca Funcional'].str.replace(' ','0')
df['Marca']=df['Marca'].str.replace(' ','0')
df['Antiguedad Cliente'][df['Antiguedad Cliente']==' '] ='NO REGISTRA'
df['Perfil Digital']=df['Perfil Digital'].fillna('Sin perfil')
df['Nivel de riesgo experian']=df['Nivel de riesgo experian'].str.replace(' ','NO REGISTRA')
df['Nivel de Riesgo']=df['Nivel de Riesgo'].str.replace(' ','NO REGISTRA')
df['Nivel Estrategia Cobro']=df['Nivel Estrategia Cobro'].str.replace(' ','NO REGISTRA')
df['Real reportado en central de riesgos']=df['Real reportado en central de riesgos'].str.replace(' ','0')
df['Nivel de Riesgo'][df['Nivel de Riesgo']==' '] ='NO REGISTRA'
df['Estado del Cliente'][df['Estado del Cliente']==' '] ='SIN IDENTIFICAR'
df['Tipificación Cliente'][df['Tipificación Cliente']==' '] ='SIN IDENTIFICAR'
df['Estrategia'][df['Estrategia']==' '] ='SIN ESTRATEGIA'
df['Autopago'][df['Autopago']==' '] ='NO APLICA'
df['Tipo de Cliente']=df['Tipo de Cliente'].fillna('NO DEFINE')
df['Tipo de Reporte a Central de Riesgos'][df['Tipo de Reporte a Central de Riesgos']==' '] ='NO REGISTRA'
df['Codigo edad de mora(para central de riesgos)']=df['Codigo edad de mora(para central de riesgos)'].str.replace(' ','NO REGISTRA')
df['Análisis Vector'][df['Análisis Vector']==' '] ='SIN IDENTIFICAR'
df['Análisis Vector_PAGOS_PARCIAL'] = np.where(df['Análisis Vector'].str.contains("PAGO PARCIAL|PAGOS PARCIAL"),"1",'0')
df['Análisis Vector_PAGO OPORTUNO'] = np.where(df['Análisis Vector'].str.contains("SIN PAGO|FINANCIAR"),"1",'0')
df['Análisis Vector_SIN_IDENTIFICAR'] = np.where(df['Análisis Vector'].str.contains("SIN IDENTIFICAR"),"1",'0')
df['Análisis Vector_SIN_PAGO'] = np.where(df['Análisis Vector'].str.contains("SIN PAGO|FINANCIAR"),"1",'0')
df['Análisis Vector_suspension'] = np.where(df['Análisis Vector'].str.contains("SUSPENSIO"),"1",'0')
df['Análisis Vector_indeterminado'] = np.where(df['Análisis Vector'].str.contains("PAGO OPORTUNO Y NO OPORTUNO"),"1",'0')
df['Análisis Vector_pago_no_oport'] = np.where(df['Análisis Vector'].str.contains("PAGO NO OPORTUNO"),"1",'0')
df['Análisis Vector_otro_caso'] = np.where(df['Análisis Vector'].str.contains("NUEVO|FACTURAS AJUSTADAS|PROBLEMAS RECLAMACION"),"1",'0')
df['Vector Cualitativo # Suscripción'][df['Vector Cualitativo # Suscripción']==' '] = df["Vector Cualitativo # Suscripción"].mode()[0]
df['Fecha Ult Gestion']=pd.to_datetime(df['Fecha Ult Gestion'],format='%Y-%m-%d')
###PARSE DATES AND CREATE NEW FEATURES
df['Fecha de Asignacion']=pd.to_datetime(df['Fecha de Asignacion'],format='%Y-%m-%d %H:%M:%S')
df['Fecha Ult pago']=pd.to_datetime(df['Fecha Ult pago'],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df['Fecha de cuenta de cobro mas antigua']=pd.to_datetime(df['Fecha de cuenta de cobro mas antigua'],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df["Dias_ult_pago"] = (df['Fecha Ult pago']).dt.day
df["dia_semana_ult_pago"] = (df['Fecha Ult pago']).dt.weekday
df["mes_ult_pago"]=df["Fecha Ult pago"].dt.month
df["semana_ult_pago"]=df["Fecha Ult pago"].dt.week
df["trimestre_ult_pago"] = df["Fecha Ult pago"].dt.quarter
df["año_ult_pago"] = df["Fecha Ult pago"].dt.year
df["DIAS_desde_ult_pago"] = (df["Fecha Ult Gestion"] - df["Fecha Ult pago"]).dt.days
df["Fecha estado corte"]=pd.to_datetime(df["Fecha estado corte"],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df["dias_ult_pago_cobro"] = (df["Fecha Ult pago"]-df["Fecha estado corte"]).dt.days
df["dias_ult_pago_fac_ant"] = (df["Fecha Ult pago"]-df["Fecha de cuenta de cobro mas antigua"]).dt.days
df['Fecha de Asignacion_mes']=df["Fecha de Asignacion"].dt.month
df['Fecha de Instalacion']=pd.to_datetime(df['Fecha de Instalacion'],format ='%Y-%m-%d %H:%M:%S',errors = "coerce")
df['antiguedad_mes']=(dt.datetime.now()-df['Fecha de Instalacion']).dt.days/365
df['Fecha Retiro']=pd.to_datetime(df['Fecha Retiro'].str.replace('4732','2020'),format='%Y-%m-%d',errors = "coerce")
df['Fecha Vencimiento Sin Recargo']=pd.to_datetime(df['Fecha Vencimiento Sin Recargo'],format='%Y-%m-%d')
df['dias_desde_ult_gestion']=(dt.datetime.now()-df['Fecha Ult Gestion']).dt.days
## Group labels
df['Descripcion subcategoria']=df['Descripcion subcategoria']\
.str.replace('Consumos EPM Telco|INALAMBRICOS NO JAC|unica|COMERCIAL|ENTERPRISE|MONOPRODUCTO|PYME|------------------------------|LINEA BUZON','NO REGISTRA')\
.str.replace('ESTRATO MEDIO ALTO|MEDIO ALTO','ESTRATO 4')\
.str.replace('ESTRATO ALTO|ALTO','ESTRATO 6')\
.str.replace('ESTRATO MEDIO-BAJO|MEDIO BAJO','ESTRATO 2')\
.str.replace('ESTRATO MEDIO|MEDIO','ESTRATO 3')\
.str.replace('ESTRATO MEDIO-BAJO|MEDIO BAJO','ESTRATO 2')\
.str.replace('BAJO BAJO|ESTRATO BAJO-BAJO|ESTRATO BAJO|BAJO','ESTRATO 1')
df['Descripcion subcategoria'][df['Descripcion subcategoria']=='-'] ='NO REGISTRA' ## '-' means not registered
df['Tipificación Cliente'][df['Tipificación Cliente']==' '] = df["Tipificación Cliente"].mode()[0] ## Replace blanks with the mode
df['Dias Suspension'][df['Dias Suspension']==' ']=0
df['Dias Suspension']=df['Dias Suspension'].astype('int')
## Group labels
df['Descripcion producto']=df['Descripcion producto'].str.replace('-','').str.strip().str.upper()\
.str.replace('TELEVISION UNE|TELEVISION INTERACTIVA|TV CABLE|TV INTERACTIVA|UNE TV|TELEVISION SIN SEÃƑ‘AL|TELEVISION SIN SEÃƑ‘AL|TV CABLE SIN SEÑAL','TELEVISION')\
.str.replace('INTERNET BANDA ANCHA|SEGUNDA CONEXION INTERNET|BANDA ANCHA|INTERNET EDATEL|INTERNET INSTANTANEO|CABLE MODEM|INTERNET DEDICADO 11|ADSL BASICO','INTERNET')\
.str.replace('UNE MOVIL|COLOMBIAMOVIL BOGOTA|TIGO|ETB','UNEMOVIL')\
.str.replace('TOIP|TELEFONICA TELECOM|TELECOM|TO_SINVOZ','TELEFONIA')\
.str.replace('LÃƑÂNEA BÃƑ¡SICA','LINEA BASICA')
df['Descripcion categoria']=df['Descripcion categoria'].str.replace("[^a-zA-Z ]+", "NO REGISTRA")
df['Descripcion producto']=df['Descripcion producto'].str.replace('-','').str.strip()\
.str.replace('TELEVISION UNE|Television Interactiva|TV CABLE |TV INTERACTIVA|UNE TV|TELEVISIONSIN SEÑAL','TELEVISION')\
.str.replace('Internet Banda Ancha|Internet EDATEL|CABLE MODEM','INTERNET').str.replace('UNE MOVIL','UNEMOVIL')\
.str.replace('UNE MOVIL|COLOMBIAMOVIL BOGOTA','UNEMOVIL')\
.str.replace('TOIP','TELEFONIA')
df['Descripcion producto']=df['Descripcion producto'].str.strip().str.replace('-','')\
.str.replace('TELEVISION UNE|Television Interactiva|TV CABLE |TV INTERACTIVA|UNE TV','TELEVISION')\
.str.replace('Internet Banda Ancha','INTERNET').str.replace('UNE MOVIL','UNEMOVIL')
conteo3=df['Descripcion producto'].value_counts().iloc[:7].index.tolist()
df['Descripcion producto_resumen']=df.apply(
lambda row: row['Descripcion producto'] if (row['Descripcion producto'] in conteo3)
else 'OTRO PRODUCTO',axis=1)
df['Descripcion producto_resumen']=df['Descripcion producto_resumen'].str.strip()
df['Tipo Contactabilidad'][df['Tipo Contactabilidad']==' '] ='NO REGISTRA'
df['Indicador BI'][df['Indicador BI']==' '] ='NO REGISTRA'
## Create variable
df['antiguedad_mes']=df['antiguedad_mes'].astype(int)
col = 'antiguedad_mes'
condi = [ df[col] < 12, df[col].between(12, 24, inclusive = True),df[col]>24 ]
seg_ = [ "SEGMENTO YOUNG", 'SEGMENTO MASTER','SEGMENTO LEGEND']
df["Hogar"] = np.select(condi, seg_, default=np.nan)
df['Calificación A Nivel De Suscripción'][df['Calificación A Nivel De Suscripción']==' ']=df['Calificación A Nivel De Suscripción'].mode()[0]
df['Calificación A Nivel De Suscripción']=df['Calificación A Nivel De Suscripción'].astype('int')
df['Califica_suscr_class']=pd.cut(df['Calificación A Nivel De Suscripción'],bins=5,labels=["A","B","C","D","E"]).astype(str)
df['Tipo De Documento'][df['Tipo De Documento']=='13'] ='NO REGISTRA'
df['Tipo De Documento']=df['Tipo De Documento'].fillna('NO REGISTRA')
df['Tipo De Documento'][df['Tipo De Documento']=='1'] ='CC'
df['Tipo De Documento'][df['Tipo De Documento']==' '] ='NO REGISTRA'
df['Tipo De Documento'][df['Tipo De Documento']=='C'] ='NO REGISTRA'
df['Tipo De Documento']=df['Tipo De Documento'].str.replace('3 Cedula Extranjeria|3|1CE','CE')\
.str.replace('1 Cedula','CC')\
.str.replace('2 Nit|2',' Nit')\
.str.replace('4 Tarjeta de Identidad|4',' TI')
#### Create, clean & group variables
df['Banco 1'][df['Banco 1']==' '] ='NO REGISTRA'
df['Banco 2'][df['Banco 2']==' '] ='NO REGISTRA'
df['Banco 1'].fillna('NO REGISTRA',inplace=True)
df['Banco 2'].fillna('NO REGISTRA',inplace=True)
df['Banco 1']=df['Banco 1'].str.upper().str.strip()
df['Banco 2']=df['Banco 2'].str.upper().str.strip()
df['Banco 1']=df['Banco 1'].str.replace('BANCO COLPATRIA','COLPATRIA')\
.str.replace('COLPATRIA ENLINEA','COLPATRIA EN LINEA')\
.str.replace('GANA GANA','GANA')\
.str.replace('GANA GANA','GANA')
df["Banco 1_virtual"] =\
np.where(df["Banco 1"].str.contains("LINEA|PSE|BOTON",regex = True,na = False),"1","0")
df["Banco 2_Virtual"] =\
np.where(df["Banco 2"].str.contains("LINEA|PSE|BOTON",regex = True,na = False),"1","0")
conteo_banco=df['Banco 1'].value_counts().iloc[:10].index.tolist()
df['Banco 1_Cl']=df.apply(
lambda row: row['Banco 1'] if (row['Banco 1'] in conteo_banco)
else 'OTRO BANCO',axis=1)
conteo_banco2=df['Banco 2'].value_counts().iloc[:10].index.tolist()
df['Banco 2_Cl']=df.apply(
lambda row: row['Banco 2'] if (row['Banco 2'] in conteo_banco2)
else 'OTRO BANCO',axis=1)
df['Causal'][df['Causal']==' '] ='NO REGISTRA'
df['Causal_Cl']=df['Causal']\
.str.replace('FACTURA MAYOR A LA CAPACIDAD DE PAGO|CLIENTE SE ACOGE PRODUCTO MINIMO VITAL|PRIORIDAD INGRESOS A LA CANASTA BASICA|INDISPONIBILIDAD DE MEDIOS DE PAGO POR EMERGENCIA SANITARIA|NO TIENE DINERO|INCONVENIENTES ECONOMICOS|INCONVENIENTES ECONOMICOS|CONTINGENCIA COVID-19|DESEMPLEADO|INDEPENDIENTE SIN INGRESOS DURANTE CUARENTENA|DISMINUCIÓN INGRESOS / INCONVENIENTES CON NÓMINA',
'DISMINUCIÓN DE INGRESOS')\
.str.replace('OLVIDO DE PAGO|FUERA DE LA CIUDAD|DEUDOR SE OLVIDO DEL PAGO|OLVIDO DEL PAGO / ESTA DE VIAJE',
'OLVIDO')\
.str.replace('PAGA CADA DOS MESES|PAGO BIMESTRAL','PAGO BIMESTRAL')\
.str.replace('INCONFORMIDAD EN EL VALOR FACTURADO|INCONFORMIDAD POR CAMBIO DE DOMICILIO|INCOMFORMIDAD POR CAMBIO DE DOMICILIO|PQR PENDIENTE|TIENE RECLAMO PENDIENTE','INCONFORMIDAD')\
.str.replace('OTRA PERSONA ES LA ENCARGADA DEL PAGO','OTRA PERSONA ES LA ENCARGADA DEL PAGO').str.strip()\
.str.replace('PROBLEMAS FACTURACIÓN|INCONSISTENCIAS EN CARGOS FACTURADOS|RECLAMACIÓN EN TRÃMITE|NO LE LLEGA LA FACTURA / LLEGO DESPUES DE LA FECHA DE VENCIMIENTO|LLEGO LA FACTURA DESPUES DE LA FECHA DE VENCIMIENTO|NO LLEGO FACTURA',
'FACTURA')\
.str.replace('SE NIEGA A RECIBIR INFORMACION',
'RENUENTE')\
.str.replace('INCONVENIENTES CON CANALES DE PAGO|NO HAY PROGRAMACION DEL PAGO|INCONVENIENTES CON EL CANAL DE RECAUDO|NO HAY PROGRAMACION DEL PAGO|INCONVENIENTES CON LA ENTIDAD BANCARIA',
'INCONVENIENTES CON PAGO')\
.str.replace('REALIZARA RETIRO DEL SERVICIO|REALIZARA RETIRO / CANCELACION SERVICIO',
'REALIZARA RETIRO')
conteo_Causa=df['Causal_Cl'].value_counts().iloc[:12].index.tolist()
df['Causal_Cl']=df.apply(
lambda row: row['Causal_Cl'] if (row['Causal_Cl'] in conteo_Causa)
else 'OTRA CAUSA',axis=1)
conteo_Corte=df['Descripcion estado de corte'].value_counts().iloc[:12].index.tolist()
df['Descripcion estado de corte_Cl']=df.apply(
lambda row: row['Descripcion estado de corte'] if (row['Descripcion estado de corte'] in conteo_Corte)
else 'OTRA MOTIVO',axis=1)
df['Descripcion estado de corte_conexión'] = np.where(df['Descripcion estado de corte'].str.contains("CONEXION"),"1",'0')
df['Descripcion estado de corte_suspención'] = np.where(df['Descripcion estado de corte'].str.contains("SUSPENSION"),"1",'0')
df['Descripcion estado de corte_retiro'] = np.where(df['Descripcion estado de corte'].str.contains("RETIRO"),"1",'0')
df['Valor Total Cobrar']=df['Valor Total Cobrar'].astype('float64')
df['Valor Vencido']=df['Valor Vencido'].astype('float64')
df['Valor Factura']=df['Valor Factura'].astype('float64')
df['Valor Intereses de Mora']=df['Valor Intereses de Mora'].astype('float64')
df['Valor financiado']=df['Valor financiado'].astype('float64')
## DROPPING VARIABLES
df.drop(['Causal','Codigo edad de mora(para central de riesgos)','Codigo edad de mora(para central de riesgos)',
'Estado Adminfo','Celular con mejor Contactabilidad','Archivo Convergente','Usuario','Vector de Pago'],axis=1,inplace=True)
anis=['Teléfono última gestión','Email','Telefono con mejor Contactabilidad','Email',
'Ultimo Celular Grabado','Ultimo Telefono Grabado','Ultimo Email Grabado','Celular con mejor Contactabilidad']
df.dropna(subset = ["Direccion de instalacion"], inplace=True)
df['llave']=df['Identificacion']+"_"+df['Direccion de instalacion']
df=df.sort_values('Fecha de Asignacion',ascending=True)
## Drop the duplicates present in the combination of these variables
df=df[~df[['llave','# servicio suscrito/abonado','Fecha de Asignacion','Valor Total Cobrar','Valor Vencido','Descripcion localidad']].duplicated()]
df.sort_values(by=['Identificacion','# servicio suscrito/abonado','Fecha de Asignacion'],ascending=[True,True,True]).drop_duplicates('# servicio suscrito/abonado',keep='last',inplace=True)
### Careful with the records still pending management
## Number of services
cant_serv=df.groupby(['Identificacion']).agg({'Descripcion producto':'nunique','Direccion de instalacion':'nunique'})\
.reset_index().sort_values('Descripcion producto',ascending=False)\
.rename(columns={'Descripcion producto':'cantidad_ser_dir','Direccion de instalacion':'serv_dir'})
df=pd.merge(df,cant_serv,on='Identificacion')
df=df[~df.duplicated()]
# Create this key to avoid duplicates within the same day
df['llave_2']=df['Identificacion']+"_"+(df['Fecha de Asignacion'].astype('str'))
#
conteo=df.groupby(['Identificacion','Fecha de Asignacion','Fecha de Asignacion_mes']).agg({'Identificacion':'nunique'}).rename(columns={'Identificacion':'cantidad_mes'}).reset_index()
conteo.sort_values('Fecha de Asignacion',ascending=True,inplace=True)
conteo=conteo[~conteo['Identificacion'].duplicated(keep='last')]
conteo['llave_2']=conteo['Identificacion']+"_"+(conteo['Fecha de Asignacion'].astype('str'))
# Created in order to identify and keep each customer's keys
consolidar=pd.merge(df,conteo['llave_2'],on='llave_2')
# Create dummy variables to flag which products each customer holds
cer1=pd.concat([pd.get_dummies(consolidar['Descripcion producto_resumen']),consolidar],axis=1) # concatenate
cer1['llave_2']=cer1['Identificacion']+"_"+(cer1['Fecha de Asignacion'].astype('str'))
cer=cer1.groupby(['Identificacion']).agg({
'Descripcion producto_resumen':np.array,'Descripcion producto_resumen':'sum',
'TELEFONIA':'sum','INTERNET':'sum','TELEVISION':'sum','UNEMOVIL':'sum',
'LARGA DISTANCIA UNE':'sum','PAQUETE':'sum','OTRO PRODUCTO':'sum','LINEA BASICA':'sum',
"Valor Vencido":"sum","Valor Total Cobrar":"sum",
"Valor financiado":"sum",
"Valor Intereses de Mora":"sum"}).reset_index().\
rename(columns={'Valor Vencido':'valor vencido_sum',
'Valor Factura':'Valor Factura_sum',
'Valor financiado':'Valor financiado_sum',
'Valor Total Cobrar':'Valor Total Cobrar_sum',
'Descripcion producto_resumen':'Total servicio',
'Valor Intereses de Mora':'Valor Intereses de Mora_sum'})
cer.drop(['Total servicio'],axis=1,inplace=True)
data=pd.merge(consolidar,cer,on='Identificacion')
data=data.sort_values(['Fecha de Asignacion','Identificacion'],ascending=[True,True]).drop_duplicates('Identificacion',keep='last')
### Outcome (salida) database
out.sort_values(['Identificacion Del Cliente','Fecha_Gestion'],ascending=[True,True]).drop_duplicates(keep='last',inplace=True)
out.drop(['Unnamed: 19'],axis=1,inplace=True)
## Join with the outcome database
full=pd.merge(data,out[['Identificacion Del Cliente','Efectivo Pago','Fecha_Pago']],
left_on='Identificacion',right_on='Identificacion Del Cliente')
full=full[~full.duplicated()]
full=full.sort_values(['Identificacion','Efectivo Pago'],ascending=[True,True]).drop_duplicates(['Identificacion'],keep='first')
full['llave_exp']=full['Identificacion']+full['# servicio suscrito/abonado']
full['valor vencido_sum'][full['valor vencido_sum'] < 0] = 0
full['ratio_vlr_vencido_cobro']=full['valor vencido_sum']/full['Valor Total Cobrar_sum']
full.drop(['llave_2','Direccion de instalacion','Banco 1','Banco 2'],axis=1,inplace=True)
### Export and send to the working folder
seg['FECHA DE GESTION']=pd.to_datetime(seg['FECHA DE GESTION'],format='%Y-%m-%d %H:%M:%S')
seg=seg.sort_values(['IDENTIFICACIóN','FECHA DE GESTION']).drop_duplicates('IDENTIFICACIóN',keep='last')
vir['Identificación']=vir['Identificación'].astype('str')
fulll=pd.merge(full,seg[['IDENTIFICACIóN','FECHA DE GESTION','CLASE DE GESTION',
'LINEA/AGENCIA/ABOGADO','CAUSAL','CICLO','OTRA GESTION',
'SE DEJO MENSAJE EN BUZON', 'DEUDOR REALIZA PROMESA DE PAGO TOTAL',
'NO CONTESTAN / OCUPADO', 'DEUDOR REALIZA PROMESA DE PAGO PARCIAL',
'NO HUBO ACUERDO', 'SE ENVIA CUPON DE PAGO','SE DEJO MENSAJE CON TERCERO',
'OTRA GESTION_sum', 'Total_segui','Cantidad_de_cobros_diff_mes', 'Cantidad_recontactos_mes',
'class_Cantidad_de_cobros_diff_mes','class_Cantidad_recontactos_mes']],
left_on='Identificacion',right_on='IDENTIFICACIóN',how='left').\
merge(vir,left_on='Identificacion',right_on='Identificación',how='left')
# free memory
del cer
del cer1
fulll["Efectivo Pago"] = (fulll["Efectivo Pago"]=="Efectivo").astype(int)
fulll.drop(['Valor financiado_sum','Fecha_Pago','Valor Intereses de Mora_sum','Valor Total Cobrar','Valor Total Cobrar_sum','Valor Intereses de Mora','Agencia B2B Convergente','Codigo Fraude','CAUSAL','LINEA/AGENCIA/ABOGADO',
'Celular','Valor financiado','# servicio suscrito/abonado','Fecha Ult pago','Fecha estado corte','Codigo Departamento','Centrales de riesgos','dias_desde_ult_gestion',
'Valor Honorarios','Dias_ult_pago','dia_semana_ult_pago','mes_ult_pago','semana_ult_pago','Marca','Marca Funcional','Reportado a central de riesgos','Marca Score','Autopago',
'trimestre_ult_pago','año_ult_pago','DIAS_desde_ult_pago','dias_ult_pago_cobro','Primera Mora','CICLO','Codigo Categoria','Subsegmento',
'dias_ult_pago_fac_ant','Fecha de cuenta de cobro mas antigua','Fecha estado corte','Fecha estado corte','Descripcion Gestion Resultado'],axis=1,inplace=True)
dd=fulll.copy()
dd['class_Cantidad_recontactos_mes']=dd['class_Cantidad_recontactos_mes'].fillna('0')
dd['class_Cantidad_de_cobros_diff_mes'].fillna('0',inplace=True)
# dd['Calificación Servicio Suscrito'][dd['Calificación Servicio Suscrito']==' '] = np.nan
# dd['Calificación Servicio Suscrito']=dd['Calificación Servicio Suscrito'].astype(float)
dd['Fecha de Asignacion']=pd.to_datetime(dd['Fecha de Asignacion'],format='%Y-%m-%d')
dd['Fecha Ult Gestion']=pd.to_datetime(dd['Fecha Ult Gestion'],format='%Y-%m-%d')
dd['Fecha Actualizacion']=pd.to_datetime(dd['Fecha Actualizacion'],format='%Y-%m-%d')
dd['Fecha Vencimiento Sin Recargo']=pd.to_datetime(dd['Fecha Vencimiento Sin Recargo'],format='%Y-%m-%d')
# dd['Fecha de cuenta de cobro mas antigua']=pd.to_datetime(dd['Fecha de cuenta de cobro mas antigua'],format='%Y-%m-%d')
dd['FECHA DE GESTION']=pd.to_datetime(dd['FECHA DE GESTION'],format='%Y-%m-%d %H:%M:%S')
dd['Fecha Debido Cobrar']= | pd.to_datetime(dd['Fecha Debido Cobrar'],format='%Y-%m-%d %H:%M:%S', errors='coerce') | pandas.to_datetime |
"""
test date_range, bdate_range construction from the convenience range functions
"""
from datetime import datetime, time, timedelta
import numpy as np
import pytest
import pytz
from pytz import timezone
from pandas._libs.tslibs import timezones
from pandas._libs.tslibs.offsets import BDay, CDay, DateOffset, MonthEnd, prefix_mapping
from pandas.errors import OutOfBoundsDatetime
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DatetimeIndex, Timestamp, bdate_range, date_range, offsets
import pandas._testing as tm
from pandas.core.arrays.datetimes import generate_range
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestTimestampEquivDateRange:
# Older tests in TestTimeSeries constructed their `stamp` objects
# using `date_range` instead of the `Timestamp` constructor.
# TestTimestampEquivDateRange checks that these are equivalent in the
# pertinent cases.
def test_date_range_timestamp_equiv(self):
rng = date_range("20090415", "20090519", tz="US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_dateutil(self):
rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
stamp = rng[0]
ts = Timestamp("20090415", tz="dateutil/US/Eastern", freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_explicit_pytz(self):
rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"), freq="D")
assert ts == stamp
@td.skip_if_windows_python_3
def test_date_range_timestamp_equiv_explicit_dateutil(self):
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
rng = date_range("20090415", "20090519", tz=gettz("US/Eastern"))
stamp = rng[0]
ts = Timestamp("20090415", tz=gettz("US/Eastern"), freq="D")
assert ts == stamp
def test_date_range_timestamp_equiv_from_datetime_instance(self):
datetime_instance = datetime(2014, 3, 4)
# build a timestamp with a frequency, since then it supports
# addition/subtraction of integers
timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]
ts = Timestamp(datetime_instance, freq="D")
assert ts == timestamp_instance
def test_date_range_timestamp_equiv_preserve_frequency(self):
timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]
ts = Timestamp("2014-03-05", freq="D")
assert timestamp_instance == ts
class TestDateRanges:
def test_date_range_nat(self):
# GH#11587
msg = "Neither `start` nor `end` can be NaT"
with pytest.raises(ValueError, match=msg):
date_range(start="2016-01-01", end=pd.NaT, freq="D")
with pytest.raises(ValueError, match=msg):
date_range(start=pd.NaT, end="2016-01-01", freq="D")
def test_date_range_multiplication_overflow(self):
# GH#24255
# check that overflows in calculating `addend = periods * stride`
# are caught
with tm.assert_produces_warning(None):
# we should _not_ be seeing a overflow RuntimeWarning
dti = date_range(start="1677-09-22", periods=213503, freq="D")
assert dti[0] == Timestamp("1677-09-22")
assert len(dti) == 213503
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("1969-05-04", periods=200000000, freq="30000D")
def test_date_range_unsigned_overflow_handling(self):
# GH#24255
# case where `addend = periods * stride` overflows int64 bounds
# but not uint64 bounds
dti = date_range(start="1677-09-22", end="2262-04-11", freq="D")
dti2 = date_range(start=dti[0], periods=len(dti), freq="D")
assert dti2.equals(dti)
dti3 = date_range(end=dti[-1], periods=len(dti), freq="D")
assert dti3.equals(dti)
def test_date_range_int64_overflow_non_recoverable(self):
# GH#24255
# case with start later than 1970-01-01, overflow int64 but not uint64
msg = "Cannot generate range with"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(start="1970-02-01", periods=106752 * 24, freq="H")
# case with end before 1970-01-01, overflow int64 but not uint64
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1969-11-14", periods=106752 * 24, freq="H")
def test_date_range_int64_overflow_stride_endpoint_different_signs(self):
# cases where stride * periods overflow int64 and stride/endpoint
# have different signs
start = Timestamp("2262-02-23")
end = Timestamp("1969-11-14")
expected = date_range(start=start, end=end, freq="-1H")
assert expected[0] == start
assert expected[-1] == end
dti = date_range(end=end, periods=len(expected), freq="-1H")
tm.assert_index_equal(dti, expected)
start2 = Timestamp("1970-02-01")
end2 = Timestamp("1677-10-22")
expected2 = date_range(start=start2, end=end2, freq="-1H")
assert expected2[0] == start2
assert expected2[-1] == end2
dti2 = date_range(start=start2, periods=len(expected2), freq="-1H")
tm.assert_index_equal(dti2, expected2)
def test_date_range_out_of_bounds(self):
# GH#14187
msg = "Cannot generate range"
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range("2016-01-01", periods=100000, freq="D")
with pytest.raises(OutOfBoundsDatetime, match=msg):
date_range(end="1763-10-12", periods=100000, freq="D")
def test_date_range_gen_error(self):
rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5min")
assert len(rng) == 4
@pytest.mark.parametrize("freq", ["AS", "YS"])
def test_begin_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"],
freq=freq,
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["A", "Y"])
def test_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"], freq=freq
)
tm.assert_index_equal(rng, exp)
@pytest.mark.parametrize("freq", ["BA", "BY"])
def test_business_end_year_alias(self, freq):
# see gh-9313
rng = date_range("1/1/2013", "7/1/2017", freq=freq)
exp = DatetimeIndex(
["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"], freq=freq
)
tm.assert_index_equal(rng, exp)
def test_date_range_negative_freq(self):
# GH 11018
rng = date_range("2011-12-31", freq="-2A", periods=3)
exp = DatetimeIndex(["2011-12-31", "2009-12-31", "2007-12-31"], freq="-2A")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2A"
rng = date_range("2011-01-31", freq="-2M", periods=3)
exp = DatetimeIndex(["2011-01-31", "2010-11-30", "2010-09-30"], freq="-2M")
tm.assert_index_equal(rng, exp)
assert rng.freq == "-2M"
def test_date_range_bms_bug(self):
# #1645
rng = date_range("1/1/2000", periods=10, freq="BMS")
ex_first = Timestamp("2000-01-03")
assert rng[0] == ex_first
def test_date_range_normalize(self):
snap = datetime.today()
n = 50
rng = date_range(snap, periods=n, normalize=False, freq="2D")
offset = timedelta(2)
values = DatetimeIndex([snap + i * offset for i in range(n)], freq=offset)
tm.assert_index_equal(rng, values)
rng = date_range("1/1/2000 08:15", periods=n, normalize=False, freq="B")
the_time = time(8, 15)
for val in rng:
assert val.time() == the_time
def test_date_range_fy5252(self):
dr = date_range(
start="2013-01-01",
periods=2,
freq=offsets.FY5253(startingMonth=1, weekday=3, variation="nearest"),
)
assert dr[0] == Timestamp("2013-01-31")
assert dr[1] == Timestamp("2014-01-30")
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start, end, periods=10, freq="s")
def test_date_range_convenience_periods(self):
# GH 20808
result = date_range("2018-04-24", "2018-04-27", periods=3)
expected = DatetimeIndex(
["2018-04-24 00:00:00", "2018-04-25 12:00:00", "2018-04-27 00:00:00"],
freq=None,
)
tm.assert_index_equal(result, expected)
# Test if spacing remains linear if tz changes to dst in range
result = date_range(
"2018-04-01 01:00:00",
"2018-04-01 04:00:00",
tz="Australia/Sydney",
periods=3,
)
expected = DatetimeIndex(
[
Timestamp("2018-04-01 01:00:00+1100", tz="Australia/Sydney"),
Timestamp("2018-04-01 02:00:00+1000", tz="Australia/Sydney"),
Timestamp("2018-04-01 04:00:00+1000", tz="Australia/Sydney"),
]
)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"start,end,result_tz",
[
["20180101", "20180103", "US/Eastern"],
[datetime(2018, 1, 1), datetime(2018, 1, 3), "US/Eastern"],
[Timestamp("20180101"), Timestamp("20180103"), "US/Eastern"],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
"US/Eastern",
],
[
Timestamp("20180101", tz="US/Eastern"),
Timestamp("20180103", tz="US/Eastern"),
None,
],
],
)
def test_date_range_linspacing_tz(self, start, end, result_tz):
# GH 20983
result = date_range(start, end, periods=3, tz=result_tz)
expected = date_range("20180101", periods=3, freq="D", tz="US/Eastern")
tm.assert_index_equal(result, expected)
def test_date_range_businesshour(self):
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-04 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(["2014-07-04 16:00", "2014-07-07 09:00"], freq="BH")
rng = date_range("2014-07-04 16:00", "2014-07-07 09:00", freq="BH")
tm.assert_index_equal(idx, rng)
idx = DatetimeIndex(
[
"2014-07-04 09:00",
"2014-07-04 10:00",
"2014-07-04 11:00",
"2014-07-04 12:00",
"2014-07-04 13:00",
"2014-07-04 14:00",
"2014-07-04 15:00",
"2014-07-04 16:00",
"2014-07-07 09:00",
"2014-07-07 10:00",
"2014-07-07 11:00",
"2014-07-07 12:00",
"2014-07-07 13:00",
"2014-07-07 14:00",
"2014-07-07 15:00",
"2014-07-07 16:00",
"2014-07-08 09:00",
"2014-07-08 10:00",
"2014-07-08 11:00",
"2014-07-08 12:00",
"2014-07-08 13:00",
"2014-07-08 14:00",
"2014-07-08 15:00",
"2014-07-08 16:00",
],
freq="BH",
)
rng = date_range("2014-07-04 09:00", "2014-07-08 16:00", freq="BH")
tm.assert_index_equal(idx, rng)
def test_range_misspecified(self):
# GH #1095
msg = (
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000")
with pytest.raises(ValueError, match=msg):
date_range(periods=10)
with pytest.raises(ValueError, match=msg):
date_range(start="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(end="1/1/2000", freq="H")
with pytest.raises(ValueError, match=msg):
date_range(periods=10, freq="H")
with pytest.raises(ValueError, match=msg):
date_range()
def test_compat_replace(self):
# https://github.com/statsmodels/statsmodels/issues/3349
# replace should take ints/longs for compat
result = date_range(
Timestamp("1960-04-01 00:00:00", freq="QS-JAN"), periods=76, freq="QS-JAN"
)
assert len(result) == 76
def test_catch_infinite_loop(self):
offset = offsets.DateOffset(minute=5)
# blow up, don't loop forever
msg = "Offset <DateOffset: minute=5> did not increment date"
with pytest.raises(ValueError, match=msg):
date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset)
@pytest.mark.parametrize("periods", (1, 2))
def test_wom_len(self, periods):
# https://github.com/pandas-dev/pandas/issues/20517
res = date_range(start="20110101", periods=periods, freq="WOM-1MON")
assert len(res) == periods
def test_construct_over_dst(self):
# GH 20854
pre_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=True
)
pst_dst = Timestamp("2010-11-07 01:00:00").tz_localize(
"US/Pacific", ambiguous=False
)
expect_data = [
Timestamp("2010-11-07 00:00:00", tz="US/Pacific"),
pre_dst,
pst_dst,
]
expected = DatetimeIndex(expect_data, freq="H")
result = date_range(start="2010-11-7", periods=3, freq="H", tz="US/Pacific")
tm.assert_index_equal(result, expected)
def test_construct_with_different_start_end_string_format(self):
# GH 12064
result = date_range(
"2013-01-01 00:00:00+09:00", "2013/01/01 02:00:00+09:00", freq="H"
)
expected = DatetimeIndex(
[
Timestamp("2013-01-01 00:00:00+09:00"),
Timestamp("2013-01-01 01:00:00+09:00"),
Timestamp("2013-01-01 02:00:00+09:00"),
],
freq="H",
)
tm.assert_index_equal(result, expected)
def test_error_with_zero_monthends(self):
msg = r"Offset <0 \* MonthEnds> did not increment date"
with pytest.raises(ValueError, match=msg):
date_range("1/1/2000", "1/1/2001", freq=MonthEnd(0))
def test_range_bug(self):
# GH #770
offset = DateOffset(months=3)
result = date_range("2011-1-1", "2012-1-31", freq=offset)
start = datetime(2011, 1, 1)
expected = DatetimeIndex([start + i * offset for i in range(5)], freq=offset)
tm.assert_index_equal(result, expected)
def test_range_tz_pytz(self):
# see gh-2906
tz = timezone("US/Eastern")
start = tz.localize(datetime(2011, 1, 1))
end = tz.localize(datetime(2011, 1, 3))
dr = date_range(start=start, periods=3)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
dr = date_range(end=end, periods=3)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
dr = date_range(start=start, end=end)
assert dr.tz.zone == tz.zone
assert dr[0] == start
assert dr[2] == end
@pytest.mark.parametrize(
"start, end",
[
[
Timestamp(datetime(2014, 3, 6), tz="US/Eastern"),
Timestamp(datetime(2014, 3, 12), tz="US/Eastern"),
],
[
Timestamp(datetime(2013, 11, 1), tz="US/Eastern"),
Timestamp(datetime(2013, 11, 6), tz="US/Eastern"),
],
],
)
def test_range_tz_dst_straddle_pytz(self, start, end):
dr = date_range(start, end, freq="D")
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
dr = date_range(start, end, freq="D", tz="US/Eastern")
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
dr = date_range(
start.replace(tzinfo=None),
end.replace(tzinfo=None),
freq="D",
tz="US/Eastern",
)
assert dr[0] == start
assert dr[-1] == end
assert np.all(dr.hour == 0)
def test_range_tz_dateutil(self):
# see gh-2906
# Use maybe_get_tz to fix filename in tz under dateutil.
from pandas._libs.tslibs.timezones import maybe_get_tz
tz = lambda x: maybe_get_tz("dateutil/" + x)
start = datetime(2011, 1, 1, tzinfo=tz("US/Eastern"))
end = datetime(2011, 1, 3, tzinfo=tz("US/Eastern"))
dr = date_range(start=start, periods=3)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
dr = date_range(end=end, periods=3)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
dr = date_range(start=start, end=end)
assert dr.tz == tz("US/Eastern")
assert dr[0] == start
assert dr[2] == end
@pytest.mark.parametrize("freq", ["1D", "3D", "2M", "7W", "3H", "A"])
def test_range_closed(self, freq):
begin = datetime(2011, 1, 1)
end = datetime(2014, 1, 1)
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
def test_range_closed_with_tz_aware_start_end(self):
# GH12409, GH12684
begin = Timestamp("2011/1/1", tz="US/Eastern")
end = Timestamp("2014/1/1", tz="US/Eastern")
for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
begin = Timestamp("2011/1/1")
end = Timestamp("2014/1/1")
begintz = Timestamp("2011/1/1", tz="US/Eastern")
endtz = Timestamp("2014/1/1", tz="US/Eastern")
for freq in ["1D", "3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq, tz="US/Eastern")
left = date_range(begin, end, closed="left", freq=freq, tz="US/Eastern")
right = date_range(begin, end, closed="right", freq=freq, tz="US/Eastern")
expected_left = left
expected_right = right
if endtz == closed[-1]:
expected_left = closed[:-1]
if begintz == closed[0]:
expected_right = closed[1:]
tm.assert_index_equal(expected_left, left)
tm.assert_index_equal(expected_right, right)
@pytest.mark.parametrize("closed", ["right", "left", None])
def test_range_closed_boundary(self, closed):
# GH#11804
right_boundary = date_range(
"2015-09-12", "2015-12-01", freq="QS-MAR", closed=closed
)
left_boundary = date_range(
"2015-09-01", "2015-09-12", freq="QS-MAR", closed=closed
)
both_boundary = date_range(
"2015-09-01", "2015-12-01", freq="QS-MAR", closed=closed
)
expected_right = expected_left = expected_both = both_boundary
if closed == "right":
expected_left = both_boundary[1:]
if closed == "left":
expected_right = both_boundary[:-1]
if closed is None:
expected_right = both_boundary[1:]
expected_left = both_boundary[:-1]
tm.assert_index_equal(right_boundary, expected_right)
tm.assert_index_equal(left_boundary, expected_left)
tm.assert_index_equal(both_boundary, expected_both)
def test_years_only(self):
# GH 6961
dr = date_range("2014", "2015", freq="M")
assert dr[0] == datetime(2014, 1, 31)
assert dr[-1] == datetime(2014, 12, 31)
def test_freq_divides_end_in_nanos(self):
# GH 10885
result_1 = date_range("2005-01-12 10:00", "2005-01-12 16:00", freq="345min")
result_2 = date_range("2005-01-13 10:00", "2005-01-13 16:00", freq="345min")
expected_1 = DatetimeIndex(
["2005-01-12 10:00:00", "2005-01-12 15:45:00"],
dtype="datetime64[ns]",
freq="345T",
tz=None,
)
expected_2 = DatetimeIndex(
["2005-01-13 10:00:00", "2005-01-13 15:45:00"],
dtype="datetime64[ns]",
freq="345T",
tz=None,
)
tm.assert_index_equal(result_1, expected_1)
tm.assert_index_equal(result_2, expected_2)
def test_cached_range_bug(self):
rng = date_range("2010-09-01 05:00:00", periods=50, freq=DateOffset(hours=6))
assert len(rng) == 50
assert rng[0] == datetime(2010, 9, 1, 5)
def test_timezone_comparaison_bug(self):
# smoke test
start = Timestamp("20130220 10:00", tz="US/Eastern")
result = date_range(start, periods=2, tz="US/Eastern")
assert len(result) == 2
def test_timezone_comparaison_assert(self):
start = Timestamp("20130220 10:00", tz="US/Eastern")
msg = "Inferred time zone not equal to passed time zone"
with pytest.raises(AssertionError, match=msg):
date_range(start, periods=2, tz="Europe/Berlin")
def test_negative_non_tick_frequency_descending_dates(self, tz_aware_fixture):
# GH 23270
tz = tz_aware_fixture
result = date_range(start="2011-06-01", end="2011-01-01", freq="-1MS", tz=tz)
expected = date_range(end="2011-06-01", start="2011-01-01", freq="1MS", tz=tz)[
::-1
]
tm.assert_index_equal(result, expected)
class TestDateRangeTZ:
"""Tests for date_range with timezones"""
def test_hongkong_tz_convert(self):
# GH#1673 smoke test
dr = date_range("2012-01-01", "2012-01-10", freq="D", tz="Hongkong")
# it works!
dr.hour
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_date_range_span_dst_transition(self, tzstr):
# GH#1778
# Standard -> Daylight Savings Time
dr = date_range("03/06/2012 00:00", periods=200, freq="W-FRI", tz="US/Eastern")
assert (dr.hour == 0).all()
dr = date_range("2012-11-02", periods=10, tz=tzstr)
result = dr.hour
expected = pd.Index([0] * 10)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_date_range_timezone_str_argument(self, tzstr):
tz = timezones.maybe_get_tz(tzstr)
result = date_range("1/1/2000", periods=10, tz=tzstr)
expected = date_range("1/1/2000", periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_date_range_with_fixedoffset_noname(self):
from pandas.tests.indexes.datetimes.test_timezones import fixed_off_no_name
off = fixed_off_no_name
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
idx = pd.Index([start, end])
assert off == idx.tz
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_date_range_with_tz(self, tzstr):
stamp = Timestamp("3/11/2012 05:00", tz=tzstr)
assert stamp.hour == 5
rng = date_range("3/11/2012 04:00", periods=10, freq="H", tz=tzstr)
assert stamp == rng[1]
class TestGenRangeGeneration:
def test_generate(self):
rng1 = list(generate_range(START, END, offset=BDay()))
rng2 = list(generate_range(START, END, offset="B"))
assert rng1 == rng2
def test_generate_cday(self):
rng1 = list(generate_range(START, END, offset=CDay()))
rng2 = list(generate_range(START, END, offset="C"))
assert rng1 == rng2
def test_1(self):
rng = list(generate_range(start=datetime(2009, 3, 25), periods=2))
expected = [datetime(2009, 3, 25), datetime(2009, 3, 26)]
assert rng == expected
def test_2(self):
rng = list(generate_range(start=datetime(2008, 1, 1), end=datetime(2008, 1, 3)))
expected = [datetime(2008, 1, 1), datetime(2008, 1, 2), datetime(2008, 1, 3)]
assert rng == expected
def test_3(self):
rng = list(generate_range(start=datetime(2008, 1, 5), end=datetime(2008, 1, 6)))
expected = []
assert rng == expected
def test_precision_finer_than_offset(self):
# GH#9907
result1 = date_range(
start="2015-04-15 00:00:03", end="2016-04-22 00:00:00", freq="Q"
)
result2 = date_range(
start="2015-04-15 00:00:03", end="2015-06-22 00:00:04", freq="W"
)
expected1_list = [
"2015-06-30 00:00:03",
"2015-09-30 00:00:03",
"2015-12-31 00:00:03",
"2016-03-31 00:00:03",
]
expected2_list = [
"2015-04-19 00:00:03",
"2015-04-26 00:00:03",
"2015-05-03 00:00:03",
"2015-05-10 00:00:03",
"2015-05-17 00:00:03",
"2015-05-24 00:00:03",
"2015-05-31 00:00:03",
"2015-06-07 00:00:03",
"2015-06-14 00:00:03",
"2015-06-21 00:00:03",
]
expected1 = DatetimeIndex(
expected1_list, dtype="datetime64[ns]", freq="Q-DEC", tz=None
)
expected2 = DatetimeIndex(
expected2_list, dtype="datetime64[ns]", freq="W-SUN", tz=None
)
tm.assert_index_equal(result1, expected1)
tm.assert_index_equal(result2, expected2)
dt1, dt2 = "2017-01-01", "2017-01-01"
tz1, tz2 = "US/Eastern", "Europe/London"
@pytest.mark.parametrize(
"start,end",
[
(Timestamp(dt1, tz=tz1), Timestamp(dt2)),
(Timestamp(dt1), Timestamp(dt2, tz=tz2)),
(Timestamp(dt1, tz=tz1), Timestamp(dt2, tz=tz2)),
(Timestamp(dt1, tz=tz2), Timestamp(dt2, tz=tz1)),
],
)
def test_mismatching_tz_raises_err(self, start, end):
# issue 18488
msg = "Start and end cannot both be tz-aware with different timezones"
with pytest.raises(TypeError, match=msg):
date_range(start, end)
with pytest.raises(TypeError, match=msg):
date_range(start, end, freq=BDay())
class TestBusinessDateRange:
def test_constructor(self):
bdate_range(START, END, freq=BDay())
bdate_range(START, periods=20, freq=BDay())
bdate_range(end=START, periods=20, freq= | BDay() | pandas._libs.tslibs.offsets.BDay |
import pandas as pd
import numpy as np
import re
import json
#from tkinter.ttk import Separator
def parse_thermo_vcf(vcf,excel):
    ''' Read the VCF and the Excel export and merge them into a single pandas DataFrame. '''
df_vcf = pd.read_csv(vcf, sep="\t", comment='#', names=["CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","GT"])
df_excel = pd.read_excel(excel)
df1 = pd.DataFrame()
df2 = pd.DataFrame()
# With fusion
df_excel_w = df_excel.loc[df_excel['Type'] == 'Fusion']
if not df_excel_w.empty:
#df_excel_w.loc[:,'ID'] = df_excel_w.loc[:,'Variant ID'] + "_1"
df_excel_w = df_excel_w.assign(ID = df_excel_w.loc[:,'Variant ID'] + "_1")
df1 = pd.merge(df_excel_w,df_vcf,on='ID',how='left')
df1.loc[:,'ID']=df1.loc[:,'Variant ID']
# Without fusion
df_excel_wo = df_excel.loc[df_excel['Type'] != 'Fusion']
if not df_excel_wo.empty:
        df_excel_wo = df_excel_wo.reset_index(drop=True)
df_vcf["Locus_vcf"] = df_vcf.CHROM.astype(str)+":" \
+df_vcf.POS.astype(str)
#df_vcf.Locus = df_vcf.Locus.astype(str)
#df_excel_wo.Locus.astype(str)
df2 = pd.merge(df_excel_wo,df_vcf,\
left_on=['Locus'],right_on=['Locus_vcf'],how='left')
df2 = df2.drop(columns=['Locus_vcf'])
df = pd.concat([df1,df2])
    df = df.reset_index(drop=True)
df = df.rename(columns={'ALT':'ALTEND'})
    # Removing columns TYPE and SVTYPE (already specified in column Type)
df = df.drop(columns=['TYPE','SVTYPE', 'Gene', 'Locus', \
'AA Change', 'Ref','Alt','Raw Read Depth', \
'Effective Read Depth', 'Alt Allele Read Counts',\
'Allele Ratio', 'Nuc Change', 'Allele Frequency', \
'Allele Frequency (%)', 'Filtered Read Coverage', \
'Allele read Count'], errors='ignore')
return df
def filter_nocalls(df):
    ''' Remove variants whose genotype starts with 0/0 or ./. (no-calls). '''
dfOut = df[ (~(df['GT'].str.startswith('./.'))) & (~(df['GT'].str.startswith('0/0'))) ]
return dfOut
def explode_format_gt(df):
'''
    Explode the GT and FORMAT columns into separate columns.
    #1: The following columns exist in two places, so they are removed from FORMAT: AF,AO,DP,FAO,FDP,FRO,FSAF,FSAR,FSRF,FSRR,RO,SAF,SAR,SRF,SRR
'''
df.reset_index(inplace=True,drop=True)
df.rename(columns = {'GT':'GTFORMAT'}, inplace = True)
ny = pd.DataFrame(list(dict(zip(a,b)) for a,b in zip(df['FORMAT'].str.split(":"), df['GTFORMAT'].str.split(":"))))
for i in ["AF","AO","DP","FAO","FDP","FRO","FSAF","FSAR","FSRF","FSRR","RO","SAF","SAR","SRF","SRR"]: #1
try:
ny = ny.drop(i, axis=1)
except:
pass
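            # this column is not present in FORMAT for this run, so there is nothing to drop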
dfOut = pd.concat([df, ny], axis=1)
del dfOut["GTFORMAT"]
del dfOut["FORMAT"]
return dfOut
def replace_semi(inputstring):
    ''' Where a semicolon occurs inside curly braces in the INFO column, it is replaced with an underscore - otherwise parsing fails. '''
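    # Example: 'A=1;B={x;y};C=2' -> 'A=1;B={x_y};C=2'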
return re.sub(r"{[^{}]+}", lambda x: x.group(0).replace(";", "_"), inputstring)
def explode_info(df):
    ''' Explode the INFO column into separate columns. '''
df.reset_index(inplace=True,drop=True)
    # First we must remove all semicolons that are inside curly braces:
df["INFO"] = df["INFO"].apply(replace_semi)
    # Replace the value-less 'HS' flag with 'HS=NA' so every INFO entry has a KEY=VALUE form
    # (match changed from ';HS;' -> ';HS=NA;' to 'HS;' -> 'HS=NA;' - ok? /mfahls)
df['INFO'] = df['INFO'].str.replace('HS;','HS=NA;')
df['INFO'] = df['INFO'].str.replace('Non-Targeted;','Non-Targeted=1;')
ny2 = pd.DataFrame([dict(w.split('=', 1) for w in x) for x in df["INFO"].str.split(";")])
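    # ny2 gets one column per KEY from the semicolon-separated KEY=VALUE pairs in INFO (one row per variant)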
dfOut = pd.concat([df, ny2], axis=1)
del dfOut["INFO"]
if 'END' in dfOut.columns:
dfOut['ALTEND'] = dfOut['ALTEND'].replace('<CNV>',np.nan)
temp1 = dfOut.ALTEND
temp2 = dfOut.END
dfOut.ALTEND = temp1.combine_first(temp2)
return dfOut
def explode_func(df):
    ''' Explode the FUNC column into separate columns.
    If there are several transcripts/genes per variant, these are numbered. '''
row_count = len(df.index)
dfOut = pd.DataFrame()
for row in range(row_count):
dftemp2 = | pd.DataFrame() | pandas.DataFrame |
import unittest
import pandas as pd
import numpy as np
from math import sqrt
import numba
import hpat
from hpat.tests.test_utils import (count_array_REPs, count_parfor_REPs,
count_parfor_OneDs, count_array_OneDs,
count_parfor_OneD_Vars, count_array_OneD_Vars,
dist_IR_contains)
from datetime import datetime
import random
class TestDate(unittest.TestCase):
@unittest.skip("needs support for boxing/unboxing DatetimeIndex")
def test_datetime_index_in(self):
def test_impl(dti):
return dti
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
dti = pd.DatetimeIndex(df['str_date'])
np.testing.assert_array_equal(hpat_func(dti).values, test_impl(dti).values)
def test_datetime_index(self):
def test_impl(df):
return pd.DatetimeIndex(df['str_date']).values
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_index_kw(self):
def test_impl(df):
return pd.DatetimeIndex(data=df['str_date']).values
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
np.testing.assert_array_equal(hpat_func(df), test_impl(df))
def test_datetime_arg(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = pd.DatetimeIndex(df['str_date']).to_series()
np.testing.assert_array_equal(hpat_func(A), test_impl(A))
def test_datetime_getitem(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
df = self._gen_str_date_df()
A = | pd.DatetimeIndex(df['str_date']) | pandas.DatetimeIndex |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 7 14:21:28 2021
@author: angus
"""
import streamlit as st
import pandas as pd
import plotly.figure_factory as ff
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.express as px
import numpy as np
import investpy
import base64
import yfinance as yf
import datetime as dt
from io import BytesIO
st.set_page_config(layout="wide")
st.title('HKEX IPO Performance')
st.write ('All assumptions and further info can be found in [documentation](https://github.com/epiphronquant/HKEX-IPO-app)')
df = pd.read_excel(r'RawData.xlsx')
df_export = df
df = df.loc[df['Count as IPO?'] == 1] ### Filters rows where it is actually an IPO
df['Listing Date▼']= pd.to_datetime(df['Listing Date▼'])### converts listing date to datetime variable
### create dropdown selector
column_1, column_2, column_3 = st.columns(3) ### Divides page into 2 columns
with column_1:
language = st.selectbox(
'Which language should the company names be?',
['English', '中文'])
'You selected: ', language
if language =='中文':
df['Name'] = df['Name CN']
else:
pass
with column_2:### Chart of distribution and Lead 1 Chart
sectors = df['Sector']
sectors = sectors.tolist()
sectors = list(dict.fromkeys(sectors))
healthcare = sectors [0]
sectors.append('All')### adds an option for All IPOs
sectors = sorted(sectors [1:])
sectors.insert(0, healthcare)
sector = st.selectbox(
'Which sector are you interested in?',
sectors)
'You selected: ', sector
if sector == 'All':
df = df[(df['Listing Date▼'] >= '2019-01-01')] ### healthcare data runs from 2018 while all IPO data runs from 2019
else:
df = df.loc[df['Sector'] == sector]
with column_3:
### Dropdown box for median or mean
central_tendancy = ['Average', 'Median']
select_central = st.selectbox(
'Average or Median?',
central_tendancy)
'You selected: ', select_central
### add a slider to filter data by dates
format = 'MMM DD, YYYY' # format output
start_date = df ['Listing Date▼'].iloc [0]
start_date = start_date.date()
end_date = df ['Listing Date▼'].iloc [-1]
end_date = end_date.date()
slider = st.slider('Select date', min_value=start_date, value=(start_date,end_date) ,max_value=end_date, format=format)
start_date = slider [0].strftime('%Y%m%d')
end_date = slider [1].strftime('%Y%m%d')
def clean_time (date): ### presents selected slider time in the same format as the slider
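    # e.g. datetime.date(2021, 9, 7) -> 'Sep 7, 2021'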
a = date.ctime()
a = a.split()
a = a[1] + ' '+ a[2] + ', '+ a[-1]
return a
st.info('Start: **%s** End: **%s**' % (clean_time(slider[0]),clean_time(slider[1]))) ### info bar
df = df[(df['Listing Date▼'] >= start_date) & (df['Listing Date▼'] <= end_date)] ### filter the data
### create charts
def lead_chart(x, y1, y2, title):
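    # x: category labels; y1: deal counts (left axis); y2: first-day returns (right axis, percent format)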
fig = go.Figure(
data=[go.Bar(name='Count', x=x, y=y1, yaxis='y', offsetgroup=1),
go.Bar(name='% Chg Debut', x=x, y=y2, yaxis='y2', offsetgroup=2)],
layout={'yaxis': {'title': 'Count'},
'yaxis2': {'title': '% Chg Debut', 'overlaying': 'y', 'side': 'right', 'tickformat': ',.0%'}})
fig.update_layout(barmode='group',title={'text': title})
fig.update_xaxes(categoryorder='max descending')
return fig
### Charts for normal distribution, industry performance, lead 1, lead 2
column_1, column_2 = st.columns(2) ### Divides page into 2 columns
with column_1:### Chart of distribution and Lead 1 Chart
### Chart of distribution
x1 = df ['% Chg. on2Debut▼']
x1 = x1.tolist()
x1 = [x1]
label = '% Chg. on Debut'
label = [label]
names = df['Name']
names = names.tolist()
names = [names]
fig = ff.create_distplot(x1, label, rug_text = names, bin_size = .2)
fig.update_layout( xaxis_tickformat = ',.2%',title={'text': "Normal Distribution Plot and Rugplot for First Day Return"})
st.plotly_chart(fig)
#### Lead 1 Chart
lead1 = df [['% Chg. on2Debut▼', 'Industry', 'Name', 'Lead 1', 'Listing Date▼']]
a = lead1.groupby(['Lead 1']).count() ### gathers data by Lead 1
industries = a.index
industries = industries.tolist()
a = a['% Chg. on2Debut▼'] ### data column that shows deal count
a = a.rename('Count')
a = a.to_list()
if select_central == 'Average':
b = lead1.groupby(['Lead 1']).mean()
else:
b = lead1.groupby(['Lead 1']).median()
b = b['% Chg. on2Debut▼'].to_list()
fig = lead_chart(industries, a, b,"Lead 1 Deal Count and " + select_central + " First Day Return" )
st.plotly_chart(fig)
with column_2:
### chart of industry performance and Lead 2 Chart
industry = df [['% Chg. on2Debut▼', 'Industry', 'Name', 'Listing Date▼']]
a = industry.groupby(['Industry']).count()
industries = a.index
industries = industries.tolist()
a = a['% Chg. on2Debut▼']
a = a.rename('Count')
a = a.to_list()
if select_central == 'Average':
b = industry.groupby(['Industry']).mean()
else:
b = industry.groupby(['Industry']).median()
b = b['% Chg. on2Debut▼'].to_list()
fig = go.Figure(
data=[
go.Bar(name='Count', x=industries, y=a, yaxis='y', offsetgroup=1),
go.Bar(name='% Chg Debut', x=industries, y=b, yaxis='y2', offsetgroup=2)],
layout={
'yaxis': {'title': 'Count'},
'yaxis2': {'title': '% Chg Debut', 'overlaying': 'y', 'side': 'right', 'tickformat': ',.0%'}
})
fig.update_layout(barmode='group',legend=dict(yanchor="top",y=1,xanchor="right",x=1.35),
title={'text': "Industry Deal Count and " + select_central + " First Day Return"})
fig.update_xaxes(categoryorder='max descending')
st.plotly_chart(fig)
#### Lead 2 Chart
lead2 = df [['% Chg. on2Debut▼', 'Industry', 'Name', 'Lead 2', 'Listing Date▼']]
a = lead2.groupby(['Lead 2']).count()
industries = a.index
industries = industries.tolist()
a = a['% Chg. on2Debut▼']
a = a.rename('Count')
a = a.to_list()
if select_central == 'Average':
b = lead2.groupby(['Lead 2']).mean()
else:
b = lead2.groupby(['Lead 2']).median()
b = b['% Chg. on2Debut▼'].to_list()
fig = lead_chart(industries, a, b,"Lead 2 Deal Count and " + select_central+ " First Day Return" )
st.plotly_chart(fig)
### Charts for Lead 1&2 Performance
#### combine lead 1 with lead 2
lead12 = df[['% Chg. on2Debut▼', 'Industry', 'Name', 'Lead 1', 'Lead 2', 'Listing Date▼']]
lead12['Lead 1 & 2'] = df['Lead 1'] + ' & ' + df['Lead 2']
a = lead12.groupby(['Lead 1 & 2']).count()
industries = a.index
industries = industries.tolist()
a = a['% Chg. on2Debut▼']
a = a.rename('Count')
a = a.to_list()
if select_central == 'Average':
b = lead12.groupby(['Lead 1 & 2']).mean()
else:
b = lead12.groupby(['Lead 1 & 2']).median()
# b = lead12.groupby(['Lead 1 & 2']).mean()
b = b['% Chg. on2Debut▼'].to_list()
### graph
fig = lead_chart(industries, a, b,"Lead 1 & 2 Deal Count and "+ select_central+" First Day Return" )
st.plotly_chart(fig, use_container_width=True)
column_1, column_2 = st.columns(2) ### Divides page into 2 columns
with column_1:
### Chart showing first day return performance over time with HSH and HSI
#add a box to select Chg on debut or -1 trading day as primary axis
ret = st.selectbox(
'Which return would you like to analyse?',
['Chg on Debut', 'Return till Today'])
'You selected: ', ret
if ret == 'Chg on Debut':
fdayret = df [['Listing Date▼','% Chg. on2Debut▼']]
else:
fdayret = df [['Listing Date▼','-1 Trading Days']]
# fdayret = df [['Listing Date▼','% Chg. on2Debut▼']]
if select_central == 'Average':
a = fdayret.groupby(['Listing Date▼']).mean()
else:
a = fdayret.groupby(['Listing Date▼']).median()
fig = make_subplots(specs=[[{"secondary_y": True}]])
with column_2:
#add a box to select HSI or HSH as second axis
index = st.selectbox(
'Which index would you like to compare it to?',
['Hang Seng Healthcare', 'Hang Seng Index'])
'You selected: ', index
### Download HSH and HSI data
today = pd.to_datetime('today').strftime('%d/%m/%Y')
start = a.index[0].strftime('%d/%m/%Y')
end = a.index[-1].strftime('%d/%m/%Y')
if index == 'Hang Seng Index':
df_index = investpy.get_index_historical_data(index='Hang Seng',
country='hong kong',
from_date= start,
to_date= end)
else:
df_index = investpy.get_index_historical_data(index='hs healthcare',
country='hong kong',
from_date= start,
to_date= end)
# Add traces
if ret == 'Chg on Debut':
fig.add_trace(go.Scatter(x= a.index, y= a['% Chg. on2Debut▼'], name= ret),
secondary_y=False)
else:
# fdayret = df [['Listing Date▼','-1 HSI Days']]
fig.add_trace(go.Scatter(x= a.index, y= a['-1 Trading Days'], name= ret),
secondary_y=False)
fig.add_trace(go.Scatter(x = df_index.index, y= df_index['Close'], name= index),
secondary_y=True)
# Add figure title
fig.update_layout(title_text= ret + " with Index Level")
# Set x-axis title
fig.update_xaxes(title_text="Date")
# Set y-axes titles
fig.update_yaxes(title_text= ret, secondary_y=False)
fig.update_yaxes(title_text="Index Level", secondary_y=True)
fig.layout.yaxis.tickformat= ',.2%'
# fig.show()
st.plotly_chart(fig, use_container_width=True)
#### trading performance's chart
column_1, column_2 = st.columns(2) ### Divides page into 2 columns
with column_1:
last_x = st.number_input('Number of most recent IPOs to display', value = 10)
last_x = int(last_x)
@st.cache(ttl = 1800)
def chart_7(df):
### add chart showing last 10 IPOs and their detailed trading performances
## gather the last 10 tickers and stock names and listing date and price
df_10 = df [-last_x:]
df_10 = df_10 [['Name', 'Code', 'Listing Price', 'Listing Date▼']]
## Gather stock codes and tickers
df_yf = df_10 ['Code']
df_tickers = df_yf.tolist()
df_yf = yf.download(df_tickers)
df_yf = df_yf ['Close']
df_yf = df_yf [df_tickers] ## reorder the columns so it is in the order as inputted
df_name = df_10 ['Name']
df_name = df_name.tolist()
df_yf.columns = df_name ## rename the column codes to column names
df_yf8 = pd.DataFrame()
for name in df_name:
df_yf2 = df_yf [name]
df_yf2 = df_yf2.dropna()
list_date = df_10.loc [df_10['Name'] == name]
list_date = list_date ['Listing Date▼'].values
list_date = list_date [0]
df_yf2 = df_yf2.reset_index()
df_yf2 = df_yf2[(df_yf2['Date'] >= list_date)]
df_yf2 = df_yf2.set_index('Date')
df_yf2=df_yf2.iloc[:,0]
price = df_10.loc [df_10['Name'] == name]
price = price ['Listing Price'].values
price = price [0]
df_yf2 = df_yf2 / price -1
date = df_yf2.index [0]
date = date - dt.timedelta(days=1)
ser = pd.Series(data= {date : 0}, index=[date], name = 'Stock Name')# df_yf2 = df_yf2.append()
ser = ser.append(df_yf2)
ser = pd.DataFrame(ser, columns = ['Close'])
ser ['Stock Name'] = name
df_yf8 = df_yf8.append(ser)
df_yf8 = df_yf8.reset_index()
df_yf8 = df_yf8.rename({'index':'Date', 'Close':'Return'}, axis = 'columns')
markers = df_yf8 ['Date'].min() ### display of markers settings
markers = markers.date()
markers = markers > dt.date.today() - dt.timedelta(days=120)
return df_yf8, markers
df_yf8, markers = chart_7(df)
fig = px.line(df_yf8, x= 'Date', y= 'Return', color = 'Stock Name', title= 'Last ' + str(last_x)+' ' + sector + ' IPOs Return Post IPO', markers = markers)
fig.layout.yaxis.tickformat = ',.0%'
st.plotly_chart(fig)
with column_2:
### customizable chart for displaying various IPOs
names = st.text_input('Type in names of stock/stocks that have IPOd in the past 3 years. e.g TRANSCENTA-B, bioheart-b')
@st.cache(ttl = 1800)
def chart_8(names):
names = names.split(',')
names = map(str.strip, names)
names = map(str.upper, names)
names = list(names)
tickers = []
for name in names:
# name = 'SENSETIME-W'
ticker = df.loc [df['Name'] == name]
ticker = ticker ['Code'].values
ticker = ticker [0]
tickers.append(ticker)
df_yf = yf.download(tickers) ['Close']
if len(tickers) ==1:
df_yf = df_yf.rename(names[0])
else:
df_yf = df_yf [tickers] ## reorder the columns so it is in the order as inputted
df_yf.columns = names ## rename the column codes to column names
df_yf8 = pd.DataFrame()
for name in names:
if len(tickers) ==1:
df_yf2 = df_yf
else:
df_yf2 = df_yf [name] ## reorder the columns so it is in the order as inputted
df_yf2 = df_yf2.dropna()
list_date = df.loc [df['Name'] == name]
list_date = list_date ['Listing Date▼'].values
list_date = list_date [0]
df_yf2 = df_yf2.reset_index()
df_yf2 = df_yf2[(df_yf2['Date'] >= list_date)]
df_yf2 = df_yf2.set_index('Date')
df_yf2=df_yf2.iloc[:,0]
price = df.loc [df['Name'] == name]
price = price ['Listing Price'].values
price = price [0]
df_yf2 = df_yf2 / price -1
date = df_yf2.index [0]
date = date - dt.timedelta(days=1)
ser = pd.Series(data= {date : 0}, index=[date], name = 'Stock Name')# df_yf2 = df_yf2.append()
ser = ser.append(df_yf2)
ser = pd.DataFrame(ser, columns = ['Close'])
ser ['Stock Name'] = name
df_yf8 = df_yf8.append(ser)
df_yf8 = df_yf8.reset_index()
df_yf8 = df_yf8.rename({'index':'Date', 'Close':'Return'}, axis = 'columns')
markers = df_yf8 ['Date'].min() ### display of markers settings
markers = markers.date()
markers = markers > dt.date.today() - dt.timedelta(days=120)
return df_yf8, markers
if names == '':
pass
else:
df_yf8, markers = chart_8(names)
fig = px.line(df_yf8, x= 'Date', y= 'Return', color = 'Stock Name', title= 'Selected '+sector + ' IPOs Return Post IPO', markers = markers)
fig.layout.yaxis.tickformat = ',.0%'
st.plotly_chart(fig)
### Charts for Trading Day return by benchmark and by industry
###### to better display the data, 480 trading days is used as the return till today
st.subheader ('480 Trading Days post IPO is used as a placeholder for the return till Today')
column_1, column_2 = st.columns(2) ### Divides page into 2 columns
with column_1: ### trading day performance compared to HSI and HSH
### gather the data columns
comps = df [["0 Trading Days", "80 Trading Days", "100 Trading Days", "120 Trading Days", "140 Trading Days", "160 Trading Days", "252 Trading Days", "372 Trading Days", "-1 Trading Days"]]
compsHSI = df[["80 HSI Days", "100 HSI Days", "120 HSI Days", "140 HSI Days", "160 HSI Days", "252 HSI Days", "372 HSI Days","-1 HSI Days"
]]
compsHSH = df [["80 HSH Days", "100 HSH Days", "120 HSH Days", "140 HSH Days", "160 HSH Days", "252 HSH Days", "372 HSH Days","-1 HSH Days"]]
### find central tendancy using selected option
if select_central == 'Average':
comps = comps.mean()
compsHSI = compsHSI.mean()
compsHSH = compsHSH.mean()
else:
comps = comps.median()
compsHSI = compsHSI.median()
compsHSH = compsHSH.median()
### add 0's for compsHSI and compsHSH because there is only data for market close on the day
a = pd.Series(data = [0], index = ['0 HSI Days'])
compsHSI = a.append(compsHSI)
a = pd.Series(data = [0], index = ['0 HSH Days'])
compsHSH = a.append(compsHSH)
## Convert the numerous series to one dataframe that has multiple columns
def clean_comps(comps):
comps = comps.to_frame()
comps = comps.reset_index()
comps2 = comps ['index']
comps2 = comps2.str.split(expand = True)
comps ['Trading Days since IPO'] = comps2.iloc [:,0:1]
comps3 = comps2[comps2.columns[1]] +' '+ comps2[comps2.columns[2]]
comps ['Benchmark'] = comps3
return comps
comps = clean_comps(comps)
comps = comps.append(clean_comps(compsHSI))
comps = comps.append(clean_comps(compsHSH))
### clean the dataframe
comps ['Trading Days since IPO'] = pd.to_numeric(comps ['Trading Days since IPO'])
comps = comps.rename( columns = {comps.columns[1]: 'return'})
comps['Trading Days since IPO'] = comps ['Trading Days since IPO'].replace (-1, 480)
### Graph it onto a chart
fig = px.line(comps, x= 'Trading Days since IPO', y= 'return', color = 'Benchmark', title= select_central + ' Trading Day Return Post IPO by Benchmark', markers = True)
fig.layout.yaxis.tickformat = ',.0%'
st.plotly_chart(fig)
with column_2:### trading day performance by industry
### gather the data columns
comps = df [['Industry',"0 Trading Days", "80 Trading Days", "100 Trading Days", "120 Trading Days", "140 Trading Days", "160 Trading Days", "252 Trading Days", "372 Trading Days", "-1 Trading Days"]]
compsHSI = df[['Industry',"80 HSI Days", "100 HSI Days", "120 HSI Days", "140 HSI Days", "160 HSI Days", "252 HSI Days", "372 HSI Days","-1 HSI Days"
]]
compsHSH = df [['Industry',"80 HSH Days", "100 HSH Days", "120 HSH Days", "140 HSH Days", "160 HSH Days", "252 HSH Days", "372 HSH Days","-1 HSH Days"]]
### for selected option, calculate the central tendancy
if select_central == 'Average':
comps = comps.groupby(['Industry']).mean()
else:
comps = comps.groupby(['Industry']).median()
comps = comps.reset_index()
industries = comps['Industry']
comps1 = | pd.DataFrame() | pandas.DataFrame |
import argparse
import datetime
import os
import shutil
import unittest
from unittest import mock
import pandas
from matrix.common import date
from matrix.common.request.request_tracker import Subtask
from matrix.common.query.cell_query_results_reader import CellQueryResultsReader
from matrix.common.query.feature_query_results_reader import FeatureQueryResultsReader
from matrix.docker.matrix_converter import main, MatrixConverter, SUPPORTED_FORMATS
from matrix.docker.query_runner import QueryType
class TestMatrixConverter(unittest.TestCase):
def setUp(self):
self.test_manifest = {
"columns": ["a", "b", "c"],
"part_urls": ["A", "B", "C"],
"record_count": 5
}
args = ["test_id", "test_exp_manifest", "test_cell_manifest",
"test_gene_manifest", "test_target", "loom", "."]
parser = argparse.ArgumentParser()
parser.add_argument("request_id")
parser.add_argument("expression_manifest_key")
parser.add_argument("cell_metadata_manifest_key")
parser.add_argument("gene_metadata_manifest_key")
parser.add_argument("target_path")
parser.add_argument("format", choices=SUPPORTED_FORMATS)
parser.add_argument("working_dir")
self.args = parser.parse_args(args)
self.matrix_converter = MatrixConverter(self.args)
@mock.patch("os.remove")
@mock.patch("matrix.common.request.request_tracker.RequestTracker.creation_date", new_callable=mock.PropertyMock)
@mock.patch("matrix.common.request.request_tracker.RequestTracker.complete_request")
@mock.patch("matrix.common.request.request_tracker.RequestTracker.complete_subtask_execution")
@mock.patch("matrix.docker.matrix_converter.MatrixConverter._upload_converted_matrix")
@mock.patch("matrix.docker.matrix_converter.MatrixConverter._to_loom")
@mock.patch("matrix.common.query.query_results_reader.QueryResultsReader._parse_manifest")
def test_run(self,
mock_parse_manifest,
mock_to_loom,
mock_upload_converted_matrix,
mock_subtask_exec,
mock_complete_request,
mock_creation_date,
mock_os_remove):
mock_parse_manifest.return_value = self.test_manifest
mock_creation_date.return_value = date.to_string(datetime.datetime.utcnow())
mock_to_loom.return_value = "local_matrix_path"
self.matrix_converter.run()
mock_manifest_calls = [
mock.call("test_cell_manifest"),
mock.call("test_exp_manifest"),
mock.call("test_gene_manifest")
]
mock_parse_manifest.assert_has_calls(mock_manifest_calls)
mock_to_loom.assert_called_once()
mock_subtask_exec.assert_called_once_with(Subtask.CONVERTER)
mock_complete_request.assert_called_once()
mock_upload_converted_matrix.assert_called_once_with("local_matrix_path", "test_target")
@mock.patch("s3fs.S3FileSystem.open")
def test__n_slices(self, mock_open):
manifest_file_path = "tests/functional/res/cell_metadata_manifest"
with open(manifest_file_path) as f:
mock_open.return_value = f
self.matrix_converter.query_results = {
QueryType.CELL: CellQueryResultsReader("test_manifest_key")
}
self.assertEqual(self.matrix_converter._n_slices(), 8)
def test__make_directory(self):
self.assertEqual(os.path.isdir('test_target'), False)
results_dir = self.matrix_converter._make_directory()
self.assertEqual(os.path.isdir('test_target'), True)
shutil.rmtree(results_dir)
def test__zip_up_matrix_output(self):
results_dir = self.matrix_converter._make_directory()
shutil.copyfile('LICENSE', './test_target/LICENSE')
path = self.matrix_converter._zip_up_matrix_output(results_dir, ['LICENSE'])
self.assertEqual(path, './test_target.zip')
os.remove('./test_target.zip')
@mock.patch("pandas.DataFrame.to_csv")
@mock.patch("matrix.common.query.feature_query_results_reader.FeatureQueryResultsReader.load_results")
@mock.patch("matrix.common.query.query_results_reader.QueryResultsReader._parse_manifest")
def test__write_out_gene_dataframe__with_compression(self, mock_parse_manifest, mock_load_results, mock_to_csv):
self.matrix_converter.query_results = {
QueryType.FEATURE: FeatureQueryResultsReader("test_manifest_key")
}
results_dir = self.matrix_converter._make_directory()
mock_load_results.return_value = pandas.DataFrame()
results = self.matrix_converter._write_out_gene_dataframe(results_dir, 'genes.csv.gz', compression=True)
self.assertEqual(type(results).__name__, 'DataFrame')
mock_load_results.assert_called_once()
mock_to_csv.assert_called_once_with('./test_target/genes.csv.gz',
compression='gzip',
index_label='featurekey',
sep='\t')
shutil.rmtree(results_dir)
@mock.patch("pandas.DataFrame.to_csv")
@mock.patch("matrix.common.query.feature_query_results_reader.FeatureQueryResultsReader.load_results")
@mock.patch("matrix.common.query.query_results_reader.QueryResultsReader._parse_manifest")
def test__write_out_gene_dataframe__without_compression(self, mock_parse_manifest, mock_load_results, mock_to_csv):
self.matrix_converter.query_results = {
QueryType.FEATURE: FeatureQueryResultsReader("test_manifest_key")
}
results_dir = self.matrix_converter._make_directory()
mock_load_results.return_value = pandas.DataFrame()
results = self.matrix_converter._write_out_gene_dataframe(results_dir, 'genes.csv', compression=False)
self.assertEqual(type(results).__name__, 'DataFrame')
mock_load_results.assert_called_once()
mock_to_csv.assert_called_once_with('./test_target/genes.csv', index_label='featurekey')
shutil.rmtree(results_dir)
@mock.patch("pandas.DataFrame.reindex")
@mock.patch("pandas.DataFrame.to_csv")
def test__write_out_cell_dataframe__with_compression(self, mock_to_csv, mock_reindex):
mock_reindex.return_value = pandas.DataFrame()
results_dir = './test_target'
results = self.matrix_converter._write_out_cell_dataframe(results_dir,
'cells.csv.gz',
| pandas.DataFrame() | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem
import gzip
# +
#Get the compound - virus interaction network
compound_viral_protein_info = pd.read_csv("Compound_Viral_proteins_Network.csv",header='infer',sep="\t")
#Get rows with missing pChembl Values and remove them
bool_series = pd.notnull(compound_viral_protein_info['pchembl_value'])
print("Missing Chembl Items: ",sum(pd.isnull(compound_viral_protein_info['pchembl_value'])))
subset0_compound_viral_protein_info = compound_viral_protein_info[bool_series]
print("Shape initially: ",subset0_compound_viral_protein_info.shape)
#Select compound viral interactions with standard type IC50, Ki, Kd
subset1_compound_viral_protein_info = subset0_compound_viral_protein_info.loc[subset0_compound_viral_protein_info['standard_type'].isin(['Potency','IC50','Ki','Kd'])]
print("Shape after standard type selection: ",subset1_compound_viral_protein_info.shape)
#Fix the smiles to be in standard representation
smiles_info = subset1_compound_viral_protein_info[["standard_inchi_key","canonical_smiles"]].values.tolist()
smiles_info = list(set(tuple(x) for x in smiles_info))
canonical_smiles_list, inchikeys_smiles_list = [],[]
for i in range(len(smiles_info)):
canonical_smiles_list.append(smiles_info[i][1])
inchikeys_smiles_list.append(smiles_info[i][0])
#Remove compounds which have salt bridges and non-molecules
res1,res2 = [],[]
for i in range(len(canonical_smiles_list)):
smiles = canonical_smiles_list[i]
inchikey = inchikeys_smiles_list[i]
m = Chem.MolFromSmiles(smiles)
if m is not None:
rev_smiles = Chem.MolToSmiles(m,isomericSmiles=False)
#If molecule has salt bridge remove it
if ('.' not in rev_smiles):
res1.append(rev_smiles)
res2.append(inchikey)
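            # SMILES containing '.' (disconnected fragments, e.g. a salt such as 'CC(=O)[O-].[Na+]') were skipped by the check above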
#Use only compounds whose length is less than 128
to_use_compound_list = [x for x in res1 if len(x)<=128 and len(x)>=10]
to_use_ids = [i for i in range(len(res1)) if len(res1[i])<=128 and len(res1[i])>=10]
#Write the compound list in form readable for LSTM autoencoder
compound_info = | pd.DataFrame({'src':to_use_compound_list,'trg':to_use_compound_list}) | pandas.DataFrame |
import requests
from bs4 import BeautifulSoup
import pandas as pd
def get_tables(urls, link=False):
"""Returns a dataframes list with the tables of the different groups.
Keyword arguments:
urls -- list with urls of the different groups
link -- indicates whether you want to include the url of every team in the dataframe
(default False)."""
# Declare header variable with browsers info
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/47.0.2526.106 Safari/537.36'}
tables_list = [] # Save results in a list
try:
# Create BeautifulSoup objects from our responses
for i, url in enumerate(urls):
response = requests.get(url, headers=headers)
url_bs = BeautifulSoup(response.content, 'html.parser')
# Lists
header = []
datos = []
# The find_all() method returns the tags with the required information.
tags_header = url_bs.find_all("div", {"class": "classificationHeadingItem"})
tags_data = url_bs.find_all("div",
{"class": ["classificationItemOddWrapper", "classificationItemEvenWrapper"]})
tags_link = url_bs.find_all("a", {"class": "classificationTeam"})
# Extract headers, teams, goals and url links.
for tag_header in tags_header:
header.append(tag_header.text)
header.insert(0, 'Equipo'), header.insert(0, 'POS')
header = header[:-1]
for tag_data in tags_data:
datos.append(tag_data.text)
links_lst = []
for tag_link in tags_link:
links_lst.append('https://www.futboleras.es' + tag_link.get('href'))
# Format and put the data into a dataframe.
df = pd.DataFrame([sub.split(' ') for sub in datos])
df.drop(df.columns[[0, 10, 11]], axis=1, inplace=True)
splitcol = df[2].str.split(' ', 2, expand=True)
# Add the points, group and url links of each team.
df.insert(1, 'Equipo', splitcol[1]), df.insert(2, 'PTOS', splitcol[2])
del (df[2])
df.columns = header
df.insert(2, 'Grupo', i + 1)
if link:
df['Link'] = links_lst
tables_list.append(df)
return tables_list
except Exception:
print('Enter a valid url list.')
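# Illustrative usage (group_urls is a hypothetical list of group-standings URLs):
#   tables = get_tables(group_urls, link=True)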
def general_table(tables_list):
"""Returns the general table with all the teams.
Keyword arguments:
table_list -- list with all tables stored in dataframes"""
# Concatenate all dataframes.
df = pd.concat(tables_list, axis=0)
# Transform the columns to manipulate to integers.
df[['PTOS', 'PJ', 'POS', 'PG', 'PE', 'PP', 'GF', 'GC', 'DG']] = \
df[['PTOS', 'PJ', 'POS', 'PG', 'PE', 'PP', 'GF', 'GC', 'DG']].astype('int')
# Add the points per match.
df['PPP'] = (df.PTOS / df.PJ).round(2)
# Sorted table
df = df.sort_values(by=['PPP'], ascending=False)
return df
def top_scorers(urls, n=10, group=False):
"""Returns a list with the dataframes of the top scorers for each championship group.
Keyword arguments:
urls -- list with urls of the different top scorers groups.
n -- integer indicating the number of top scorers showed.
(default 10).
group -- integer indicating the dataframe group showed (default False returns
the dataframes list)."""
# Declare header variable with browsers info
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/47.0.2526.106 Safari/537.36'}
scorers_list = [] # Save results in a list
# Create BeautifulSoup objects from our responses
for i, url in enumerate(urls):
response = requests.get(url, headers=headers)
url_bs = BeautifulSoup(response.content, 'html.parser')
# Lists
jug = []
equipo = []
goles = []
# The find_all() method returns the tags with the required information.
tags_jug = url_bs.find_all("a", {"class": "scorersItemInfoPlayer"})
tags_equipo = url_bs.find_all("a", {"class": "scorersItemInfoTeam"})
tags_goles = url_bs.find_all("div", {"class": "scorersItemGoalsValue"})
# Extract players, teams, goals and url links.
for tag_jug in tags_jug:
jug.append(tag_jug.text)
for tag_equipo in tags_equipo:
equipo.append(tag_equipo.text)
for tag_goles in tags_goles:
goles.append(tag_goles.text)
# Format and put the data into a dataframe.
df = | pd.DataFrame({"Jugadora": jug[:20], "Equipo": equipo[:20], "Goles": goles[:20]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
This file is part of the Shotgun Lipidomics Assistant (SLA) project.
Copyright 2020 <NAME> (UCLA), <NAME> (UCLA), <NAME> (UW).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pandas as pd
# from pyopenms import *
import os
# import tkinter as tk
# from tkinter import ttk
from tkinter import messagebox
# from tkinter.messagebox import showinfo
from tkinter import *
# import matplotlib.pyplot as plt
# import matplotlib
# matplotlib.use("Agg")
from tkinter import filedialog
# import glob
import re
# import statistics
import datetime
# from matplotlib.pyplot import cm
# import seaborn as sns
def imp_map(maploc):
# map1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"),("all files", "*.*")))
maploc.configure(state="normal")
maploc.delete(1.0, END)
map1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"), ("all files", "*.*")))
maploc.insert(INSERT, map1)
maploc.configure(state="disabled")
def imp_method1(method1loc):
# file1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"),("all files", "*.*")))
method1loc.configure(state="normal")
method1loc.delete(1.0, END)
file1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"), ("all files", "*.*")))
method1loc.insert(INSERT, file1)
method1loc.configure(state="disabled")
def imp_method2(method2loc):
# file2 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"),("all files", "*.*")))
method2loc.configure(state="normal")
method2loc.delete(1.0, END)
file2 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"), ("all files", "*.*")))
method2loc.insert(INSERT, file2)
method2loc.configure(state="disabled")
def set_dir(dirloc_aggregate):
# setdir = filedialog.askdirectory()
dirloc_aggregate.configure(state="normal")
dirloc_aggregate.delete(1.0, END)
setdir = filedialog.askdirectory()
dirloc_aggregate.insert(INSERT, setdir)
dirloc_aggregate.configure(state="disabled")
def MergeApp(dirloc_aggregate, proname, method1loc, method2loc, maploc, CheckClustVis):
start = datetime.datetime.now()
os.chdir(dirloc_aggregate.get('1.0', 'end-1c'))
project = proname.get()
file1 = method1loc.get('1.0', 'end-1c')
file2 = method2loc.get('1.0', 'end-1c')
map1 = maploc.get('1.0', 'end-1c')
# Fix sample index(name) type for proper merge
def qcname(indname):
# if 'QC_SPIKE' in str(indname):
# return('QC_SPIKE')
# elif 'QC' in str(indname):
# return('QC')
# elif 'b' in str(indname) or 'm' in str(indname):
if re.search('[a-zA-Z]', str(indname)):
return (str(indname))
else:
return (int(indname))
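        # e.g. qcname('QC_SPIKE') -> 'QC_SPIKE' (kept as a string); qcname('12') -> 12 (int, matching the map's integer sample IDs)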
# Import DataFrames from file1
spequant1 = pd.read_excel(file1, sheet_name='Lipid Species Concentrations', header=0, index_col=0, na_values='.')
specomp1 = pd.read_excel(file1, sheet_name='Lipid Species Composition', header=0, index_col=0, na_values='.')
claquant1 = pd.read_excel(file1, sheet_name='Lipid Class Concentration', header=0, index_col=0, na_values='.')
faquant1 = pd.read_excel(file1, sheet_name='Fatty Acid Concentration', header=0, index_col=0, na_values='.')
facomp1 = pd.read_excel(file1, sheet_name='Fatty Acid Composition', header=0, index_col=0, na_values='.')
spequant1.index = list(map(qcname, list(spequant1.index)))
specomp1.index = list(map(qcname, list(specomp1.index)))
claquant1.index = list(map(qcname, list(claquant1.index)))
faquant1.index = list(map(qcname, list(faquant1.index)))
facomp1.index = list(map(qcname, list(facomp1.index)))
# Import DataFrames from file2
if file2 != '':
spequant2 = pd.read_excel(file2, sheet_name='Lipid Species Concentrations', header=0, index_col=0,
na_values='.')
specomp2 = pd.read_excel(file2, sheet_name='Lipid Species Composition', header=0, index_col=0, na_values='.')
claquant2 = pd.read_excel(file2, sheet_name='Lipid Class Concentration', header=0, index_col=0, na_values='.')
faquant2 = pd.read_excel(file2, sheet_name='Fatty Acid Concentration', header=0, index_col=0, na_values='.')
facomp2 = pd.read_excel(file2, sheet_name='Fatty Acid Composition', header=0, index_col=0, na_values='.')
spequant2.index = list(map(qcname, list(spequant2.index)))
specomp2.index = list(map(qcname, list(specomp2.index)))
claquant2.index = list(map(qcname, list(claquant2.index)))
faquant2.index = list(map(qcname, list(faquant2.index)))
facomp2.index = list(map(qcname, list(facomp2.index)))
else:
spequant2 = pd.DataFrame()
specomp2 = pd.DataFrame()
claquant2 = pd.DataFrame()
faquant2 = pd.DataFrame()
facomp2 = pd.DataFrame()
# Merge DataFrames
spequant = pd.concat([spequant1, spequant2], axis=1, sort=False)
specomp = pd.concat([specomp1, specomp2], axis=1, sort=False)
claquant = pd.concat([claquant1, claquant2], axis=1, sort=False)
faquant = pd.concat([faquant1, faquant2], axis=1, sort=False)
facomp = pd.concat([facomp1, facomp2], axis=1, sort=False)
# Sort Columns in Merged DataFrames
spequant = spequant.reindex(sorted(spequant.columns), axis=1)
specomp = specomp.reindex(sorted(specomp.columns), axis=1)
claquant = claquant.reindex(sorted(claquant.columns), axis=1)
clacomp = claquant.apply(lambda x: 100 * x / x.sum(), axis=1) # get class composit
faquant = faquant.reindex(sorted(faquant.columns), axis=1)
facomp = facomp.reindex(sorted(facomp.columns), axis=1)
# Write Master data sheet
# master = pd.ExcelWriter(project+'_master.xlsx')
# spequant.to_excel(master, 'Species Quant')
# specomp.to_excel(master, 'Species Composit')
# claquant.to_excel(master, 'Class Quant')
# clacomp.to_excel(master, 'Class Composit')
# faquant.to_excel(master, 'FattyAcid Quant')
# facomp.to_excel(master, 'FattyAcid Composit')
# master.save()
# print('master sheet saved')
# Import Map
sampinfo = pd.read_excel(map1, sheet_name=0, header=1, index_col=0, na_values='.')
# Exp name dict
expname = dict(zip(sampinfo.ExpNum, sampinfo.ExpName))
sampinfo = sampinfo.drop(['ExpName'], axis=1)
sampinfo.index = list(map(qcname, list(sampinfo.index)))
sampinfo['SampleNorm'] = sampinfo['SampleNorm'].astype('float64')
# Create Normalized Sheets
# spenorm = spequant[list(map(lambda x: isinstance(x, int), spequant.index))].copy()
# #exclude sample with string name
# clanorm = claquant[list(map(lambda x: isinstance(x, int), claquant.index))].copy()
# fanorm = faquant[list(map(lambda x: isinstance(x, int), faquant.index))].copy()
spenorm = spequant.copy() # inlude all samples
clanorm = claquant.copy()
fanorm = faquant.copy()
spenorm = spenorm.divide(
40) # x0.025 to reverse /0.025 in the standard coef. /0.025 is there to simulate LWM result
clanorm = clanorm.divide(40)
fanorm = fanorm.divide(40)
spenorm = spenorm.divide(sampinfo['SampleNorm'], axis='index')
clanorm = clanorm.divide(sampinfo['SampleNorm'], axis='index')
fanorm = fanorm.divide(sampinfo['SampleNorm'], axis='index')
# Fix GroupName. If GroupName and GroupNum doesn't match, change GroupName to match GroupNum
for i in sampinfo['ExpNum'].unique().astype(int):
for ii in sampinfo.loc[sampinfo['ExpNum'] == i, 'GroupNum'].unique().astype(int):
gNamlogic = np.logical_and(sampinfo['GroupNum'] == ii, sampinfo['ExpNum'] == i)
sampinfo.loc[gNamlogic, "GroupName"] = sampinfo['GroupName'][gNamlogic].reset_index(drop=True)[0]
# for i in range(1, int(max(sampinfo['ExpNum'])) + 1):
# for ii in range(1, int(max(sampinfo.loc[sampinfo['ExpNum'] == i, 'GroupNum'])) + 1):
# gNamlogic = np.logical_and(sampinfo['GroupNum'] == ii, sampinfo['ExpNum'] == i)
# sampinfo.loc[gNamlogic, "GroupName"] = sampinfo['GroupName'][gNamlogic].reset_index(drop=True)[0]
# Merge Map, using index (Sample column in map and sample name in raw data)
spequantin = pd.concat([sampinfo, spequant], axis=1, sort=False, join='inner')
specompin = pd.concat([sampinfo, specomp], axis=1, sort=False, join='inner')
claquantin = | pd.concat([sampinfo, claquant], axis=1, sort=False, join='inner') | pandas.concat |
import streamlit as st
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from PIL import Image
from scrapping import DataFetcher
from models import Models
from seir import SeirModel
corona = Image.open('images/title_logo.png')
st.image(corona)
st.write('Bem-vindo ao dashboard de acompanhamento de casos do Coronavírus no Brasil.')
st.write('Os dados apresentados aqui são disponibilizados por meio de APIs públicas. Para ver as fontes, acesse o repositório do projeto no Github. A atualização dos dados acontece várias vezes ao dia. Apenas números confirmados são apresentados.')
st.write('Utilize o menu ao lado para fazer a navegação.')
st.write('A maioria dos gráficos apresentados aqui são interativos: mostram valores ao passar do mouse, permitem zoom e posição de tela cheia. Explore os dados com o mouse!')
st.sidebar.title('Navegação')
actions = ['Situação atual', 'Previsões']
choice = st.sidebar.selectbox('Selecione uma opção', actions)
with st.spinner('Buscando dados...'):
data = DataFetcher()
st.write('________________________________')
brazil_general_code, brazil_states_code, world_cases_code = data.get_apis_status_code()
if choice == 'Situação atual':
if brazil_general_code == 200:
date, time = data.get_update_time()
st.write('<i>Dados atualizados em </i>', date, ' <i>às</i> ', time, unsafe_allow_html=True)
st.text('')
st.text('')
total_cases, deaths, recovers = data.get_main_counters()
st.write('<b>Casos totais até o momento: </b>', total_cases, unsafe_allow_html=True)
st.write('<b>Mortes confirmadas: </b>', deaths, unsafe_allow_html=True)
st.write('<b>Pessoas recuperadas: </b>', recovers, unsafe_allow_html=True)
else:
st.write("Dados indisponíveis no momento...")
if world_cases_code == 200:
cases_df = data.get_cases_timeline()
fig_daily_cases = go.Figure(data=go.Bar(x=cases_df['date'], y=cases_df['daily']))
fig_daily_cases.update_layout(title={'text':'<b>Novos casos por dia</b>', 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'},
yaxis_title='Novos casos confirmados',
margin=dict(b=0, t=70))
st.plotly_chart(fig_daily_cases, use_container_width=True)
fig_cumulative_cases = go.Figure()
fig_cumulative_cases.add_trace(go.Scatter(x=cases_df['date'], y=cases_df['confirmed'],
line=dict(color='#17becf', width=5),
mode='lines+markers', marker=dict(size=8), name='Confirmados', fill='tozeroy'))
fig_cumulative_cases.add_trace(go.Scatter(x=cases_df['date'], y=cases_df['deaths'],
line=dict(color='firebrick', width=5),
mode='lines+markers', marker=dict(size=8), name='Mortes', fill='tozeroy'))
fig_cumulative_cases.update_layout(title={'text':'<b>Casos e mortes acumulado</b>', 'x': 0.5, 'xanchor': 'center', 'yanchor': 'top'},
yaxis_title='',
margin=dict(t=70), legend_orientation='h')
st.plotly_chart(fig_cumulative_cases, use_container_width=True)
else:
st.write("Dados indisponíveis no momento...")
if brazil_states_code == 200:
cases_by_state, _ = data.get_state_cases()
st.text('')
st.header('Situação dos estados')
### Using plotly table
fig_states_table = go.Figure(data=[go.Table(
columnwidth = [600,600],
header=dict(values=list(cases_by_state.columns),
fill_color='lightblue',
align='center'),
cells=dict(values=[cases_by_state['Estado'], cases_by_state['Casos Confirmados'],
cases_by_state['Mortes'], cases_by_state['Letalidade']],
fill_color='lavender',
align='center')
)
])
fig_states_table.update_layout(margin=dict(l=0, r=0, t=10, b=0))
st.plotly_chart(fig_states_table, use_container_width=True, config={'displayModeBar': False})
fig_state_cases = data.get_states_cases_plot()
st.plotly_chart(fig_state_cases, use_container_width=True)
else:
st.write("Dados indisponíveis no momento...")
if choice == 'Previsões':
st.sidebar.title('Selecione o modelo')
    # The auto-regressive and SEIR models still need to be developed!
model = st.sidebar.radio('', ['Exponencial e Polinomial', 'Rede Neural Artificial', 'SEIR (Simulação)'])
if model == 'Exponencial e Polinomial':
st.markdown('## Modelo Exponencial')
st.write('O modelo exponencial é indicado para modelar a epidemia nos seus estágios iniciais.')
st.write('Contudo, a análise da adequação da curva de casos ao modelo exponencial nos informa a respeito das medidas de contenção que estão sendo adotadas.')
st.write('Caso o ajuste ao modelo não seja satisfatório, significa que as medidas de contenção estão surtindo efeito em freiar a epidemia que, caso as medidas de contenção fossem inexistentes, teria seu número casos acompanhando a curva exponencial.')
st.write('Clique em "*compare data on hover*" para comparar a previsão e o real para cada dia.')
model = Models()
cases_last_20days, predictions, r2 = model.get_exponential_predictions()
cases_df = data.get_cases_timeline()
# Quality of last 10 days fitting to exponential model plot
fig_quality = go.Figure()
fig_quality.add_trace(go.Scatter(x=cases_last_20days['date'], y=cases_last_20days['log_cases'], line=dict(color='firebrick', width=4),
mode='lines+markers', marker=dict(size=10), name='Real'))
fig_quality.add_trace(go.Scatter(x=cases_last_20days['date'], y=np.log(predictions['pred_cases']), name='Ajustado'))
fig_quality.update_layout(title={'text': '<b>Qualidade do ajuste exponencial em janela de 20 dias</b><br>(R² = {})'.format(r2), 'x': 0.5, 'xanchor': 'center'},
yaxis_title='log (casos totais)', legend=dict(x=.1, y=0.9))
st.plotly_chart(fig_quality, use_container_width=True)
# Number of cases with predictions plot
fig_pred = go.Figure()
fig_pred.add_trace(go.Scatter(x=cases_df['date'], y=cases_df['confirmed'], line=dict(color='#7f7f7f', width=4),
mode='lines+markers', marker=dict(size=10), name='Dados'))
fig_pred.add_trace(go.Scatter(x=predictions['date'], y=predictions['pred_cases'], name='Ajuste', line=dict(color='red')))
fig_pred.update_layout(title={'text':'<b>Ajuste exponencial com previsão para os próximos 7 dias</b>', 'x': 0.5, 'xanchor': 'center'},
yaxis_title='Casos totais', legend=dict(x=.1, y=0.9))
st.plotly_chart(fig_pred, use_container_width=True)
st.markdown('## Modelo Polinomial')
st.write('''
O modelo polinomial não força a curva a um ajuste exponencial. Portanto, tem a característica de proporcionar um ajuste mais "suave",
capaz de captar as tendências mais recentes. Para este ajuste, está sendo utilizado um modelo polinomial de terceiro grau.
Clique em "*compare data on hover*" para comparar a previsão e o real para cada dia.
''')
polinomial_predictions = model.get_polinomial_predictions()
fig_pred_poli = go.Figure()
fig_pred_poli.add_trace(go.Scatter(x=cases_df['date'], y=cases_df['confirmed'], line=dict(color='#7f7f7f', width=4),
mode='lines+markers', marker=dict(size=10), name='Dados'))
fig_pred_poli.add_trace(go.Scatter(x=polinomial_predictions['date'], y=polinomial_predictions['pred_cases'], name='Ajuste', line=dict(color='green')))
fig_pred_poli.update_layout(title={'text':'<b>Ajuste polinomial com previsão para os próximos 7 dias</b>', 'x': 0.5, 'xanchor': 'center'},
yaxis_title='Casos totais', legend=dict(x=.1, y=0.9))
st.plotly_chart(fig_pred_poli, use_container_width=True)
if model == 'Rede Neural Artificial':
st.markdown('## Rede Neural Artificial')
st.write('Em desenvolvimento...')
if model == 'SEIR (Simulação)':
total_cases, deaths, recovers = data.get_main_counters()
st.markdown('## Modelo SEIR')
st.write(
'''
SEIR é um modelo comportamental em epidemiologia que busca modelar como uma doença se espalha através de uma população.
SEIR é um acrônimo para **S**usceptible, **E**xposed, **I**nfected, **R**ecovered, ou em português: Suscetíveis, Expostos, Infectados e Recuperados.
A ideia básica é que, quando uma doença é introduzida em uma população, as pessoas se movem de um estágio do modelo para o outro. Ou seja, as pessoas suscetíveis podem se expor ao vírus, contraí-lo e eventualmente se recuperar ou padecer.
''')
seir_image = Image.open('images/seir.png')
st.image(seir_image, use_column_width=True)
st.write(
'''
A modelagem leva em consideração três parâmetros principais: $\\alpha$, $\\beta$ e $\\gamma$.
* $\\alpha$ é o inverso do período de incubação do vírus. Tempo de incubação é o período em que o vírus fica no corpo da pessoa sem produzir sintomas.
* $\\beta$ é a taxa de contato médio na população. Este é o parâmetro influenciado por medidas de contenção social.
* $\\gamma$ é o inverso da média do período de infecção. Período de infecção é o tempo em que uma pessoa fica acometida pelo vírus e pode transmití-lo.
Para essa modelagem, o valor de cada parâmetro foi retirado de artigos publicados na área, especificamente:
* [Epidemic analysis of COVID-19 in China by dynamical modeling](https://arxiv.org/pdf/2002.06563.pdf)
* [Impact of non-pharmaceutical interventions (NPIs) to reduce COVID19 mortality and healthcare demand](https://www.imperial.ac.uk/media/imperial-college/medicine/sph/ide/gida-fellowships/Imperial-College-COVID19-NPI-modelling-16-03-2020.pdf)
''')
st.markdown('### Como a modelagem foi feita para o Brasil')
st.write(
'''
Para o caso do Brasil, foi considerada uma população de 200 milhões de pessoas, sendo o número inicial de infectados
o número total de casos mais recentes que temos. Foi considerado que toda a população é suscetível e que, inicialmente,
o número de pessoas expostas (que contraíram o vírus mas estão em período de incubação) é 15 vezes o número de casos confirmados.
O fator 15 foi retirado de uma estimativa realizada em declarações do Ministério da Saúde.
A simulação sempre parte do cenário mais atual, ou seja, do dia de hoje considerando os números mais atualizados que temos.
O objetivo é tentar prever o cenário futuro baseado nos números mais recentes que temos e também demonstrar, neste cenário,
o impacto das medidas de isolamento social. Na simulação, o fator de contenção social foi levado em conta por meio do parâmetro *p*,
que possui valores entre 0 e 1. O valor de *p* = 1 seria o caso em que nenhuma medida de contenção social é adotada, ou seja, a vida cotinua normalmente.
O valor de *p* = 0 é apenas teórico, pois significaria zerar a taxa de transmissão do vírus, ou seja, absolutamente nenhuma transmissão entre a população.
A seguir é possível verificar os resultados da simulação para o cenário brasileiro, partindo de hoje e considerando os números mais recentes.
''')
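        # Sketch (assumption) of the ODE system SeirModel is expected to integrate; the actual
        # implementation lives in seir.py and may differ in detail:
        #   dS/dt = -beta * p * S * I / N
        #   dE/dt =  beta * p * S * I / N - alpha * E
        #   dI/dt =  alpha * E - gamma * I
        #   dR/dt =  gamma * I
        # where p in [0, 1] scales the contact rate to model social-distancing measures.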
seir_model = SeirModel(100, total_cases, recovered=deaths+recovers, p=1)
S, E, I, R = seir_model.get_model_results()
population = seir_model.N
# Prepare dates for plotting
start = pd.Timestamp('now')
end=pd.Timestamp('now') + pd.Timedelta(seir_model.t_max, unit='D')
timestamp_range = np.linspace(start.value, end.value, len(seir_model.t))
timestamp_range = | pd.to_datetime(timestamp_range) | pandas.to_datetime |
from datetime import datetime, timedelta
import operator
from typing import Any, Sequence, Type, Union, cast
import warnings
import numpy as np
from pandas._libs import NaT, NaTType, Timestamp, algos, iNaT, lib
from pandas._libs.tslibs.c_timestamp import integer_op_not_supported
from pandas._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas._libs.tslibs.timestamps import RoundTo, round_nsint64
from pandas._typing import DatetimeLikeScalar
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_period_dtype,
is_string_dtype,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna
from pandas.core import missing, nanops, ops
from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts
from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin
import pandas.core.common as com
from pandas.core.indexers import check_bool_array_indexer
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.core.ops.invalid import invalid_comparison, make_invalid_op
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick
def _datetimelike_array_cmp(cls, op):
"""
Wrap comparison operations to convert Timestamp/Timedelta/Period-like to
boxed scalars/arrays.
"""
opname = f"__{op.__name__}__"
nat_result = opname == "__ne__"
@unpack_zerodim_and_defer(opname)
def wrapper(self, other):
if isinstance(other, str):
try:
# GH#18435 strings get a pass from tzawareness compat
other = self._scalar_from_string(other)
except ValueError:
# failed to parse as Timestamp/Timedelta/Period
return invalid_comparison(self, other, op)
if isinstance(other, self._recognized_scalars) or other is NaT:
other = self._scalar_type(other)
self._check_compatible_with(other)
other_i8 = self._unbox_scalar(other)
result = op(self.view("i8"), other_i8)
if isna(other):
result.fill(nat_result)
elif not is_list_like(other):
return invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
else:
if isinstance(other, list):
# TODO: could use pd.Index to do inference?
other = np.array(other)
if not isinstance(other, (np.ndarray, type(self))):
return invalid_comparison(self, other, op)
if is_object_dtype(other):
# We have to use comp_method_OBJECT_ARRAY instead of numpy
# comparison otherwise it would fail to raise when
# comparing tz-aware and tz-naive
with np.errstate(all="ignore"):
result = ops.comp_method_OBJECT_ARRAY(
op, self.astype(object), other
)
o_mask = isna(other)
elif not type(self)._is_recognized_dtype(other.dtype):
return invalid_comparison(self, other, op)
else:
# For PeriodDType this casting is unnecessary
other = type(self)._from_sequence(other)
self._check_compatible_with(other)
result = op(self.view("i8"), other.view("i8"))
o_mask = other._isnan
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
return set_function_name(wrapper, opname, cls)
class AttributesMixin:
_data: np.ndarray
@classmethod
def _simple_new(cls, values, **kwargs):
raise AbstractMethodError(cls)
@property
def _scalar_type(self) -> Type[DatetimeLikeScalar]:
"""The scalar associated with this datelike
* PeriodArray : Period
* DatetimeArray : Timestamp
* TimedeltaArray : Timedelta
"""
raise AbstractMethodError(self)
def _scalar_from_string(
self, value: str
) -> Union[Period, Timestamp, Timedelta, NaTType]:
"""
Construct a scalar type from a string.
Parameters
----------
value : str
Returns
-------
Period, Timestamp, or Timedelta, or NaT
Whatever the type of ``self._scalar_type`` is.
Notes
-----
This should call ``self._check_compatible_with`` before
unboxing the result.
"""
raise AbstractMethodError(self)
def _unbox_scalar(self, value: Union[Period, Timestamp, Timedelta, NaTType]) -> int:
"""
Unbox the integer value of a scalar `value`.
Parameters
----------
value : Union[Period, Timestamp, Timedelta]
Returns
-------
int
Examples
--------
>>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP
10000000000
"""
raise AbstractMethodError(self)
def _check_compatible_with(
self, other: Union[Period, Timestamp, Timedelta, NaTType], setitem: bool = False
) -> None:
"""
Verify that `self` and `other` are compatible.
* DatetimeArray verifies that the timezones (if any) match
* PeriodArray verifies that the freq matches
* Timedelta has no verification
In each case, NaT is considered compatible.
Parameters
----------
other
setitem : bool, default False
For __setitem__ we may have stricter compatibility restrictions than
for comparisons.
Raises
------
Exception
"""
raise AbstractMethodError(self)
class DatelikeOps:
"""
Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.
"""
@Substitution(
URL="https://docs.python.org/3/library/datetime.html"
"#strftime-and-strptime-behavior"
)
def strftime(self, date_format):
"""
Convert to Index using specified date_format.
Return an Index of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format
doc <%(URL)s>`__.
Parameters
----------
date_format : str
Date format string (e.g. "%%Y-%%m-%%d").
Returns
-------
ndarray
NumPy ndarray of formatted strings.
See Also
--------
to_datetime : Convert the given argument to datetime.
DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
Examples
--------
>>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
... periods=3, freq='s')
>>> rng.strftime('%%B %%d, %%Y, %%r')
Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
'March 10, 2018, 09:00:02 AM'],
dtype='object')
"""
result = self._format_native_types(date_format=date_format, na_rep=np.nan)
return result.astype(object)
class TimelikeOps:
"""
Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex.
"""
_round_doc = """
Perform {op} operation on the data to the specified `freq`.
Parameters
----------
freq : str or Offset
The frequency level to {op} the index to. Must be a fixed
frequency like 'S' (second) not 'ME' (month end). See
:ref:`frequency aliases <timeseries.offset_aliases>` for
a list of possible `freq` values.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
Only relevant for DatetimeIndex:
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
.. versionadded:: 0.24.0
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
.. versionadded:: 0.24.0
Returns
-------
DatetimeIndex, TimedeltaIndex, or Series
Index of the same type for a DatetimeIndex or TimedeltaIndex,
or a Series with the same index for a Series.
Raises
------
ValueError if the `freq` cannot be converted.
Examples
--------
**DatetimeIndex**
>>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min')
>>> rng
DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00',
'2018-01-01 12:01:00'],
dtype='datetime64[ns]', freq='T')
"""
_round_example = """>>> rng.round('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.round("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_floor_example = """>>> rng.floor('H')
DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00',
'2018-01-01 12:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.floor("H")
0 2018-01-01 11:00:00
1 2018-01-01 12:00:00
2 2018-01-01 12:00:00
dtype: datetime64[ns]
"""
_ceil_example = """>>> rng.ceil('H')
DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00',
'2018-01-01 13:00:00'],
dtype='datetime64[ns]', freq=None)
**Series**
>>> pd.Series(rng).dt.ceil("H")
0 2018-01-01 12:00:00
1 2018-01-01 12:00:00
2 2018-01-01 13:00:00
dtype: datetime64[ns]
"""
def _round(self, freq, mode, ambiguous, nonexistent):
# round the local times
if is_datetime64tz_dtype(self):
# operate on naive timestamps, then convert back to aware
naive = self.tz_localize(None)
result = naive._round(freq, mode, ambiguous, nonexistent)
aware = result.tz_localize(
self.tz, ambiguous=ambiguous, nonexistent=nonexistent
)
return aware
values = self.view("i8")
result = round_nsint64(values, mode, freq)
result = self._maybe_mask_results(result, fill_value=NaT)
return self._simple_new(result, dtype=self.dtype)
@Appender((_round_doc + _round_example).format(op="round"))
def round(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent)
@Appender((_round_doc + _floor_example).format(op="floor"))
def floor(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent)
@Appender((_round_doc + _ceil_example).format(op="ceil"))
def ceil(self, freq, ambiguous="raise", nonexistent="raise"):
return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent)
class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin, ExtensionArray):
"""
Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray
Assumes that __new__/__init__ defines:
_data
_freq
and that the inheriting class has methods:
_generate_range
"""
@property
def ndim(self) -> int:
return self._data.ndim
@property
def shape(self):
return self._data.shape
def reshape(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.reshape(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
def ravel(self, *args, **kwargs):
# Note: we drop any freq
data = self._data.ravel(*args, **kwargs)
return type(self)(data, dtype=self.dtype)
@property
def _box_func(self):
"""
box function to get object from internal representation
"""
raise AbstractMethodError(self)
def _box_values(self, values):
"""
apply box func to passed values
"""
return lib.map_infer(values, self._box_func)
def __iter__(self):
return (self._box_func(v) for v in self.asi8)
@property
def asi8(self) -> np.ndarray:
"""
Integer representation of the values.
Returns
-------
ndarray
An ndarray with int64 dtype.
"""
# do not cache or you'll create a memory leak
return self._data.view("i8")
@property
def _ndarray_values(self):
return self._data
# ----------------------------------------------------------------
# Rendering Methods
def _format_native_types(self, na_rep="NaT", date_format=None):
"""
Helper method for astype when converting to strings.
Returns
-------
ndarray[str]
"""
raise AbstractMethodError(self)
def _formatter(self, boxed=False):
# TODO: Remove Datetime & DatetimeTZ formatters.
return "'{}'".format
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
@property
def nbytes(self):
return self._data.nbytes
def __array__(self, dtype=None) -> np.ndarray:
# used for Timedelta/DatetimeArray, overwritten by PeriodArray
if is_object_dtype(dtype):
return np.array(list(self), dtype=object)
return self._data
@property
def size(self) -> int:
"""The number of elements in this array."""
return np.prod(self.shape)
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, key):
"""
This getitem defers to the underlying array, which by-definition can
only handle list-likes, slices, and integer scalars
"""
is_int = lib.is_integer(key)
if lib.is_scalar(key) and not is_int:
raise IndexError(
"only integers, slices (`:`), ellipsis (`...`), "
"numpy.newaxis (`None`) and integer or boolean "
"arrays are valid indices"
)
getitem = self._data.__getitem__
if is_int:
val = getitem(key)
if lib.is_scalar(val):
# i.e. self.ndim == 1
return self._box_func(val)
return type(self)(val, dtype=self.dtype)
if com.is_bool_indexer(key):
key = check_bool_array_indexer(self, key)
if key.all():
key = slice(0, None, None)
else:
key = lib.maybe_booleans_to_slice(key.view(np.uint8))
is_period = is_period_dtype(self)
if is_period:
freq = self.freq
else:
freq = None
if isinstance(key, slice):
if self.freq is not None and key.step is not None:
freq = key.step * self.freq
else:
freq = self.freq
elif key is Ellipsis:
# GH#21282 indexing with Ellipsis is similar to a full slice,
# should preserve `freq` attribute
freq = self.freq
result = getitem(key)
if result.ndim > 1:
# To support MPL which performs slicing with 2 dim
# even though it only has 1 dim by definition
if is_period:
return self._simple_new(result, dtype=self.dtype, freq=freq)
return result
return self._simple_new(result, dtype=self.dtype, freq=freq)
def __setitem__(
self,
key: Union[int, Sequence[int], Sequence[bool], slice],
value: Union[NaTType, Any, Sequence[Any]],
) -> None:
# I'm fudging the types a bit here. "Any" above really depends
# on type(self). For PeriodArray, it's Period (or stuff coercible
# to a period in from_sequence). For DatetimeArray, it's Timestamp...
# I don't know if mypy can do that, possibly with Generics.
# https://mypy.readthedocs.io/en/latest/generics.html
if lib.is_scalar(value) and not isna(value):
value = com.maybe_box_datetimelike(value)
if is_list_like(value):
is_slice = isinstance(key, slice)
if lib.is_scalar(key):
raise ValueError("setting an array element with a sequence.")
if not is_slice:
key = cast(Sequence, key)
if len(key) != len(value) and not com.is_bool_indexer(key):
msg = (
f"shape mismatch: value array of length '{len(key)}' "
"does not match indexing result of length "
f"'{len(value)}'."
)
raise ValueError(msg)
elif not len(key):
return
value = type(self)._from_sequence(value, dtype=self.dtype)
self._check_compatible_with(value, setitem=True)
value = value.asi8
elif isinstance(value, self._scalar_type):
self._check_compatible_with(value, setitem=True)
value = self._unbox_scalar(value)
elif is_valid_nat_for_dtype(value, self.dtype):
value = iNaT
else:
msg = (
f"'value' should be a '{self._scalar_type.__name__}', 'NaT', "
f"or array of those. Got '{type(value).__name__}' instead."
)
raise TypeError(msg)
self._data[key] = value
self._maybe_clear_freq()
def _maybe_clear_freq(self):
# inplace operations like __setitem__ may invalidate the freq of
# DatetimeArray and TimedeltaArray
pass
def astype(self, dtype, copy=True):
# Some notes on cases we don't have to handle here in the base class:
# 1. PeriodArray.astype handles period -> period
# 2. DatetimeArray.astype handles conversion between tz.
# 3. DatetimeArray.astype handles datetime -> period
from pandas import Categorical
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self._box_values(self.asi8)
elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):
return self._format_native_types()
elif is_integer_dtype(dtype):
# we deliberately ignore int32 vs. int64 here.
# See https://github.com/pandas-dev/pandas/issues/24381 for more.
values = self.asi8
if is_unsigned_integer_dtype(dtype):
# Again, we ignore int32 vs. int64
values = values.view("uint64")
if copy:
values = values.copy()
return values
elif (
is_datetime_or_timedelta_dtype(dtype)
and not is_dtype_equal(self.dtype, dtype)
) or is_float_dtype(dtype):
# disallow conversion between datetime/timedelta,
# and conversions for any datetimelike to float
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg)
elif is_categorical_dtype(dtype):
return Categorical(self, dtype=dtype)
else:
return np.asarray(self, dtype=dtype)
def view(self, dtype=None):
if dtype is None or dtype is self.dtype:
return type(self)(self._data, dtype=self.dtype)
return self._data.view(dtype=dtype)
# ------------------------------------------------------------------
# ExtensionArray Interface
def unique(self):
result = unique1d(self.asi8)
return type(self)(result, dtype=self.dtype)
def _validate_fill_value(self, fill_value):
"""
If a fill_value is passed to `take` convert it to an i8 representation,
raising ValueError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : np.int64
Raises
------
ValueError
"""
if isna(fill_value):
fill_value = iNaT
elif isinstance(fill_value, self._recognized_scalars):
self._check_compatible_with(fill_value)
fill_value = self._scalar_type(fill_value)
fill_value = self._unbox_scalar(fill_value)
else:
raise ValueError(
f"'fill_value' should be a {self._scalar_type}. Got '{fill_value}'."
)
return fill_value
def take(self, indices, allow_fill=False, fill_value=None):
if allow_fill:
fill_value = self._validate_fill_value(fill_value)
new_values = take(
self.asi8, indices, allow_fill=allow_fill, fill_value=fill_value
)
return type(self)(new_values, dtype=self.dtype)
@classmethod
def _concat_same_type(cls, to_concat):
dtypes = {x.dtype for x in to_concat}
assert len(dtypes) == 1
dtype = list(dtypes)[0]
values = np.concatenate([x.asi8 for x in to_concat])
return cls(values, dtype=dtype)
def copy(self):
values = self.asi8.copy()
return type(self)._simple_new(values, dtype=self.dtype, freq=self.freq)
def _values_for_factorize(self):
return self.asi8, iNaT
@classmethod
def _from_factorized(cls, values, original):
return cls(values, dtype=original.dtype)
def _values_for_argsort(self):
return self._data
# ------------------------------------------------------------------
# Additional array methods
# These are not part of the EA API, but we implement them because
# pandas assumes they're there.
def searchsorted(self, value, side="left", sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `value`.
"""
if isinstance(value, str):
value = self._scalar_from_string(value)
if not (isinstance(value, (self._scalar_type, type(self))) or isna(value)):
raise ValueError(f"Unexpected type for 'value': {type(value)}")
self._check_compatible_with(value)
if isinstance(value, type(self)):
value = value.asi8
else:
value = self._unbox_scalar(value)
return self.asi8.searchsorted(value, side=side, sorter=sorter)
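# Illustrative behaviour (assumed): for a daily DatetimeArray spanning
# 2020-01-01 .. 2020-01-05, searchsorted("2020-01-03") returns 2 -- the position at
# which that timestamp would be inserted to keep the array sorted.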
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an array.
See Also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
values = self._data.repeat(repeats)
return type(self)(values.view("i8"), dtype=self.dtype)
def value_counts(self, dropna=False):
"""
Return a Series containing counts of unique values.
Parameters
----------
dropna : bool, default True
Don't include counts of NaT values.
Returns
-------
Series
"""
from pandas import Series, Index
if dropna:
values = self[~self.isna()]._data
else:
values = self._data
cls = type(self)
result = value_counts(values, sort=False, dropna=dropna)
index = Index(
cls(result.index.view("i8"), dtype=self.dtype), name=result.index.name
)
return Series(result.values, index=index, name=result.name)
def map(self, mapper):
# TODO(GH-23179): Add ExtensionArray.map
# Need to figure out if we want ExtensionArray.map first.
# If so, then we can refactor IndexOpsMixin._map_values to
# a standalone function and call from here..
# Else, just rewrite _map_infer_values to do the right thing.
from pandas import Index
return Index(self).map(mapper).array
# ------------------------------------------------------------------
# Null Handling
def isna(self):
return self._isnan
@property # NB: override with cache_readonly in immutable subclasses
def _isnan(self):
"""
return if each value is nan
"""
return self.asi8 == iNaT
@property # NB: override with cache_readonly in immutable subclasses
def _hasnans(self):
"""
return if I have any nans; enables various perf speedups
"""
return bool(self._isnan.any())
def _maybe_mask_results(self, result, fill_value=iNaT, convert=None):
"""
Parameters
----------
result : a ndarray
fill_value : object, default iNaT
convert : str, dtype or None
Returns
-------
result : ndarray with values replace by the fill_value
mask the result if needed, convert to the provided dtype if it's not
None
This is an internal routine.
"""
if self._hasnans:
if convert:
result = result.astype(convert)
if fill_value is None:
fill_value = np.nan
result[self._isnan] = fill_value
return result
def fillna(self, value=None, method=None, limit=None):
# TODO(GH-20300): remove this
# Just overriding to ensure that we avoid an astype(object).
# Either 20300 or a `_values_for_fillna` would avoid this duplication.
if isinstance(value, ABCSeries):
value = value.array
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
if is_array_like(value):
if len(value) != len(self):
raise ValueError(
f"Length of 'value' does not match. Got ({len(value)}) "
f" expected {len(self)}"
)
value = value[mask]
if mask.any():
if method is not None:
if method == "pad":
func = missing.pad_1d
else:
func = missing.backfill_1d
values = self._data
if not is_period_dtype(self):
# For PeriodArray self._data is i8, which gets copied
# by `func`. Otherwise we need to make a copy manually
# to avoid modifying `self` in-place.
values = values.copy()
new_values = func(values, limit=limit, mask=mask)
if is_datetime64tz_dtype(self):
# we need to pass int64 values to the constructor to avoid
# re-localizing incorrectly
new_values = new_values.view("i8")
new_values = type(self)(new_values, dtype=self.dtype)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
# ------------------------------------------------------------------
# Frequency Properties/Methods
@property
def freq(self):
"""
Return the frequency object if it is set, otherwise None.
"""
return self._freq
@freq.setter
def freq(self, value):
if value is not None:
value = frequencies.to_offset(value)
self._validate_frequency(self, value)
self._freq = value
@property
def freqstr(self):
"""
Return the frequency object as a string if it's set, otherwise None
"""
if self.freq is None:
return None
return self.freq.freqstr
@property # NB: override with cache_readonly in immutable subclasses
def inferred_freq(self):
"""
Tries to return a string representing a frequency guess,
generated by infer_freq. Returns None if it can't autodetect the
frequency.
"""
if self.ndim != 1:
return None
try:
return frequencies.infer_freq(self)
except ValueError:
return None
@property # NB: override with cache_readonly in immutable subclasses
def _resolution(self):
return frequencies.Resolution.get_reso_from_freq(self.freqstr)
@property # NB: override with cache_readonly in immutable subclasses
def resolution(self):
"""
Returns day, hour, minute, second, millisecond or microsecond
"""
return | frequencies.Resolution.get_str(self._resolution) | pandas.tseries.frequencies.Resolution.get_str |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python [conda env:PROJ_irox_oer] *
# language: python
# name: conda-env-PROJ_irox_oer-py
# ---
# # Creating slabs from IrOx polymorph dataset
# ---
#
# This notebook is time consuming. Additional processing of the slab (correct vacuum applied, and bulk constraints, etc.) are done in `process_slabs.ipynb`
# + active=""
# # 565 total polymorphs from first project
#
# # 122 polymorphs are octahedral and unique
# # >>> Removing 12 systems manually because they are not good
# # -----
# # 110 polymorphs now
#
#
# # # ###############################################
# # 49 are layered materials
# # 61 are non-layered materials
# # -----
# # 61 polymorphs now
#
#
# # # ###############################################
# # 15 polymorphs are above the 0.3 eV/atom above hull cutoff
# # -----
# # 46 polymorphs now
# -
# # Import Modules
# +
import os
print(os.getcwd())
import sys
import time
import signal
import random
from pathlib import Path
from IPython.display import display
import pickle
import json
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
import ase
from ase import io
# from tqdm import tqdm
from tqdm.notebook import tqdm
# #########################################################
from misc_modules.pandas_methods import drop_columns
from misc_modules.misc_methods import GetFriendlyID
from ase_modules.ase_methods import view_in_vesta
# #########################################################
from proj_data import metal_atom_symbol
from methods import (
get_df_dft,
symmetrize_atoms,
get_structure_coord_df,
remove_atoms,
compare_facets_for_being_the_same,
TimeoutException,
sigalrm_handler,
)
# #########################################################
from local_methods import (
analyse_local_coord_env,
check_if_sys_processed,
remove_nonsaturated_surface_metal_atoms,
remove_noncoord_oxygens,
create_slab_from_bulk,
create_final_slab_master,
create_save_dataframe,
constrain_slab,
read_data_json,
calc_surface_area,
create_slab,
update_sys_took_too_long,
create_save_struct_coord_df,
)
# -
# # Script Inputs
# +
# timelimit_seconds = 0.4 * 60
# timelimit_seconds = 10 * 60
# timelimit_seconds = 40 * 60
timelimit_seconds = 100 * 60
facets_manual = [
(1, 0, 0),
(0, 1, 0),
(0, 0, 1),
(1, 1, 1),
# (0, 1, 1),
# (1, 0, 1),
# (1, 1, 0),
]
facets_manual = [t for t in (set(tuple(i) for i in facets_manual))]
frac_of_layered_to_include = 0.0
phase_num = 2
# +
# max_surf_a = 200
# Distance from top z-coord of slab that we'll remove atoms from
# dz = 4
# -
# # Read Data
# +
# #########################################################
df_dft = get_df_dft()
# #########################################################
# Bulks not to run, manually checked to be erroneous/bad
data_path = os.path.join(
"in_data/bulks_to_not_run.json")
with open(data_path, "r") as fle:
bulks_to_not_run = json.load(fle)
# #########################################################
from methods import get_df_xrd
df_xrd = get_df_xrd()
# #########################################################
from methods import get_df_bulk_manual_class
df_bulk_manual_class = get_df_bulk_manual_class()
# #########################################################
from methods import get_bulk_selection_data
bulk_selection_data = get_bulk_selection_data()
bulk_ids__octa_unique = bulk_selection_data["bulk_ids__octa_unique"]
# #########################################################
from methods import get_df_slab_ids, get_slab_id
df_slab_ids = get_df_slab_ids()
# #########################################################
from methods import get_df_slab
df_slab_old = get_df_slab(mode="almost-final")
# #########################################################
from local_methods import df_dft_for_slab_creation
df_dft_i = df_dft_for_slab_creation(
df_dft=df_dft,
bulk_ids__octa_unique=bulk_ids__octa_unique,
bulks_to_not_run=bulks_to_not_run,
df_bulk_manual_class=df_bulk_manual_class,
frac_of_layered_to_include=frac_of_layered_to_include,
verbose=False,
)
# +
# get_bulk_selection_data().keys()
# +
# assert False
# +
# TEMP
# mj7wbfb5nt 011 (0, 1, 1)
df = df_slab_old
df = df[
(df["bulk_id"] == "mj7wbfb5nt") &
(df["facet"] == "011") &
# (df[""] == "") &
[True for i in range(len(df))]
]
df
# +
# assert False
# -
# # Create needed folders
# +
root_dir = os.path.join(
os.environ["PROJ_irox_oer"],
"workflow/creating_slabs",
)
directory = "out_data/final_slabs"
if not os.path.exists(directory):
os.makedirs(directory)
directory = "out_data/slab_progression"
if not os.path.exists(directory):
os.makedirs(directory)
directory = "out_data/df_coord_files"
if not os.path.exists(directory):
os.makedirs(directory)
directory = "out_data/temp_out"
if not os.path.exists(directory):
os.makedirs(directory)
# -
print(
"Number of bulk structures that are octahedral and unique:",
"\n",
len(bulk_ids__octa_unique))
# ### Checking that df_slab_ids are unique, no repeat entries
# +
if not df_slab_ids.index.is_unique:
print("df_slab_ids isn't unique")
print("df_slab_ids isn't unique")
print("df_slab_ids isn't unique")
print("df_slab_ids isn't unique")
print("df_slab_ids isn't unique")
print("Duplicate rows here (NOT GOOD!!!)")
display(
df_slab_ids[df_slab_ids.index.duplicated(keep=False)]
)
df = df_slab_old
df = df[
(df["bulk_id"] == "v1xpx482ba") &
(df["facet"] == "20-21") &
# (df["facet"] == "20-23") &
[True for i in range(len(df))]
]
df
# -
# ## Removing duplicate rows
# +
# #########################################################
slab_ids_to_drop = []
# #########################################################
group_cols = ["bulk_id", "facet", ]
grouped = df_slab_old.groupby(group_cols)
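# For each (bulk_id, facet) pair that appears more than once, keep the rows that
# already have a final slab and queue the leftover "Took too long" attempts for removal.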
for name_i, group_i in grouped:
if group_i.shape[0] > 1:
# print(name_i)
# display(group_i)
# name_i = ('xw9y6rbkxr', '10-12')
# group_i = grouped.get_group(name_i)
grp_0 = group_i[group_i.status == "Took too long"]
grp_1 = group_i[~group_i.slab_final.isna()]
if grp_1.shape[0] > 0:
if grp_0.shape[0] > 0:
slab_ids_to_drop_i = grp_0.index.tolist()
slab_ids_to_drop.extend(slab_ids_to_drop_i)
# df_slab_old.loc[slab_ids_to_drop]
df_slab_old = df_slab_old.drop(slab_ids_to_drop)
# +
# assert False
# -
# # Creating slabs from bulks
# ## Which systems previously took too long
# +
data = read_data_json()
systems_that_took_too_long = data.get("systems_that_took_too_long", [])
systems_that_took_too_long_2 = []
for i in systems_that_took_too_long:
systems_that_took_too_long_2.append(i[0] + "_" + i[1])
print(
len(systems_that_took_too_long),
" systems took too long to process and will be ignored",
sep="")
# +
df_slab_old_tmp = df_slab_old.reset_index(level=0, inplace=False)
df_slab_old_tmp = df_slab_old_tmp.set_index(["bulk_id", "facet", ], drop=False, )
# # df_slab_old.set_index?
# -
print(
"This was True before, look into it if it's not",
"\n",
"\n",
"df_slab_old_tmp.index.is_unique:",
"\n",
df_slab_old_tmp.index.is_unique,
sep="")
# +
systems_that_took_too_long__new = []
for sys_i in systems_that_took_too_long:
# print(sys_i)
atoms_found = False
name_i = (sys_i[0], sys_i[1])
if name_i in df_slab_old_tmp.index:
# #####################################################
row_i = df_slab_old_tmp.loc[sys_i[0], sys_i[1]]
# #####################################################
slab_final_i = row_i.slab_final
# #####################################################
if isinstance(slab_final_i, ase.atoms.Atoms):
atoms_found = True
else:
tmp = 42
keep_sys_in_list = True
if atoms_found:
keep_sys_in_list = False
if keep_sys_in_list:
systems_that_took_too_long__new.append(sys_i)
# ##########################################################
# ##########################################################
data = read_data_json()
data["systems_that_took_too_long"] = systems_that_took_too_long__new
data_path = os.path.join(
os.environ["PROJ_irox_oer"],
"workflow/creating_slabs",
"out_data/data.json")
with open(data_path, "w") as fle:
json.dump(data, fle, indent=2)
# -
len(systems_that_took_too_long__new)
len(systems_that_took_too_long)
# +
# assert False
# +
# assert False
# +
# df_slab_old[df_slab_old.bulk_id == "n36axdbw65"]
# -
# ## Figuring out which systems haven't been run yet
# +
# #########################################################
data_dict_list = []
# #########################################################
systems_not_processed = []
# #########################################################
for i_cnt, bulk_id in enumerate(df_dft_i.index.tolist()):
# #####################################################
row_i = df_dft.loc[bulk_id]
# #####################################################
bulk_id_i = row_i.name
atoms = row_i.atoms
# #####################################################
# #####################################################
row_xrd_i = df_xrd.loc[bulk_id]
# #####################################################
top_facets_i = row_xrd_i.top_facets
all_xrd_facets_i = row_xrd_i.all_xrd_facets
facet_rank_i = row_xrd_i.facet_rank
# #####################################################
num_of_facets = 5
# num_of_facets = 8
top_facets_i = top_facets_i[0:num_of_facets]
facet_rank_i = facet_rank_i[0:num_of_facets]
# #####################################################
# Facet manipulation ##################################
facets_manual_2 = []
for i in facets_manual:
if i not in all_xrd_facets_i:
facets_manual_2.append(i)
df_facets_0 = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import datetime
import bisect
import re
import warnings
import six
import pandas as pd
from rqdatac.client import get_client
from rqdatac.utils import to_date, datetime_to_int14, to_date_str, str_to_dt_time, int8_to_date
from rqdatac.validators import (
ensure_list_of_string,
ensure_date_int,
check_type,
ensure_date_str,
ensure_order_book_id,
ensure_order_book_ids,
check_items_in_container,
ensure_date_range,
ensure_int,
ensure_string,
ensure_date_or_today_int,
ensure_string_in
)
from rqdatac.services.concept import concept_names as get_concept_names
from rqdatac.services.shenwan import shenwan_instrument_industry
from rqdatac.services.constant import SectorCode, SectorCodeItem, IndustryCode, IndustryCodeItem
from rqdatac.services.calendar import get_previous_trading_date, is_trading_date, has_night_trading
from rqdatac.decorators import export_as_api, ttl_cache, compatible_with_parm
from dateutil.relativedelta import relativedelta
def _id_convert_one(order_book_id): # noqa: C901
# hard code
if order_book_id in {"T00018", "T00018.SH", "T00018.XSHG", "SH.T00018"}:
return "990018.XSHG"
if order_book_id.isdigit():
if order_book_id.startswith("0") or order_book_id.startswith("3"):
return order_book_id + ".XSHE"
elif (
order_book_id.startswith("5")
or order_book_id.startswith("6")
or order_book_id.startswith("9")
or order_book_id.startswith("15")
):
return order_book_id + ".XSHG"
else:
raise ValueError("order_book_ids should be str like 000001, 600000")
order_book_id = order_book_id.upper()
if order_book_id.endswith(".XSHG") or order_book_id.endswith(".XSHE"):
return order_book_id
if order_book_id.startswith("SZ"):
return order_book_id.replace(".", "")[2:] + ".XSHE"
elif order_book_id.startswith("SH"):
return order_book_id.replace(".", "")[2:] + ".XSHG"
elif order_book_id.endswith("SZ"):
return order_book_id.replace(".", "")[:-2] + ".XSHE"
elif order_book_id.endswith("SH"):
return order_book_id.replace(".", "")[:-2] + ".XSHG"
# Futures contracts
order_book_id = order_book_id.replace("-", "").split(".")[0]
try:
res = re.findall(r"^([A-Z]+)(\d+)([PC]\w+)?", order_book_id)[0]
except IndexError:
raise ValueError("unknown order_book_id: {}".format(order_book_id))
if len(res[1]) == 3 and res[1] != '888':
year = str(datetime.datetime.now().year + 3)
if res[1][0] > year[-1]:
num = str(int(year[-2]) - 1)
else:
num = year[-2]
return res[0] + num + res[1] + res[2]
return order_book_id
@export_as_api
def id_convert(order_book_ids):
"""合约格式转换
:param order_book_ids: str 或 str list, 如'000001', 'SZ000001', '000001SZ',
'000001.SZ', 纯数字str默认为股票类型
:returns: str 或 str list, 米筐格式的合约
"""
if isinstance(order_book_ids, six.string_types):
return _id_convert_one(order_book_ids)
elif isinstance(order_book_ids, list):
return [_id_convert_one(o) for o in order_book_ids]
else:
raise ValueError("order_book_ids should be str or list")
def _id_compatible(order_book_id):
if order_book_id.endswith("XSHE"):
return order_book_id[:-4] + "SZ"
elif order_book_id.endswith("XSHG"):
return order_book_id[:-4] + "SH"
else:
return order_book_id
@ttl_cache(3 * 3600)
def _all_instruments_list(market):
return [Instrument(i) for i in get_client().execute("all_instruments", market=market)]
@ttl_cache(3 * 3600)
def _all_instruments_dict(market):
ins = _all_instruments_list(market)
result = {i.order_book_id: i for i in ins}
for i in ins:
result[i.symbol] = i
if i.type == "Convertible":
result[_id_compatible(i.order_book_id)] = i
try:
result["沪深300"] = result["000300.XSHG"]
result["中证500"] = result["000905.XSHG"]
result[result["SSE180.INDX"].symbol] = result["000010.XSHG"]
except KeyError:
pass
return result
def get_underlying_listed_date(underlying_symbol, ins_type, market="cn"):
""" 获取期货或者期权的某个品种的上市日期"""
ins_list = _all_instruments_list(market)
listed_dates = [i.listed_date for i in ins_list
if (getattr(i, "underlying_symbol", "") == underlying_symbol
and i.type == ins_type and i.listed_date != "0000-00-00")]
return min(listed_dates)
def get_tick_size(order_book_id, market="cn"):
"""获取合约价格最小变动单位
:param order_book_id: 如: FU1703
:param market: 如:'cn' (Default value = "cn")
:returns: float
"""
return get_client().execute("get_tick_size", order_book_id, market=market)
HK_STOCK_PRICE_SECTIONS = [0.25, 0.5, 10, 20, 100, 200, 500, 1000, 2000, 5000]
HK_STOCK_TICK_SIZES = [0.001, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5]
# noinspection All
class Instrument(object):
def __init__(self, d):
self.__dict__ = d
def __repr__(self):
if self.has_shenwan_info() and not hasattr(self, "_shenwan_industry_code"):
self.shenwan_industry()
return "{}({})".format(
type(self).__name__,
", ".join(
[
"{}={!r}".format(k.lstrip("_"), v)
for k, v in self.__dict__.items()
if v is not None
]
),
)
@property
def concept_names(self):
return get_concept_names(self.order_book_id)
def days_from_listed(self, date=None):
if self.listed_date == "0000-00-00":
return -1
date = to_date(date) if date else datetime.date.today()
if self.de_listed_date != "0000-00-00" and date > to_date(self.de_listed_date):
# later than the de-listing date
return -1
listed_date = to_date(self.listed_date)
ipo_days = (date - listed_date).days
return ipo_days if ipo_days >= 0 else -1
def days_to_expire(self, date=None):
if (
self.type != "Future"
or self.listed_date == "0000-00-00" # 排除虚拟合约
):
return -1
date = to_date(date) if date else datetime.date.today()
maturity_date = to_date(self.maturity_date)
days = (maturity_date - date).days
return days if days >= 0 else -1
def tick_size(self, price=None):
if self.exchange == "XHKG":
check_type(price, (int, float), "price")
index = bisect.bisect_left(HK_STOCK_PRICE_SECTIONS, price)
return HK_STOCK_TICK_SIZES[index]
elif self.type in ["CS", "INDX"]:
return 0.01
elif self.type in ["ETF", "LOF", "FenjiB", "FenjiA", "FenjiMu"]:
return 0.001
elif self.type == "Convertible":
if self.exchange == "XSHG":
return 0.01
else:
return 0.001
elif self.type not in ["Future", "Option"]:
return -1
return get_tick_size(self.order_book_id)
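# Example of the Hong Kong branch above: a HK stock quoted at 15.0 lies between the
# 10 and 20 price breakpoints, bisect_left returns index 3, and the tick size is
# HK_STOCK_TICK_SIZES[3] == 0.02.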
def has_shenwan_info(self):
return self.type == "CS" and self.exchange in {"XSHE", "XSHG"}
def shenwan_industry(self, date=None):
if self.has_shenwan_info():
if date is None:
if hasattr(self, "_shenwan_industry_code"):
return (self._shenwan_industry_code, self._shenwan_industry_name)
result = shenwan_instrument_industry(self.order_book_id, date)
if result is None:
self._shenwan_industry_code, self._shenwan_industry_name = (None, None)
return None
else:
if date is None:
self._shenwan_industry_code, self._shenwan_industry_name = result
return {"code": result[0], "name": result[1]}
@property
def shenwan_industry_code(self):
if not self.has_shenwan_info():
return None
if not hasattr(self, "_shenwan_industry_code"):
self.shenwan_industry()
return self._shenwan_industry_code
@property
def shenwan_industry_name(self):
if not self.has_shenwan_info():
return None
if not hasattr(self, "_shenwan_industry_name"):
self.shenwan_industry()
return self._shenwan_industry_name
@export_as_api
@compatible_with_parm(name="country", value="cn", replace="market")
def instruments(order_book_ids, market="cn"):
"""获取证券详细信息
:param order_book_ids: 证券ID列表, 如'000001.XSHE', 'AAPL.US'. 注意, 所有列表中的证券需要属于同一个国家。
:param market: 证券所属国家, 如 cn, us (Default value = "cn")
:returns: 对应证券的列表
"""
all_dict = _all_instruments_dict(market)
if isinstance(order_book_ids, six.string_types):
try:
return all_dict[order_book_ids]
except KeyError:
warnings.warn('unknown order_book_id: {}'.format(order_book_ids))
return
all_list = (all_dict.get(i) for i in order_book_ids)
return [i for i in all_list if i]
VALID_TYPES = {"CS", "ETF", "LOF", "INDX", "Future", "Spot", "Option", "Convertible", "Repo"}
@export_as_api
@compatible_with_parm(name="country", value="cn", replace="market")
def all_instruments(type=None, date=None, market="cn"):
"""获得某个国家的全部证券信息
:param type: (Default value = None)
:param date: (Default value = None)
:param market: (Default value = "cn")
"""
if type is None:
itype = VALID_TYPES
else:
type = ensure_list_of_string(type)
itype = set()
for t in type:
if t.upper() == "STOCK":
itype.add("CS")
elif t.upper() == "FUND":
itype = itype.union({"ETF", "LOF"})
elif t.upper() == "INDEX":
itype.add("INDX")
elif t not in VALID_TYPES:
raise ValueError("invalid type: {}, chose any in {}".format(type, VALID_TYPES))
else:
itype.add(t)
if date:
date = ensure_date_str(date)
cond = lambda x: ( # noqa: E731
x.type in itype
and (x.listed_date <= date or x.listed_date == "0000-00-00")
and (
x.de_listed_date == "0000-00-00"
or (
x.de_listed_date >= date
and x.type in ("Future", "Option")
or (x.de_listed_date > date and x.type not in ("Future", "Option"))
)
)
)
else:
cond = lambda x: x.type in itype # noqa: E731
ins_ret = filter(cond, _all_instruments_list(market))
if len(itype) == 1:
df = pd.DataFrame([v.__dict__ for v in ins_ret])
else:
df = pd.DataFrame(
[
(
v.order_book_id,
v.symbol,
getattr(v, "abbrev_symbol", None),
v.type,
v.listed_date,
v.de_listed_date,
)
for v in ins_ret
],
columns=[
"order_book_id",
"symbol",
"abbrev_symbol",
"type",
"listed_date",
"de_listed_date",
],
)
return df
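# Illustrative usage (assumed): all_instruments(type="CS") returns a DataFrame of all
# stocks, while all_instruments(type="Future", date="2019-04-01") keeps only the
# futures contracts that were listed (and not yet de-listed) on that date.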
def to_sector_name(s):
for _, v in SectorCode.__dict__.items():
if isinstance(v, SectorCodeItem):
if v.cn == s or v.en == s or v.name == s:
return v.name
return s
@export_as_api
@compatible_with_parm(name="country", value="cn", replace="market")
def sector(code, market="cn"):
"""获取某个板块的股票列表。目前支持的板块分类具体可以查询以下网址:
https://www.ricequant.com/api/research/chn#research-API-sector
:param code: 可以使用板块英文名字如'Energy', 或者 sector_code.Energy
:param market: 地区代码, 如'cn' (Default value = "cn")
:returns: 板块全部股票列表
"""
check_type(code, (SectorCodeItem, six.string_types), "code")
if isinstance(code, SectorCodeItem):
code = code.name
else:
code = to_sector_name(code)
return [
v.order_book_id
for v in _all_instruments_list(market)
if v.type == "CS" and v.sector_code == code
]
def to_industry_code(s):
for _, v in IndustryCode.__dict__.items():
if isinstance(v, IndustryCodeItem):
if v.name == s:
return v.code
return s
@export_as_api
@compatible_with_parm(name="country", value="cn", replace="market")
def industry(code, market="cn"):
"""获取某个行业的股票列表。目前支持的行业列表具体可以查询以下网址:
https://www.ricequant.com/api/research/chn#research-API-industry
:param code: 行业代码,如 A01, 或者 industry_code.A01
:param market: 地区代码, 如'cn' (Default value = "cn")
:returns: 行业全部股票列表
"""
if not isinstance(code, six.string_types):
code = code.code
else:
code = to_industry_code(code)
return [
v.order_book_id
for v in _all_instruments_list(market)
if v.type == "CS" and v.industry_code == code
]
@export_as_api
def get_future_contracts(underlying_symbol, date=None, market="cn"):
import rqdatac
import warnings
msg = "'get_future_contracts' is deprecated, please use 'futures.get_contracts' instead"
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
return rqdatac.futures.get_contracts(underlying_symbol, date, market)
@export_as_api(namespace='futures')
def get_contracts(underlying_symbol, date=None, market="cn"):
"""获得中国市场某个时间某个期货品种正在交易的合约列表
:param underlying_symbol: 期货品种
:param date: 日期,可以为str,datetime,date,pd.Timestamp 等类型
:param market: (Default value = "cn")
:returns: list of order book id
"""
if date is None:
date = datetime.date.today()
date = ensure_date_str(date)
return sorted(
v.order_book_id
for v in _all_instruments_list(market)
if v.type == "Future"
and v.underlying_symbol == underlying_symbol
and v.listed_date != "0000-00-00"
and v.listed_date <= date <= v.de_listed_date
)
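# Illustrative, hypothetical usage: futures.get_contracts("CU", date="2019-04-01")
# would return the copper contracts trading on that date, e.g. ["CU1904", "CU1905", ...];
# the exact list depends on the underlying data service.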
@export_as_api
def jy_instrument_industry(order_book_ids, date=None, level=1, expect_df=False, market="cn"):
"""获取股票对应的聚源行业
:param order_book_ids: 股票列表,如['000001.XSHE', '000002.XSHE']
:param date: 如 '2015-01-07' (Default value = None)
:param level: 聚源等级,0, 1, 2, 3, 'customized' (Default value = 1)
:param expect_df: 返回 DataFrame,默认为 False
:param market: (Default value = "cn")
:returns: code, name
返回输入日期最近交易日的股票对应聚源行业以及对应的聚源等级
"""
if level not in (0, 1, 2, 3, "customized"):
raise ValueError("level should in 0, 1, 2, 3, 'customized'")
order_book_ids = ensure_order_book_ids(order_book_ids, market=market)
if not date:
date = ensure_date_int(get_previous_trading_date(datetime.date.today(), market=market))
else:
date = ensure_date_int(date)
df = get_client().execute("jy_instrument_industry", order_book_ids, date, level, market=market)
if not df:
return
if len(order_book_ids) == 1 and not expect_df:
r = df[0]
if level == 0:
return r["first_industry_name"], r["second_industry_name"], r["third_industry_name"]
return r["industry_name"]
return pd.DataFrame(df).set_index("order_book_id")
@export_as_api(namespace="econ")
def get_reserve_ratio(reserve_type="all", start_date=None, end_date=None, market="cn"):
"""获取存款准备金率
:param reserve_type: 存款准备金详细类别,默认为'all',目前仅支持'all'、'major'、'other'类别的查询
:param start_date: 开始查找时间,如'20180501',默认为上一年的当天
:param end_date: 截止查找时间,如'20180501',默认为当天
:param market: (Default value = "cn")
:return: pd.DataFrame
"""
check_items_in_container(reserve_type, ["all", "major", "other"], "reserve_type")
start_date, end_date = ensure_date_range(start_date, end_date, delta=relativedelta(years=1))
ret = get_client().execute(
"econ.get_reserve_ratio", reserve_type, start_date, end_date, market
)
if not ret:
return
columns = ["info_date", "effective_date", "reserve_type", "ratio_floor", "ratio_ceiling"]
df = pd.DataFrame(ret, columns=columns).set_index("info_date").sort_index(ascending=True)
return df
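# The returned DataFrame is indexed by info_date and carries effective_date,
# reserve_type and the ratio_floor/ratio_ceiling columns, sorted by date ascending.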
@export_as_api(namespace="econ")
def get_money_supply(start_date=None, end_date=None, market="cn"):
"""获取货币供应量信息
:param start_date: 开始日期,默认为一年前
:param end_date: 结束日期,默认为今天
:param market: (Default value = "cn")
"""
check_items_in_container("info_date", ["info_date", "effective_date"], "date_type")
start_date, end_date = ensure_date_range(start_date, end_date, delta=relativedelta(years=1))
data = get_client().execute("econ.get_money_supply", start_date, end_date, market=market)
if not data:
return
columns = [
"info_date",
"effective_date",
"m2",
"m1",
"m0",
"m2_growth_yoy",
"m1_growth_yoy",
"m0_growth_yoy",
]
df = pd.DataFrame(data, columns=columns).set_index("info_date").sort_index(ascending=True)
return df
@export_as_api
def get_main_shareholder(
order_book_id, start_date=None, end_date=None, is_total=False, market="cn"
):
"""获取十大股东信息
:param order_book_id: 股票代码
:param start_date: 开始日期,默认为一年前
:param end_date: 结束日期,默认为今天
:param is_total: 是否十大股东, True 和 False,默认为False
:param market: (Default value = "cn")
"""
check_items_in_container(is_total, [True, False], "is_total")
start_date, end_date = ensure_date_range(start_date, end_date, delta=relativedelta(years=1))
ret = get_client().execute(
"get_main_shareholder", order_book_id, start_date, end_date, is_total, market=market
)
if not ret:
return
columns = ['info_date', 'end_date', 'rank', 'shareholder_name', 'shareholder_attr', 'shareholder_kind',
'shareholder_type', 'hold_percent_total', 'hold_percent_float', 'share_pledge', 'share_freeze']
df = pd.DataFrame(ret, columns=columns).sort_values(by=['info_date', 'rank']).set_index('info_date')
return df
@export_as_api
def get_current_news(n=None, start_time=None, end_time=None, channels=None):
"""获取新闻
:param n: 新闻条数, n 和 start_time/end_time 只能指定其一
:param start_time: 开始日期,默认为None,格式为%Y-%m-%d %H:%M:%S,如"2018-10-20 09:10:20"
:param end_time: 结束日期,默认为None,格式为%Y-%m-%d %H:%M:%S,如"2018-10-20 19:10:20"
:param channels: 新闻大类, 默认为None,返回每个大类n条新闻, 如 "global","forex", "commodity", "a-stock"
"""
if start_time is not None or end_time is not None:
try:
start_time = datetime.datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S")
except Exception:
raise ValueError('start_time should be str format like "%Y-%m-%d %H:%M:%S"')
try:
end_time = datetime.datetime.strptime(end_time, "%Y-%m-%d %H:%M:%S")
except Exception:
raise ValueError('end_time should be str format like "%Y-%m-%d %H:%M:%S"')
start_time = datetime_to_int14(start_time)
end_time = datetime_to_int14(end_time)
if n is not None:
raise ValueError(
"please either specify parameter n, or specify both start_time and end_time"
)
n = 1200
elif n is None:
n = 1
else:
n = ensure_int(n, "n")
if n < 1 or n > 1200:
raise ValueError("n should be in [0, 1200]")
if channels is not None:
channels = ensure_list_of_string(channels)
check_items_in_container(channels, ["global", "forex", "a-stock", "commodity"], "channels")
else:
channels = ["global", "forex", "a-stock", "commodity"]
data = get_client().execute("get_current_news", n, start_time, end_time, channels)
if not data:
return
df = | pd.DataFrame(data, columns=["channel", "datetime", "content"]) | pandas.DataFrame |
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = | Period('05Q1') | pandas.tseries.period.Period |
from __future__ import print_function
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
import numpy as np
import pandas as pd
import sys
import math
from .util import tf_xavier_init
import csv
import datetime
import os
from random import randint
class RBM:
def __init__(self,
n_visible,
n_hidden,
learning_rate=0.01,
momentum=0.95,
xavier_const=1.0,
err_function='mse',
use_tqdm=False,
# DEPRECATED:
tqdm=None,
init_weight_scheme ='xavier',
init_bias_scheme = 'zeros',
rbmName = None,
stddev_par = 0.05):
dirname = os.path.dirname(__file__)
self.current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S.%f")
random_id = randint(1000,9999)
self.serial_number = self.current_time + '_' + str(random_id)
self.train_log_dir_raw_params = os.path.join(dirname, 'logs/rbms/raw/params/' + self.current_time + '_' + str(random_id))
# initialize list
hyperparams = [self.current_time, rbmName, n_visible, n_hidden, learning_rate, momentum, xavier_const, err_function, init_weight_scheme, init_bias_scheme, stddev_par]
# Create the pandas DataFrame
self.df_hyperparams = pd.DataFrame([hyperparams], columns = ['Timestamp','rbmName', 'n_visible', 'n_hidden', 'learning_rate','momentum','xavier_const','err_function','init_weight_scheme','init_bias_scheme', 'stddev_par'])
self.train_log_dir_raw_vals = os.path.join(dirname, 'logs/rbms/raw/vals/' + self.current_time + '_' + str(random_id))
self.train_log_dir_raw_svd = os.path.join(dirname, 'logs/rbms/raw/svd/' + self.current_time + '_' + str(random_id))
self.train_log_dir_raw_weight = os.path.join(dirname, 'logs/rbms/raw/weight/' + self.current_time + '_' + str(random_id))
self.val_meanError = []
self.val_pearson = []
self.val_svd = []
if not 0.0 <= momentum <= 1.0:
raise ValueError('momentum should be in range [0, 1]')
if err_function not in {'mse', 'cosine'}:
raise ValueError('err_function should be either \'mse\' or \'cosine\'')
self._use_tqdm = use_tqdm
self._tqdm = None
if use_tqdm or tqdm is not None:
from tqdm import tqdm
self._tqdm = tqdm
self.n_visible = n_visible
self.n_hidden = n_hidden
self.learning_rate = learning_rate
self.momentum = momentum
self.rbmName = rbmName
self.init_weight_scheme = init_weight_scheme
self.init_bias_scheme = init_bias_scheme
# self.writer = tf.compat.v1.summary.FileWriter(train_log_dir)
self.x = tf.compat.v1.placeholder(tf.float32, [None, self.n_visible])
self.y = tf.compat.v1.placeholder(tf.float32, [None, self.n_hidden])
if self.init_weight_scheme == 'xavier':
self.w = tf.Variable(tf_xavier_init(self.n_visible, self.n_hidden, const=xavier_const), dtype=tf.float32)
elif self.init_weight_scheme == 'ones':
self.w = tf.Variable(tf.ones([self.n_visible, self.n_hidden], tf.dtypes.float32))
elif self.init_weight_scheme == 'zeros':
self.w = tf.Variable(tf.zeros([self.n_visible, self.n_hidden], tf.dtypes.float32))
elif self.init_weight_scheme == 'gaussian':
self.w = tf.Variable(tf.random.normal([self.n_visible, self.n_hidden], mean=0.0, stddev=stddev_par), dtype=tf.float32)
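# The 'remerge' scheme below hard-codes a banded visible-to-hidden weight matrix in
# which each hidden unit pools two neighbouring visible units; 'perturbed remerge'
# adds Gaussian noise with standard deviation stddev_par around the same pattern.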
elif self.init_weight_scheme == 'remerge':
if n_visible==3 and n_hidden==2:
self.w = tf.Variable([[1, 0], [1, 1], [0, 1]], dtype=tf.float32)
elif n_visible==4 and n_hidden==3:
self.w = tf.Variable([[1, 0, 0], [1, 1, 0], [0, 1, 1], [0, 0, 1]], dtype=tf.float32)
elif n_visible==5 and n_hidden==4:
self.w = tf.Variable([[1, 0, 0, 0], [1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 1], [0, 0, 0, 1]], dtype=tf.float32)
elif self.init_weight_scheme == 'perturbed remerge':
if n_visible==3 and n_hidden==2:
self.w = tf.Variable([[np.random.normal(1, stddev_par), np.random.normal(0, stddev_par)],
[np.random.normal(1, stddev_par), np.random.normal(1, stddev_par)],
[np.random.normal(0, stddev_par), np.random.normal(1, stddev_par)]], dtype=tf.float32)
elif n_visible==4 and n_hidden==3:
self.w = tf.Variable([[np.random.normal(1, stddev_par), np.random.normal(0, stddev_par), np.random.normal(0, stddev_par)],
[np.random.normal(1, stddev_par), np.random.normal(1, stddev_par), np.random.normal(0, stddev_par)],
[np.random.normal(0, stddev_par), np.random.normal(1, stddev_par), np.random.normal(1, stddev_par)],
[np.random.normal(0, stddev_par), np.random.normal(0, stddev_par), np.random.normal(1, stddev_par)]], dtype=tf.float32)
elif n_visible==5 and n_hidden==4:
self.w = tf.Variable([[np.random.normal(1, stddev_par), np.random.normal(0, stddev_par), np.random.normal(0, stddev_par), np.random.normal(0, stddev_par)],
[np.random.normal(1, stddev_par), np.random.normal(1, stddev_par), np.random.normal(0, stddev_par), np.random.normal(0, stddev_par)],
[np.random.normal(0, stddev_par), np.random.normal(1, stddev_par), np.random.normal(1, stddev_par), np.random.normal(0, stddev_par)],
[np.random.normal(0, stddev_par), np.random.normal(0, stddev_par), np.random.normal(1, stddev_par), np.random.normal(1, stddev_par)],
[np.random.normal(0, stddev_par), np.random.normal(0, stddev_par), np.random.normal(0, stddev_par), np.random.normal(1, stddev_par)]], dtype=tf.float32)
if self.init_bias_scheme == 'zeros':
self.visible_bias = tf.Variable(tf.zeros([self.n_visible]), dtype=tf.float32)
self.hidden_bias = tf.Variable(tf.zeros([self.n_hidden]), dtype=tf.float32)
self.delta_w = tf.Variable(tf.zeros([self.n_visible, self.n_hidden]), dtype=tf.float32)
self.delta_visible_bias = tf.Variable(tf.zeros([self.n_visible]), dtype=tf.float32)
self.delta_hidden_bias = tf.Variable(tf.zeros([self.n_hidden]), dtype=tf.float32)
self.update_weights = None
self.update_deltas = None
self.compute_hidden = None
self.compute_visible = None
self.compute_visible_from_hidden = None
self._initialize_vars()
assert self.update_weights is not None
assert self.update_deltas is not None
assert self.compute_hidden is not None
assert self.compute_visible is not None
assert self.compute_visible_from_hidden is not None
if err_function == 'cosine':
x1_norm = tf.nn.l2_normalize(self.x, 1)
x2_norm = tf.nn.l2_normalize(self.compute_visible, 1)
            cos_val = tf.reduce_mean(tf.reduce_sum(tf.multiply(x1_norm, x2_norm), 1))
self.compute_err = tf.acos(cos_val) / tf.constant(np.pi)
else:
self.compute_err = tf.reduce_mean(tf.square(self.x - self.compute_visible))
init = tf.compat.v1.global_variables_initializer()
self.sess = tf.compat.v1.Session()
self.sess.run(init)
def print_serial_number(self):
print(self.serial_number)
def _initialize_vars(self):
pass
def get_err(self, batch_x):
return self.sess.run(self.compute_err, feed_dict={self.x: batch_x})
def get_free_energy(self):
pass
def transform(self, batch_x):
return self.sess.run(self.compute_hidden, feed_dict={self.x: batch_x})
def transform_inv(self, batch_y):
return self.sess.run(self.compute_visible_from_hidden, feed_dict={self.y: batch_y})
def reconstruct(self, batch_x):
return self.sess.run(self.compute_visible, feed_dict={self.x: batch_x})
def partial_fit(self, batch_x):
self.sess.run(self.update_weights + self.update_deltas, feed_dict={self.x: batch_x})
def fit(self,
data_x,
n_epoches=10,
batch_size=10,
shuffle=True,
verbose=True):
assert n_epoches > 0
n_data = data_x.shape[0]
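        # Number of mini-batch updates per epoch; a final partial batch is added when n_data is not divisible by batch_size.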
if batch_size > 0:
n_batches = n_data // batch_size + (0 if n_data % batch_size == 0 else 1)
else:
n_batches = 1
if shuffle:
data_x_cpy = data_x.copy()
inds = np.arange(n_data)
else:
data_x_cpy = data_x
errs = []
for e in range(n_epoches):
#if verbose and not self._use_tqdm:
#print('Epoch: {:d}'.format(e))
epoch_errs = np.zeros((n_batches,))
epoch_errs_ptr = 0
if shuffle:
np.random.shuffle(inds)
data_x_cpy = data_x_cpy[inds]
r_batches = range(n_batches)
if verbose and self._use_tqdm:
r_batches = self._tqdm(r_batches, desc='Epoch: {:d}'.format(e), ascii=True, file=sys.stdout)
for b in r_batches:
batch_x = data_x_cpy[b * batch_size:(b + 1) * batch_size]
self.partial_fit(batch_x)
batch_err = self.get_err(batch_x)
epoch_errs[epoch_errs_ptr] = batch_err
epoch_errs_ptr += 1
# summary = tf.compat.v1.Summary(value=[tf.compat.v1.Summary.Value(tag='mean err',
# simple_value=epoch_errs.mean())])
self.val_meanError.append(epoch_errs.mean())
# self.writer.add_summary(summary, e)
if self.n_visible==3 and self.n_hidden==2:
pearson = np.corrcoef(np.concatenate(self.sess.run(self.w)),np.concatenate(np.array([[1, .0], [1, 1], [.0, 1]])))[0,1]
elif self.n_visible==4 and self.n_hidden==3:
pearson = np.corrcoef(np.concatenate(self.sess.run(self.w)),np.concatenate(np.array([[1, 0, 0], [1, 1, 0], [0, 1, 1], [0, 0, 1]])))[0,1]
elif self.n_visible==5 and self.n_hidden==4:
pearson = np.corrcoef(np.concatenate(self.sess.run(self.w)),np.concatenate(np.array([[1, 0, 0, 0], [1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 1], [0, 0, 0, 1]])))[0,1]
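            # Pearson correlation between the flattened learned weights and the ideal remerge template of matching shape.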
# summary = tf.compat.v1.Summary(value=[tf.compat.v1.Summary.Value(tag='pearson',
# simple_value=pearson)])
self.val_pearson.append(pearson)
try:
s, u, v = tf.linalg.svd(self.w)
self.val_svd.append({'Timestamp': self.current_time,'Epoch': e, 's': self.sess.run(s), 'u': self.sess.run(u), 'v': self.sess.run(v)})
            except Exception:
                pass  # SVD may fail to converge; skip logging it for this epoch
if verbose:
err_mean = epoch_errs.mean()
if self._use_tqdm:
self._tqdm.write('Train error: {:.4f}'.format(err_mean))
self._tqdm.write('')
#else:
#print('Train error: {:.4f}'.format(err_mean))
#print('')
sys.stdout.flush()
errs = np.hstack([errs, epoch_errs])
# self.log_histogram('W', self.w, e)
return errs
def get_weights(self):
return self.sess.run(self.w),\
self.sess.run(self.visible_bias),\
self.sess.run(self.hidden_bias)
def save_weights(self, filename, name):
        saver = tf.compat.v1.train.Saver({name + '_w': self.w,
name + '_v': self.visible_bias,
name + '_h': self.hidden_bias})
return saver.save(self.sess, filename)
def set_weights(self, w, visible_bias, hidden_bias):
self.sess.run(self.w.assign(w))
self.sess.run(self.visible_bias.assign(visible_bias))
self.sess.run(self.hidden_bias.assign(hidden_bias))
def load_weights(self, filename, name):
        saver = tf.compat.v1.train.Saver({name + '_w': self.w,
name + '_v': self.visible_bias,
name + '_h': self.hidden_bias})
saver.restore(self.sess, filename)
def set_biases(self, data, target_probability=0):
data_mean = np.mean(data,0)
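        # f is the logit (inverse sigmoid); the 1e-5 term avoids division by zero for units that are always active.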
def f(x):
return math.log(x/(1-x+10**(-5)))
data_logprob = np.array([f(xi) for xi in data_mean])
self.visible_bias = tf.convert_to_tensor(data_logprob, tf.dtypes.float32)
if target_probability != 0:
target = np.repeat(target_probability, data.shape[1])
data_targetprob = np.array([f(xi) for xi in target])
self.hidden_bias = tf.convert_to_tensor(data_targetprob, tf.dtypes.float32)
def log_histogram(self, tag, values, step, bins=1000):
"""Logs the histogram of a list/vector of values."""
# Convert to a numpy array
values = self.sess.run(self.w)
# Create histogram using numpy
counts, bin_edges = np.histogram(values, bins=bins)
# Fill fields of histogram proto
hist = tf.compat.v1.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values**2))
# Requires equal number as bins, where the first goes from -DBL_MAX to bin_edges[1]
# See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto#L30
# Thus, we drop the start of the first bin
bin_edges = bin_edges[1:]
# Add bin edges and counts
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
# Create and write Summary
# summary = tf.compat.v1.Summary(value=[tf.compat.v1.Summary.Value(tag=tag, histo=hist)])
# self.writer.add_summary(summary, step)
# self.writer.flush()
def export_to_csv(self):
# Export as csv
self.df_hyperparams.to_csv(self.train_log_dir_raw_params+'.csv', index = False)
df_meanError = pd.DataFrame({'Timestamp':[self.current_time], 'Val':['meanError'],'num':[self.val_meanError]})
df_pearson = | pd.DataFrame({'Timestamp':[self.current_time], 'Val':['pearson'],'num':[self.val_pearson]}) | pandas.DataFrame |
from typing import List, Tuple
from app.domain.sentiment_data import Sentiment_Type
import nltk
import pandas as pd
import streamlit as st
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from app.data.data_handler import DataHandler
from app.service.analyser import Analyser
class UIData_TextInsights:
def __init__(self) -> None:
self.text_statistics = None
self.sent_top_n = None
self.ngram_top_n = None
self.pos_top_n = None
self.file_name = None
self.wordcloud = None
self.idf_ranking = None
self.positives = None
self.negatives = None
self.bigrams = None
self.bigrams_wordcloud = None
self.trigrams = None
self.trigrams_wordcloud = None
self.top_verbs = None
self.top_verbs_wordcloud = None
self.top_nouns = None
self.top_nouns_wordcloud = None
self.top_adjs = None
self.top_adjs_wordcloud = None
self.tfidf_cluster_quality = None
self.tfidf_cluster_top_words = None
pass
def main():
# Resolve dependencies
st.title('My first app')
st.markdown("""---""")
# Run application
run_app()
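# Custom hash functions so st.cache can fingerprint these objects cheaply instead of hashing their full contents.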
def hash_data_handler(data_handler):
return (data_handler.file_name, data_handler.stopwords_list)
def hash_text_insights(data: UIData_TextInsights):
return (data.sent_top_n, data.ngram_top_n, data.pos_top_n, data.file_name)
def run_app():
@st.cache
def get_stopwords() -> List[str]:
nltk.download('stopwords')
stopwords = nltk.corpus.stopwords.words('portuguese')
return stopwords
@st.cache(allow_output_mutation=True, hash_funcs={DataHandler:hash_data_handler})
def get_data(text_file: str) -> DataHandler:
print("Cache miss")
anl = Analyser()
dh = DataHandler(stopwords_list=get_stopwords())
dh.receive_data(text_file)
dh.tfidf = anl.compute_tfidf(dh.data)
dh.bigrams = anl.find_bigrams(dh.data)
dh.trigrams = anl.find_trigrams(dh.data)
dh.pos = anl.define_postaggs(dh.data)
dh.pos_verbs_ranking = anl.calculate_postaggs_ranking(dh.pos, "V")
dh.pos_nouns_ranking = anl.calculate_postaggs_ranking(dh.pos, "S")
dh.pos_adjs_ranking = anl.calculate_postaggs_ranking(dh.pos, "A")
dh.sentiment = anl.calculate_sentiment(dh.data)
return dh
file_location = st.sidebar.text_input("Type file location")
if not file_location:
#TODO: Check files
st.warning('Please input a full file name location.')
st.stop()
dh = get_data(file_location)
report_selector = st.sidebar.selectbox("Choose report", ["Text Insights", "Word Insights"])
sentiment_ranking_size = st.sidebar.slider('Sentiment Ranking Size', 5, 25, 5)
ngrams_ranking_size = st.sidebar.slider('N-Grams Ranking Size', 2, 25, 5)
postags_ranking_size = st.sidebar.slider('POS Taggins Ranking Size', 5, 50, 10)
if report_selector == "Text Insights":
dataui = get_text_insights(dh, sentiment_ranking_size, ngrams_ranking_size, postags_ranking_size)
draw_text_insights(dataui)
else:
pass
def draw_text_insights(uidata_text_insighs:UIData_TextInsights) -> None:
with st.container():
st.write(uidata_text_insighs.text_statistics)
st.markdown("""---""")
with st.container():
st.metric("Cluster Quality (0 is best)", uidata_text_insighs.tfidf_cluster_quality)
st.dataframe(uidata_text_insighs.tfidf_cluster_top_words)
with st.container():
col1, col2 = st.columns(2)
with col1:
st.table(uidata_text_insighs.positives)
with col2:
st.table(uidata_text_insighs.negatives)
st.markdown("""---""")
st.image(uidata_text_insighs.wordcloud.to_array())
with st.container():
st.markdown("""---""")
col1, col2 = st.columns(2)
with col1:
st.image(uidata_text_insighs.bigrams_wordcloud.to_array())
st.table(uidata_text_insighs.bigrams)
with col2:
st.image(uidata_text_insighs.trigrams_wordcloud.to_array())
st.table(uidata_text_insighs.trigrams)
with st.container():
col1, col2, col3 = st.columns(3)
with col1:
st.image(uidata_text_insighs.top_verbs_wordcloud.to_array())
st.table(uidata_text_insighs.top_verbs)
with col2:
st.image(uidata_text_insighs.top_nouns_wordcloud.to_array())
st.table(uidata_text_insighs.top_nouns)
with col3:
st.image(uidata_text_insighs.top_adjs_wordcloud.to_array())
st.table(uidata_text_insighs.top_adjs)
@st.cache(hash_funcs={UIData_TextInsights:hash_text_insights})
def get_text_insights(dh: DataHandler, sent_top_n:int=10, ngram_top_n:int = 5, pos_top_n:int=5) -> UIData_TextInsights:
print("Insights - <NAME>")
anl = Analyser()
top_positives = anl.get_top_sentiments(dh.sentiment, Sentiment_Type.POSITIVE, sent_top_n)
top_negatives = anl.get_top_sentiments(dh.sentiment, Sentiment_Type.NEGATIVE, sent_top_n)
dh.tfidf_clusters = anl.compute_tfidf_cluster(dh.data, dh.tfidf)
top_verbs = dh.pos_verbs_ranking[:pos_top_n]
top_nouns = dh.pos_nouns_ranking[:pos_top_n]
top_adjs = dh.pos_adjs_ranking[:pos_top_n]
uidata = UIData_TextInsights()
uidata.sent_top_n = sent_top_n
uidata.ngram_top_n = ngram_top_n
uidata.pos_top_n = pos_top_n
uidata.file_name = dh.file_name
uidata.text_statistics = "This file has {nl} lines and {nw} words".format(nl = dh.line_count, nw=dh.word_count)
uidata.tfidf_cluster_quality = "{x:.2f}".format(x=dh.tfidf_clusters.inertia)
uidata.tfidf_cluster_top_words = pd.DataFrame([(x, ",".join(dh.tfidf_clusters.top_words_clusters[x])) for x in dh.tfidf_clusters.top_words_clusters], columns=("Cluster", "Top Words"))
uidata.idf_ranking = | pd.DataFrame.from_dict(dh.tfidf.idf, orient="index", columns=["Rank"]) | pandas.DataFrame.from_dict |
import gc
import re
import sys
import warnings
import os
import time
from datetime import datetime
import warnings
import pandas as pd
import numpy as np
import hashlib
from collections import defaultdict,Counter
from .sequence_functions import list_diff_outer_join, lcs, filter_lcs
from .os_functions import *
from .df_functions import *
from .config_table import ConfigReader
from .excel_functions import write_format_columns
from .regex_functions import replace_re_special, replace_punctuations
from .decorator_functions import *
from .data_handle_func import *
from pandas.core.indexes.multi import MultiIndex
class CsvSheetClass(object):
def __init__(self, table_path):
self.name = '{}'.format(table_path)
self.visibility = 0
class Handler(object):
def __init__(self, require_file_dir, input_dir,table_dict):
        # These two inputs are all that is needed to read the raw data and the rule tables
#original data files
self.input_dir = input_dir
#config table dict
self.table_dict = table_dict
self.require_file_dir = require_file_dir
        # Min/max date range of the input data, recorded from the result of concat_data
self.min_max_date_range = ''
        # After concat_data, keep a copy of the original table so that the later match step can extract from it
self.original_complete_header_df = None
@catch_and_print
def get_original2cn_dict(self, header_table_df, file_tag):
"""
        Map every raw header to its standard Chinese header; one mapping dictionary per file tag (country).
"""
original2cn_dict_list = []
original2cn_dict = defaultdict(str)
fillna_dict = {}
dtype_dict = {}
if file_tag.lower() not in [ x.lower() for x in header_table_df.columns.get_level_values(0) ] :
file_tag = 'Without file tag'
header_column_index = header_table_df.columns.get_level_values(
0) == file_tag.lower()
header_table_df_c = header_table_df.iloc[:, header_column_index]
header_table_first_three_c = header_table_df.loc[:, header_table_df.columns.get_level_values(0)[0]]
        # Also collect the fill-before-concat defaults at the same time
for row, last_three in zip(header_table_df_c.iterrows(), header_table_first_three_c.iterrows()):
            # Normalize headers: lowercase, and remove newlines, non-breaking spaces, spaces and tabs
row_list = row[1].values
last_three_list = last_three[1].values
a_list = list(row_list)
b_list = list(last_three_list)
a_list = [str(x).lower().strip().replace('\n', '').replace('\xa0', '').replace(' ', '').replace('\t', '')
for x in a_list if x.strip() != '无' and x.strip().lower() != 'none' and x.strip() != '/' and x.strip() != '']
if a_list:
for x in a_list:
original2cn_dict[x] = b_list[2]
                # Build the dict of values that need to be filled in before concatenation
c_list = [x for x in a_list if split_colon(x)[0].lower().strip() == 'fillbeforeconcat' or split_colon(x)[0].strip() == '合并前填充']
if c_list:
for x in c_list:
fillna_dict[b_list[2]] = split_colon(x)[1]
if (b_list[1] != '默认' and b_list[1].lower() != 'default' and b_list[1] != '') and b_list[2] != '':
dtype_dict.update({b_list[2]: b_list[1]})
return original2cn_dict, fillna_dict, dtype_dict
    # Concatenate the data tables that were read in; this function needs table_dict because it reads complete_header_df and target_cn_columns from it
@get_run_time
def concat_data(self ):
        # This function reads the dropped-in data tables and must always be run
for keys in self.table_dict.keys():
if 'mapping' in keys.lower():
mapping_key = keys
try:
header_table_df = self.table_dict[mapping_key]
except KeyError:
enter_exit('Cannot find mapping configuration sheet!')
complete_header_df = self.table_dict['complete_header_df']
target_cn_columns = self.table_dict['target_cn_columns']
header_table_df = df_fillna_str(header_table_df)
info_path_list = get_walk_abs_files(self.input_dir)
        # Check whether any raw data files were actually found
info_path_list = [x for x in info_path_list if '~$' not in x and (
x[-5:].lower() == '.xlsx' or x[-4:].lower() in ['.xls', '.csv'])]
if len(info_path_list) == 0:
enter_exit(f'Cannot find any data file in folder "{self.input_dir}" !\n')
success_sheet_df_list = []
for table_path in info_path_list:
table_p = Path(table_path)
table_stem = table_p.stem
table_suffix = table_p.suffix
            # Read information encoded in the file name
file_tag = table_stem.split('-')[0].split('_')[0].strip()
            # Get the mapping dict for this file, used to rename raw headers to the standard Chinese headers
original2cn_dict, fillna_dict, dtype_dict = self.get_original2cn_dict(header_table_df, file_tag)
if not original2cn_dict:
enter_exit('"Data_processing_configuration" required mapping field "{}" not found !'.format(file_tag))
            # Is it a CSV file?
is_csv = False
is_xls_special = False
if table_suffix == '.csv':
is_csv = True
csv_sheet_class = CsvSheetClass(table_stem)
sheets_property_list = [csv_sheet_class]
else:
try:
df_workbook = pd.ExcelFile(table_path)
sheets_property_list = df_workbook.book.sheets()
                    # Try to parse the first sheet to make sure the workbook is readable
df_workbook.parse(str(sheets_property_list[0].name))
                except Exception:  # if reading fails, try the special xls format used by some countries
is_xls_special = True
xls_sheet_class = CsvSheetClass(table_stem)
sheets_property_list = [xls_sheet_class]
            # Filter out template data
for sheets_property in sheets_property_list:
sheet = sheets_property.name
sheet_visibility = sheets_property.visibility
                if sheet_visibility == 0:  # only read visible sheets
if is_csv:
df_worksheet = read_csv_data(table_path)
if df_worksheet.empty:
continue
                    elif is_xls_special:  # for this format only the first sheet is read
df_worksheet = read_xls_special(table_path)
if df_worksheet.empty:
continue
else:
df_worksheet = df_workbook.parse(str(sheet), na_values='')
                    # Lowercase the headers, strip inner spaces and meaningless symbols, then rename them through the mapping dict
df_worksheet.columns = [str(x).lower().strip().replace('\n', '').replace('\xa0', '')
.replace(' ', '').replace('\t', '')if x == x else x for x in
df_worksheet.columns]
df_worksheet = dropping_not_mapping(df_worksheet, original2cn_dict, target_cn_columns)
                    # Handle mappings configured with a '+' sign (columns to be combined)
df_worksheet = combine_multi_plus(df_worksheet, original2cn_dict)
                    # Before renaming, check for duplicated fields: if another raw column already maps to e.g. "机型" (model), the raw "机型" column itself must be dropped
                    df_worksheet = drop_duplicated_columns_before_rename(df_worksheet, original2cn_dict)
df_worksheet = df_worksheet.rename(original2cn_dict, axis=1)
                    # Also make sure the mapped fields are unique, otherwise later data columns are affected; returns the columns without duplicates
df_work_sheet = check_mapping_duplicates(df_worksheet, target_cn_columns, table_stem=table_stem)
                    # After renaming, fill in the default values before concatenation
df_worksheet = fillna_with_dict(df_worksheet, fillna_dict)
                    # After the duplicate-mapping check, locate the required columns once more (the processing order matters)
df_worksheet = func_loc(df_worksheet, target_cn_columns)
if not df_worksheet.empty:
check_mapping_complete(df_worksheet, complete_header_df, original2cn_dict,file_tag=file_tag)
                        # Apply the dtype handling once here so conversion errors can be traced to a file; the later concat will still turn dates into object, so it is converted again afterwards
complete_header_df = dtype_handle(complete_header_df, dtype_dict)
                        # Record the successfully read sheet
success_sheet_df_list.append([table_stem, sheet, df_worksheet.shape[0]])
                        # complete_header_df is a complete header frame, which prevents concat from scrambling or dropping columns,
                        # but the concatenation still turns all columns into object (for CSV input; xlsx input is unaffected)
complete_header_df = pd.concat([complete_header_df, df_worksheet], axis=0, sort=False, ignore_index=True)
print(f'Getting from: "{table_stem}",Sheet:{sheet}, {df_worksheet.shape[0]} rows')
success_sheet_df = pd.DataFrame(success_sheet_df_list, columns=['File Name', 'Source Sheet', 'Success reading records'])
complete_header_df = dtype_handle(complete_header_df, dtype_dict)
self.min_max_date_range = get_min_max_date(complete_header_df)
self.original_complete_header_df = complete_header_df.copy()
print(f'Data rows:{complete_header_df.shape[0]}')
return complete_header_df, success_sheet_df
    # 1. Column standardization. Could the matching step run first to speed things up? -- Not possible: the per-country join columns must be standardized before they can be used for a unified match
@get_run_time
def standardize_columns(self, complete_header_df, standardize_config_df):
# 将字段处理为标准表里的字段(模糊匹配)
table_values = standardize_config_df.values
not_standardize_df_list = []
complete_header_df_sub_list = []
partial_match_not_match_df_list = []
row_counter = 0
for row in table_values:
row_counter += 1
source_column = row[1]
standard_table_name = row[2]
standard_column = row[3]
target_column = row[4]
target_column_edit = row[5]
order_columns = row[6]
replace_dict = get_replace_dict(row[7])
            special_syn = [x.lower() for x in row[8].split('\n')]  # characters that must be present on both sides
filter_condition = row[9]
            # Standardization modes: simple fuzzy match -- simple_lcs; strict fuzzy match -- filter_lcs; memory-configuration match -- number_similarity
standardize_mode = row[10]
            # Decide the name of the resulting column
temp_column = ''
if target_column_edit == source_column:
temp_column = source_column
else:
temp_column = target_column_edit
            # Drop the empty values before doing the fuzzy match
complete_header_df_notna = complete_header_df.loc[complete_header_df[source_column].isna() == False, :]
complete_header_df_nan = complete_header_df.loc[complete_header_df[source_column].isna(), :]
            # If everything is empty after filtering, go straight to the next row
if complete_header_df_notna.empty:
continue
if standard_table_name != '' and standard_column.strip() != '':
mode = get_standard_mode(standardize_mode)
print(f'{row_counter}.Processing standardization for "{source_column}"')
print(f'-- Referencing from table "{standard_table_name}" column "{target_column}",Mode:{standardize_mode}')
standard_table_path = get_require_files(self.require_file_dir, [standard_table_name])[standard_table_name]
                # Read the reference table with all columns as str
standard_table_df = read_config_table(standard_table_path)
                # Always drop duplicated columns from the table just read (also needed when the mode is model matching)
standard_table_df = remove_duplicate_columns(standard_table_df)
                # Sort the standard reference table, then drop duplicates so the row kept is the first one after sorting (i.e. the model with the most recent launch date); the duplicates are dropped later inside the loop
if order_columns != '':
standard_table_df = process_sort_order(standard_table_df, [standard_column], order_columns)
                # The standard table needs its own de-duplication, similar to (but not the same as) column matching
                # First level of simple filtering before standardization
filter_condition_2_columns_tag, filter_left_column, filter_right_column = False, '', ''
if filter_condition != '':
filter_condition_2_columns_tag, filter_left_column, filter_right_column = \
get_filter_condition_standardize_tag(filter_condition)
if filter_condition != '' and filter_condition_2_columns_tag == False:
try:
standard_table_df = standard_table_df.query(filter_condition)
except:
enter_exit(f'Standardization: Failed to compile condition: {filter_condition}')
                # Convert all punctuation to half-width
standard_table_df[standard_column] = standard_table_df[standard_column].apply(
lambda x : normalize_punctuations(x) if type(x) == str else x )
complete_header_df_notna[source_column] = complete_header_df_notna[source_column].apply(
lambda x : normalize_punctuations(x) if type(x) == str else x)
                # Second level of filtering before standardization (the filter condition compares columns of the two tables, e.g. matching models of different countries)
                # If this alternative filter (left column equals right column) is used, loop over the condition values and fuzzy-match per group
if filter_condition_2_columns_tag:
                    # Before the fuzzy match, de-duplicate to avoid a Cartesian product (and to make sure the match does not pick a row other than the top-sorted one)
find_lack_columns( standard_table_df, [standard_column, target_column, filter_left_column])
                    # Keep the target_column that the fuzzy match needs to return
standard_table_df_x = standard_table_df.loc[:, [standard_column, target_column, filter_left_column]]\
.drop_duplicates(subset=[standard_column, filter_left_column], keep='first')
                    # Loop to collect the fuzzy-match results
if filter_right_column not in complete_header_df_notna.columns:
enter_exit(f'Standardization Error: Cannot find column:"{filter_right_column}"')
                    # Keep track of the result for each group
complete_header_df_sub_list = []
for u in complete_header_df_notna[filter_right_column].unique() :
temp_standard_df = standard_table_df_x\
.loc[standard_table_df_x[filter_left_column] == u, [standard_column, target_column]]
standard_dict = temp_standard_df.to_dict()
print(f'Standardizing: "{u}"--"{source_column}"')
standard_dict = {x: y for x, y in zip(
standard_dict[standard_column].values(), standard_dict[target_column].values())}
complete_header_df_sub = complete_header_df_notna.loc[
complete_header_df_notna[filter_right_column] == u, :]
complete_header_df_sub[temp_column] = complete_header_df_sub[source_column].fillna(value='').astype(str)
complete_header_df_sub[temp_column] = complete_header_df_sub[temp_column].apply(
lambda x: standardize_column_func(
x, standard_dict, special_syn, replace_dict, ignore_punctuation=True, mode=mode))
complete_header_df_sub_list.append(complete_header_df_sub)
if len(complete_header_df_sub_list) == 1:
complete_header_df_notna = complete_header_df_sub_list[0]
elif len(complete_header_df_sub_list) >= 2 :
complete_header_df_notna = pd.concat(complete_header_df_sub_list, axis=0, ignore_index=True)
                    else:  # no complete_header_df_sub was collected -- create an empty result column
complete_header_df_notna[temp_column] = ''
                # Otherwise a plain df.query-style filter is used
else:
lack_column_list = find_lack_columns(
standard_table_df, [standard_column, target_column],'Standardization reference table')
                    # Extract the standard mapping list
standard_dict = standard_table_df.loc[:, [standard_column, target_column]].to_dict()
standard_dict = {x: y for x, y in zip(
standard_dict[standard_column].values(), standard_dict[target_column].values())}
complete_header_df_notna[temp_column] = complete_header_df_notna[source_column]\
.apply(lambda x: standardize_column_func(x, standard_dict, special_syn, replace_dict,
ignore_punctuation=True, mode=mode) if type(x) == str else x)
            # Remember to concatenate the empty rows back with the processed non-empty rows
complete_header_df = pd.concat([complete_header_df_notna, complete_header_df_nan], axis=0, ignore_index=True)
            # Record which values of these two columns could not be matched
partial_match_not_match_df = get_partial_not_match(complete_header_df_notna,row_counter,source_column,
standard_table_name, standard_column,
filter_condition, target_column_edit, filter_left_column)
partial_match_not_match_df_list.append(partial_match_not_match_df)
if partial_match_not_match_df_list:
partial_match_not_match_df = pd.concat(partial_match_not_match_df_list, axis=0, ignore_index=True)
            # Fuzzy matches are not recorded because the "result column name" may have been changed back to the source column name, which makes the bookkeeping complicated
print(f'Data rows:{complete_header_df.shape[0]}')
return complete_header_df, partial_match_not_match_df
    # 2. Column splitting
@get_run_time
def split_columns(self, complete_header_df, split_table_df):
        # Split columns and stack the results; the configuration format obtained is {'column to split': [list of standard phrases]}
split_table_df = df_fillna_str(split_table_df)
split_table_values = split_table_df.values
split_config_dict = { }
for row in split_table_values:
split_column = row[1]
split_table_name = row[2]
split_table_column = row[3]
filter_condition = row[4]
split_mode = row[5]
split_symbol = row[6]
if split_table_column != '' and split_table_name.strip() != '' and split_column.strip() != '' :
split_file_dict = get_require_files(self.require_file_dir,[split_table_name])
split_file_path = split_file_dict[split_table_name]
split_standard_df = read_config_table(split_file_path)
split_standard_df.columns = [ x.strip() if type(x) == str else x for x in split_standard_df.columns ]
if filter_condition:
split_standard_df = df_query(split_standard_df, filter_condition)
if split_table_column not in split_standard_df.columns:
enter_exit(f'Failed to find column "{split_table_column}" in table "{split_table_name}" ')
else:
split_standard_list = [ x for x in split_standard_df[split_table_column].tolist() if x == x and x != '' ]
split_standard_list = sorted(split_standard_list, key= len , reverse=True)
split_config_dict.update({split_column: [split_standard_list, split_symbol ]})
else:
print(f'Standard column used to split is empty, "{split_symbol}" will be the symbol used to split')
split_config_dict.update({split_column:[[], split_symbol] })
complete_header_df = split_column_by_words(complete_header_df, split_config_dict, mode=split_mode)
print(f'Data rows:{complete_header_df.shape[0]}')
return complete_header_df
    # 3. Column matching
@get_run_time
def match_columns(self, complete_header_df, match_table_df):
table_values = df_fillna_str(match_table_df).values
not_match_df_list = []
for row in table_values:
            source_columns = split_colon(row[1])  # columns of the target table
            join_table_name = row[2]  # table to join against
            join_columns = split_colon(row[3])  # join key columns
            target_columns = split_colon(row[4])  # columns to fetch from the join table
            target_columns_edit = split_colon(row[5])  # names of the fetched columns in the result
            sort_order = row[6]  # de-duplication sort order applied before matching
            replace_dict = get_replace_dict(row[7])
            filter_condition = row[8]  # filter condition for the join table
            match_mode = row[9]  # matching mode: 1. case insensitive 2. case sensitive
# For columns with spaces in their name, you can use backtick quoting.
            # If the target columns and their renamed versions differ in length, just use the required target names directly
if len(target_columns_edit) != len(target_columns) :
target_columns_edit = target_columns
if source_columns !='' and len(join_columns) > 0 and len(target_columns) > 0 :
                # If the two sides have different numbers of columns, truncate to the shorter list
                if len(source_columns) != len(join_columns):
                    min_num = min([len(source_columns), len(join_columns)])
source_columns = source_columns[:min_num+1]
join_columns = join_columns[:min_num+1]
                # If a join table is specified, read it
if join_table_name != '':
join_table_path = get_require_files(self.require_file_dir, [join_table_name])[join_table_name]
join_table_df = read_config_table(join_table_path)
                else:  # if no join table was given, match against the original input data
join_table_df = self.original_complete_header_df
                # Check whether either table is missing required columns
find_lack_columns(complete_header_df, source_columns,'Matching, complete_header_df')
find_lack_columns(join_table_df, set(join_columns + target_columns), 'Matching, join_table')
join_table_df = process_join_table(join_table_df=join_table_df, join_columns=join_columns,
target_columns=target_columns, filter_condition = filter_condition,
sort_order = sort_order, join_table_name = join_table_name)
complete_header_df = process_match_complete_table(complete_header_df= complete_header_df,
source_columns = source_columns ,target_columns = target_columns,
join_columns = join_columns, join_table_name = join_table_name)
                # Rename the join columns to the source column names; this is always required, whether or not an external table is used
for i in range(len(source_columns)):
join_table_df[source_columns[i]] = join_table_df[join_columns[i]]
                # Guard against the join table having only one column which is also the only column to fetch
join_table_df, only_one_match_column = check_only_one_match_column(join_table_df, join_columns, target_columns )
                # Matching ignores case by default
if type(match_mode) == str and match_mode.lower().replace('-', '') == 'case sensitive':
complete_header_df = pd.merge(complete_header_df, join_table_df, 'left', on=source_columns)
else:
complete_header_df = merge_case_insensitive(complete_header_df, join_table_df, 'left', on=source_columns)
if only_one_match_column:
complete_header_df = complete_header_df.rename({'additional_temp': target_columns_edit[0]}, axis=1)
                # Rename by simply re-assigning from the list
if target_columns_edit != target_columns:
for t1, t2 in zip(target_columns, target_columns_edit):
complete_header_df[t2] = complete_header_df[t1]
                # Record rows whose result could not be matched: if the first target column is empty, the row is treated as unmatched
not_match_df = complete_header_df.loc[complete_header_df[target_columns_edit[0]].isna(), source_columns]\
.drop_duplicates()
not_match_df['Content'] = not_match_df[source_columns].agg('+'.join, axis=1)
not_match_df['Matching field'] = '+'.join(source_columns) + ' Match ' + '+'.join(join_columns)
                # Reorder the headers
shift_order_list = list(not_match_df.columns)
not_match_df = not_match_df.loc[:,['Matching field','Content']]
not_match_df_list.append(not_match_df)
if join_table_name != '':
not_match_df = | pd.concat(not_match_df_list, axis=0, ignore_index=True) | pandas.concat |
#!/usr/bin/env python
"""
This script has to be run in a directory containing LST1 DL1 datacheck files,
i.e. those containing info for a whole run, with the name pattern
datacheck_dl1_LST-1.Run?????.h5
The corresponding muon*fits files (which are subrun-wise) have to be present
in the same directory.
The output is the file longterm_dl1_check.h5 file, which contains tables with
some run-wise summary values for plotting long-term evolution of the DL1 data.
It also produces an interactive web page, longterm_dl1_check.html with plots
showing the evolution of many such values.
"""
import copy
import glob
from pathlib import Path
import numpy as np
import pandas as pd
import tables
from astropy.table import Table
from bokeh.io import output_file as bokeh_output_file
from bokeh.io import show
from bokeh.layouts import gridplot, column
from bokeh.models import Div, ColumnDataSource, Whisker, HoverTool, Range1d
from bokeh.models.widgets import Tabs, Panel
from bokeh.plotting import figure
from ctapipe.coordinates import EngineeringCameraFrame
from ctapipe.instrument import SubarrayDescription
from lstchain.visualization.bokeh import show_camera
def main():
output_file_name = 'longterm_dl1_check.h5'
files = glob.glob('datacheck_dl1_LST-1.Run?????.h5')
files.sort()
# hardcoded for now, to be eventually read from data:
numpixels = 1855
# subrun-wise tables: cosmics, pedestals, flatfield. One dictionary per
# each. Note that the cosmics table contains also muon ring information!
cosmics = {'runnumber': [],
'subrun': [],
'time': [],
'elapsed_time': [],
'events': [],
'azimuth': [],
'altitude': []}
pedestals = copy.deepcopy(cosmics)
flatfield = copy.deepcopy(cosmics)
# add table-specific fields:
cosmics['num_contained_mu_rings'] = []
cosmics['mu_effi_mean'] = []
cosmics['mu_effi_stddev'] = []
cosmics['mu_width_mean'] = []
cosmics['mu_width_stddev'] = []
cosmics['mu_radius_mean'] = []
cosmics['mu_radius_stddev'] = []
cosmics['mu_intensity_mean'] = []
cosmics['mu_hg_peak_sample'] = []
cosmics['mu_hg_peak_sample_stddev'] = []
cosmics['fraction_pulses_above10'] = [] # fraction of >10 pe pulses
cosmics['fraction_pulses_above30'] = [] # fraction of >30 pe pulses
pedestals['fraction_pulses_above10'] = [] # fraction of >10 pe pulses
pedestals['fraction_pulses_above30'] = [] # fraction of >30 pe pulses
pedestals['charge_mean'] = []
pedestals['charge_stddev'] = []
flatfield['charge_mean'] = []
flatfield['charge_stddev'] = []
flatfield['rel_time_mean'] = []
flatfield['rel_time_stddev'] = []
# now another dictionary for a run-wise table, with no pixel-wise info:
runsummary = {'runnumber': [],
'time': [],
'elapsed_time': [],
'min_altitude': [],
'mean_altitude': [],
'max_altitude': [],
# currently (as of lstchain 0.5.3) event numbers are post-cleaning!:
'num_cosmics': [],
'num_pedestals': [],
'num_flatfield': [],
'num_pedestals_after_cleaning': [],
'num_contained_mu_rings': [],
'ff_charge_mean': [], # camera average of mean pix FF charge
'ff_charge_mean_err': [], # uncertainty of the above
'ff_charge_stddev': [], # camera average
'ff_time_mean': [], # camera average of mean FF time
'ff_time_mean_err': [], # uncertainty of the above
'ff_time_stddev': [], # camera average
'ff_rel_time_stddev': [], # camera-averaged std dev of pixel t
# w.r.t. average of rest of pixels in camera (~ t-resolution)
'ped_charge_mean': [], # camera average of mean pix ped charge
'ped_charge_mean_err':[], # uncertainty of the above
'ped_charge_stddev': [], # camera average
'ped_fraction_pulses_above10': [], # in whole camera
'ped_fraction_pulses_above30': [], # in whole camera
'cosmics_fraction_pulses_above10': [], # in whole camera
'cosmics_fraction_pulses_above30': [], # in whole camera
'mu_effi_mean': [],
'mu_effi_stddev': [],
'mu_width_mean': [],
'mu_width_stddev': [],
'mu_hg_peak_sample_mean': [],
'mu_hg_peak_sample_stddev': [],
'mu_intensity_mean': []}
# and another one for pixel-wise run averages:
pixwise_runsummary = {'ff_pix_charge_mean': [],
'ff_pix_charge_stddev': [],
'ff_pix_rel_time_mean': [],
'ff_pix_rel_time_stddev': [],
'ped_pix_charge_mean': [],
'ped_pix_charge_stddev': [],
'ped_pix_fraction_pulses_above10': [],
'ped_pix_fraction_pulses_above30': [],
'cosmics_pix_fraction_pulses_above10': [],
'cosmics_pix_fraction_pulses_above30': []}
# Needed for the table description for writing it out to the hdf5 file. Because
# of the vector columns we cannot write this out using pandas:
class pixwise_info(tables.IsDescription):
runnumber = tables.Int32Col()
time = tables.Float64Col()
ff_pix_charge_mean = tables.Float32Col(shape=(numpixels))
ff_pix_charge_stddev = tables.Float32Col(shape=(numpixels))
ff_pix_rel_time_mean = tables.Float32Col(shape=(numpixels))
ff_pix_rel_time_stddev = tables.Float32Col(shape=(numpixels))
ped_pix_charge_mean = tables.Float32Col(shape=(numpixels))
ped_pix_charge_stddev = tables.Float32Col(shape=(numpixels))
ped_pix_fraction_pulses_above10 = tables.Float32Col(shape=(numpixels))
ped_pix_fraction_pulses_above30 = tables.Float32Col(shape=(numpixels))
cosmics_pix_fraction_pulses_above10 = tables.Float32Col(shape=(numpixels))
cosmics_pix_fraction_pulses_above30 = tables.Float32Col(shape=(numpixels))
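    # One row per run; the vector columns above hold the 1855 pixel-wise averages, which is why this table cannot be written with pandas.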
dicts = [cosmics, pedestals, flatfield]
for file in files:
try:
a = tables.open_file(file)
except FileNotFoundError:
print('Could not read file', file, '- skipping...')
continue
print(file)
runnumber = int(file[file.find('.Run')+4:file.find('.Run')+9])
datatables = []
for name in ['/dl1datacheck/cosmics',
'/dl1datacheck/pedestals',
'/dl1datacheck/flatfield']:
try:
node = a.get_node(name)
except Exception:
print(' Table', name, 'is missing!')
datatables.append(None)
continue
datatables.append(node)
subruns = None
# fill data which are common to all tables:
for table, d in zip(datatables, dicts):
if table is None:
continue
d['runnumber'].extend(len(table)*[runnumber])
d['subrun'].extend(table.col('subrun_index'))
d['elapsed_time'].extend(table.col('elapsed_time'))
d['events'].extend(table.col('num_events'))
d['time'].extend(table.col('dragon_time').mean(axis=1))
d['azimuth'].extend(table.col('mean_az_tel'))
d['altitude'].extend(table.col('mean_alt_tel'))
# now fill table-specific quantities. In some cases they are
# pixel-averaged values:
if datatables[0] is not None:
table = a.root.dl1datacheck.cosmics
cosmics['fraction_pulses_above10'].extend(
table.col('num_pulses_above_0010_pe').mean(axis=1) /
table.col('num_events'))
cosmics['fraction_pulses_above30'].extend(
table.col('num_pulses_above_0030_pe').mean(axis=1) /
table.col('num_events'))
if datatables[1] is not None:
table = a.root.dl1datacheck.pedestals
pedestals['fraction_pulses_above10'].extend(
table.col('num_pulses_above_0010_pe').mean(axis=1) /
table.col('num_events'))
pedestals['fraction_pulses_above30'].extend(
table.col('num_pulses_above_0030_pe').mean(axis=1) /
table.col('num_events'))
pedestals['charge_mean'].extend(
table.col('charge_mean').mean(axis=1))
pedestals['charge_stddev'].extend(
table.col('charge_stddev').mean(axis=1))
if datatables[2] is not None:
table = a.root.dl1datacheck.flatfield
flatfield['charge_mean'].extend(
table.col('charge_mean').mean(axis=1))
flatfield['charge_stddev'].extend(
table.col('charge_stddev').mean(axis=1))
flatfield['rel_time_mean'].extend(
table.col('relative_time_mean').mean(axis=1))
flatfield['rel_time_stddev'].extend(
table.col('relative_time_stddev').mean(axis=1))
table = a.root.dl1datacheck.cosmics
# needed later for the muons:
subruns = table.col('subrun_index')
# now fill the run-wise table:
runsummary['runnumber'].extend([runnumber])
runsummary['time'].extend([table.col('dragon_time').mean()])
runsummary['elapsed_time'].extend([table.col('elapsed_time').sum()])
runsummary['min_altitude'].extend([table.col('mean_alt_tel').min()])
runsummary['mean_altitude'].extend([table.col('mean_alt_tel').mean()])
runsummary['max_altitude'].extend([table.col('mean_alt_tel').max()])
runsummary['num_cosmics'].extend([table.col('num_events').sum()])
runsummary['cosmics_fraction_pulses_above10'].extend(
[(table.col('num_pulses_above_0010_pe').mean(axis=1)).sum() /
runsummary['num_cosmics'][-1]])
runsummary['cosmics_fraction_pulses_above30'].extend(
[(table.col('num_pulses_above_0030_pe').mean(axis=1)).sum() /
runsummary['num_cosmics'][-1]])
pixwise_runsummary['cosmics_pix_fraction_pulses_above10'].extend(
[table.col('num_pulses_above_0010_pe').sum(axis=0) /
runsummary['num_cosmics'][-1]])
pixwise_runsummary['cosmics_pix_fraction_pulses_above30'].extend(
[table.col('num_pulses_above_0030_pe').sum(axis=0) /
runsummary['num_cosmics'][-1]])
if datatables[1] is not None:
table = a.root.dl1datacheck.pedestals
nevents = table.col('num_events') # events per subrun
events_in_run = nevents.sum()
runsummary['num_pedestals'].extend([table.col('num_events').sum()])
runsummary['num_pedestals_after_cleaning'].extend([table.col(
'num_cleaned_events').sum()])
runsummary['ped_fraction_pulses_above10'].extend([(table.col('num_pulses_above_0010_pe').mean(axis=1)).sum()/
runsummary['num_pedestals'][-1]])
runsummary['ped_fraction_pulses_above30'].extend([(table.col('num_pulses_above_0030_pe').mean(axis=1)).sum()/
runsummary['num_pedestals'][-1]])
# Mean pedestal charge through a run, for each pixel:
charge_mean = np.sum(table.col('charge_mean')*nevents[:, None],
axis=0) / events_in_run
# Now store the pixel-averaged mean pedestal charge:
runsummary['ped_charge_mean'].extend([np.nanmean(charge_mean)])
npixels=len(charge_mean)
runsummary['ped_charge_mean_err'].extend([np.nanstd(charge_mean) /
np.sqrt(npixels)])
# Pedestal charge std dev through a run, for each pixel:
charge_stddev =\
np.sqrt(np.sum((table.col('charge_stddev')**2)*nevents[:, None],
axis=0) / events_in_run)
# Store the pixel-averaged pedestal charge std dev:
runsummary['ped_charge_stddev'].extend([np.nanmean(charge_stddev)])
pixwise_runsummary['ped_pix_fraction_pulses_above10'].extend([table.col('num_pulses_above_0010_pe').sum(axis=0)/
runsummary['num_pedestals'][-1]])
pixwise_runsummary['ped_pix_fraction_pulses_above30'].extend([table.col('num_pulses_above_0030_pe').sum(axis=0)/
runsummary['num_pedestals'][-1]])
pixwise_runsummary['ped_pix_charge_mean'].extend(
[table.col('charge_mean').mean(axis=0)])
pixwise_runsummary['ped_pix_charge_stddev'].extend(
[table.col('charge_stddev').mean(axis=0)])
else:
runsummary['num_pedestals'].extend([np.nan])
runsummary['num_pedestals_after_cleaning'].extend([np.nan])
runsummary['ped_fraction_pulses_above10'].extend([np.nan])
runsummary['ped_fraction_pulses_above30'].extend([np.nan])
runsummary['ped_charge_mean'].extend([np.nan])
runsummary['ped_charge_mean_err'].extend([np.nan])
runsummary['ped_charge_stddev'].extend([np.nan])
pixwise_runsummary['ped_pix_fraction_pulses_above10'].extend([numpixels*[np.nan]])
pixwise_runsummary['ped_pix_fraction_pulses_above30'].extend([numpixels*[np.nan]])
pixwise_runsummary['ped_pix_charge_mean'].extend([numpixels*[np.nan]])
pixwise_runsummary['ped_pix_charge_stddev'].extend([numpixels*[np.nan]])
if datatables[2] is not None:
table = a.root.dl1datacheck.flatfield
nevents = table.col('num_events') # events per subrun
events_in_run = nevents.sum()
runsummary['num_flatfield'].extend([events_in_run])
# Mean flat field charge through a run, for each pixel:
charge_mean = np.sum(table.col('charge_mean') * nevents[:, None],
axis=0) / events_in_run
# Mean flat field time through a run, for each pixel:
time_mean = np.sum(table.col('time_mean') * nevents[:, None],
axis=0) / events_in_run
# Now store the pixel-averaged mean charge:
runsummary['ff_charge_mean'].extend([np.nanmean(charge_mean)])
npixels=len(charge_mean)
runsummary['ff_charge_mean_err'].extend([np.nanstd(charge_mean) /
np.sqrt(npixels)])
# FF charge std dev through a run, for each pixel:
charge_stddev =\
np.sqrt(np.sum((table.col('charge_stddev')**2)*nevents[:, None],
axis=0) / events_in_run)
# Store the pixel-averaged FF charge std dev:
runsummary['ff_charge_stddev'].extend([np.nanmean(charge_stddev)])
# Pixel-averaged mean time:
runsummary['ff_time_mean'].extend([np.nanmean(time_mean)])
runsummary['ff_time_mean_err'].extend([np.nanstd(time_mean) /
np.sqrt(npixels)])
# FF time std dev through a run, for each pixel:
time_stddev =\
np.sqrt(np.sum((table.col('time_stddev')**2)*nevents[:, None],
axis=0) / events_in_run)
# Store the pixel-averaged FF time std dev:
runsummary['ff_time_stddev'].extend([np.nanmean(time_stddev)])
rel_time_stddev =\
np.sqrt(np.sum((table.col('relative_time_stddev')**2) *
nevents[:, None], axis=0) / events_in_run)
runsummary['ff_rel_time_stddev'].\
extend([np.nanmean(rel_time_stddev)])
pixwise_runsummary['ff_pix_charge_mean'].extend(
[table.col('charge_mean').mean(axis=0)])
pixwise_runsummary['ff_pix_charge_stddev'].extend(
[table.col('charge_stddev').mean(axis=0)])
pixwise_runsummary['ff_pix_rel_time_mean'].extend(
[table.col('relative_time_mean').mean(axis=0)])
pixwise_runsummary['ff_pix_rel_time_stddev'].extend(
[table.col('relative_time_stddev').mean(axis=0)])
else:
runsummary['num_flatfield'].extend([np.nan])
runsummary['ff_charge_mean'].extend([np.nan])
runsummary['ff_charge_mean_err'].extend([np.nan])
runsummary['ff_charge_stddev'].extend([np.nan])
runsummary['ff_time_mean'].extend([np.nan])
runsummary['ff_time_mean_err'].extend([np.nan])
runsummary['ff_time_stddev'].extend([np.nan])
runsummary['ff_rel_time_stddev'].extend([np.nan])
pixwise_runsummary['ff_pix_charge_mean'].extend([numpixels*[np.nan]])
pixwise_runsummary['ff_pix_charge_stddev'].extend([numpixels*[np.nan]])
pixwise_runsummary['ff_pix_rel_time_mean'].extend(
[numpixels * [np.nan]])
pixwise_runsummary['ff_pix_rel_time_stddev'].extend(
[numpixels * [np.nan]])
a.close()
# Now process the muon files (one per subrun, containing one entry per ring):
empty_files = 0
contained_mu_wholerun = None
num_contained_mu_rings_in_run = 0
for subrun in subruns:
mufile = 'muons_LST-1.Run{0:05d}.{1:04d}.fits'.format(runnumber, subrun)
dat = None
try:
dat = Table.read(mufile, format='fits')
except Exception:
print(' File', mufile, 'not found - going on')
if dat is None or len(dat) == 0:
empty_files += 1
cosmics['num_contained_mu_rings'].extend([0])
cosmics['mu_effi_mean'].extend([np.nan])
cosmics['mu_effi_stddev'].extend([np.nan])
cosmics['mu_width_mean'].extend([np.nan])
cosmics['mu_width_stddev'].extend([np.nan])
cosmics['mu_radius_mean'].extend([np.nan])
cosmics['mu_radius_stddev'].extend([np.nan])
cosmics['mu_intensity_mean'].extend([np.nan])
cosmics['mu_hg_peak_sample'].extend([np.nan])
cosmics['mu_hg_peak_sample_stddev'].extend([np.nan])
continue
df_muons = dat.to_pandas()
# contained and clean muon rings:
contained_mu = df_muons[(df_muons['ring_containment']>0.99)&(df_muons['size_outside']<1.)]
num_contained_mu_rings_in_run += len(contained_mu)
cosmics['num_contained_mu_rings'].extend([len(contained_mu)])
cosmics['mu_effi_mean'].extend([contained_mu['muon_efficiency'].mean()])
cosmics['mu_effi_stddev'].extend([contained_mu['muon_efficiency'].std()])
cosmics['mu_width_mean'].extend([contained_mu['ring_width'].mean()])
cosmics['mu_width_stddev'].extend([contained_mu['ring_width'].std()])
cosmics['mu_radius_mean'].extend([contained_mu['ring_radius'].mean()])
cosmics['mu_radius_stddev'].extend([contained_mu['ring_radius'].std()])
cosmics['mu_intensity_mean'].extend([contained_mu['ring_size'].mean()])
cosmics['mu_hg_peak_sample'].extend([contained_mu['hg_peak_sample'].mean()])
cosmics['mu_hg_peak_sample_stddev'].extend([contained_mu['hg_peak_sample'].std()])
if contained_mu_wholerun is None:
contained_mu_wholerun = df_muons
else:
contained_mu_wholerun = | pd.concat([contained_mu_wholerun, df_muons], ignore_index=True) | pandas.concat |
"""
Combine parsed data from a set of batch processing output files and write to a
single CSV file.
"""
import logging
import os
import re
import csv
import pandas as pd
from .. import io
from .. import parsing
from .. import util
logger = logging.getLogger(__name__)
class OutputStitcher(object):
"""
Given a path to an output folder or list of files, combine parsed data
from files and write CSV.
"""
def __init__(self, path, output_type=None, outputs=None):
logger.debug("creating `OutputStitcher` for path '{}'".format(path))
self.path = path
if output_type is None:
self.type = self._sniff_output_type()
def _sniff_output_type(self):
"""
Return predicted output type based on specified path.
"""
output_types = ['metrics', 'QC', 'counts', 'validation']
output_match = [t.lower() for t in output_types
if re.search(t, self.path)]
if len(output_match):
return output_match[0]
def _get_outputs(self, output_type):
"""
Return list of outputs of specified type.
"""
output_filetypes = {'metrics': 'txt|html',
'qc': 'txt',
'counts': 'txt',
'validation': 'csv'}
return [os.path.join(self.path, f)
for f in os.listdir(self.path)
if re.search(output_type, f)
and not re.search('combined', f)
and re.search(output_filetypes[output_type],
os.path.splitext(f)[-1])]
def _get_parser(self, output_type, output_source):
"""
Return the appropriate parser for the current output file.
"""
parsers = {
'metrics': {'htseq': getattr(io, 'HtseqMetricsFile'),
'picard-rnaseq': getattr(io, 'PicardMetricsFile'),
'picard-markdups': getattr(io, 'PicardMetricsFile'),
'picard-align': getattr(io, 'PicardMetricsFile'),
'picard-alignment': getattr(io, 'PicardMetricsFile'),
'tophat-stats': getattr(io, 'TophatStatsFile')},
'qc': {'fastqc': getattr(io, 'FastQCFile'),
'fastqc-R1': getattr(io, 'FastQCFile'),
'fastqc-R2': getattr(io, 'FastQCFile')},
'counts': {'htseq': getattr(io, 'HtseqCountsFile')},
'validation': {'sexcheck': getattr(io, 'SexcheckFile')}
}
logger.debug("matched parser '{}' for output type '{}' and source '{}'"
.format(parsers[output_type][output_source],
output_type, output_source))
return parsers[output_type][output_source]
def _read_data(self):
"""
Parse and store data for each output file.
"""
outputs = self._get_outputs(self.type)
outputs.sort()
self.data = {}
for o in outputs:
logger.debug("parsing output file '{}'".format(o))
out_items = parsing.parse_output_filename(o)
proclib_id = out_items['sample_id']
out_type = out_items['type']
out_source = out_items['source']
logger.debug("storing data from '{}' in '{}' '{}'".format(
out_source, proclib_id, out_type))
out_parser = self._get_parser(out_type, out_source)(path=o)
self.data.setdefault(
out_type, {}).setdefault(proclib_id, []).append(
{out_source: out_parser.parse()}
)
def _build_table(self):
"""
Combine parsed data into table for writing.
"""
output_data = self.data[self.type]
if self.type == 'counts':
logger.info("combining counts data")
for idx, (sample_id, sample_data) in enumerate(output_data.items()):
data = sample_data[0]['htseq']
data = data.rename(index=str, columns={'count': sample_id})
if idx == 0:
table_data = data
else:
table_data = | pd.merge(table_data, data, on='geneName', sort=True) | pandas.merge |
import pandas as pd
import numpy as np
from openpyxl import load_workbook
from threading import Thread
def insert_data_main(data,cols,filename=".\\OrderData\\order_data.xlsx"):
# dataframe = pd.read_excel(filename)
df = pd.DataFrame([data],columns=cols)
writer = | pd.ExcelWriter(filename,engine='openpyxl') | pandas.ExcelWriter |
# dkscraper.py
import datetime
import logging
from pathlib import Path
import numpy as np
import pandas as pd
DATA_DIR = Path(__file__).parent / 'data'
OFFENSE_PLAY_TYPES = ('pass', 'run', 'qb_spike', 'qb_kneel')
OFFENSE_IMPORTANT_PLAYS = ('pass', 'run')
def convert_top(t):
"""Converts time of possession string to seconds
Args:
t (str): e.g. '1:30'
Returns:
int: e.g. 90
"""
try:
m, s = [int(c) for c in t.split(':')]
return m * 60 + s
except (AttributeError, ValueError):
return 0
def current_season():
"""Gets current season year
Returns:
int: e.g. 2020
"""
td = datetime.datetime.today()
if td.month > 8:
return td.year
return td.year - 1
def current_season_week(sched=None):
"""Gets current week of current season
Args:
sched (DataFrame): default None
Returns:
int: 1 - 17
"""
if sched is None or sched.empty:
sched = schedule()
td = datetime.datetime.today()
seas = current_season()
week_starts = sched.loc[sched.season == seas, :].groupby('week')['gameday'].min()
this_week = week_starts.loc[week_starts < td].max()
return week_starts.loc[week_starts == this_week].index.values[0]
def dst(df):
"""Gets core dst stats from play-by-play
Args:
df (DataFrame): play-by-play dataframe
Returns:
DataFrame with columns
"""
pass
def gamesmeta(sched=None):
"""Converts schedule to one row per team, two per game"""
if sched is None or sched.empty:
sched = schedule()
h = sched.copy()
a = sched.copy()
h = h.rename(columns={'home_team': 'team', 'away_team': 'opp'})
a = a.rename(columns={'away_team': 'team', 'home_team': 'opp'})
return pd.concat([a, h]).sort_values('game_id')
def passing(df):
"""Gets core passing stats from play-by-play
Args:
df (DataFrame): play-by-play dataframe
Returns:
DataFrame with columns
"""
pass
def plays(df):
"""Gets core pace/plays from play-by-play
Args:
df (DataFrame): play-by-play dataframe
Returns:
DataFrame with columns
"""
tp = (
df.query('play_type in @OFFENSE_PLAY_TYPES')
.pivot_table(index=['game_id', 'posteam'],
columns=['play_type'],
values=['play_id'],
aggfunc='count',
fill_value=0)
.pipe(lambda x: x.set_axis([f'{b}_plays' for a, b in x.columns], axis=1, inplace=False))
.reset_index()
)
tp['tot_plays'] = tp.loc[:, [c for c in tp.columns if '_plays' in c]].sum(axis=1)
tp['run_pct'] = tp['run_plays'] / (tp['run_plays'] + tp['pass_plays'])
tp['pass_pct'] = tp['pass_plays'] / (tp['run_plays'] + tp['pass_plays'])
return tp.join(time_of_possession(df), on=['game_id', 'posteam'], how='left')
def receiving(df):
"""Gets core receiving stats from play-by-play
Args:
df (DataFrame): play-by-play dataframe
Returns:
DataFrame with columns
targets, receptions, rec_yards, rec_tds,
total_air_yards, inc_air_yards, yac
"""
cols = ['player', 'targets', 'receptions', 'rec_yards', 'rec_tds',
'total_air_yards', 'inc_air_yards', 'yac']
return (
df
.query('play_type == "pass"')
.groupby(['game_id', 'receiver_player_id'])
.agg(targets=('play_type', 'count'),
receptions=('complete_pass', 'sum'),
rec_yards=('yards_gained', 'sum'),
rec_tds=('pass_touchdown', 'sum'),
total_air_yards=('air_yards', 'sum'),
yac=('yards_after_catch', 'sum'))
.assign(inc_air_yards=lambda x: x['total_air_yards'] + x['yac'] - x['rec_yards'])
.reset_index(level=0, drop=True)
.join(df.groupby('receiver_player_id').first()['receiver_player_name'], how='left')
.reset_index(drop=True)
.rename(columns={'receiver_player_name': 'player'})
.loc[:, cols]
.set_index('player')
.fillna(0)
.astype(int)
)
def rushing(df, add_success=False):
"""Gets core rushing stats from play-by-play
Args:
df (DataFrame): play-by-play dataframe
add_success (bool): add success data, default False
Returns:
DataFrame with columns
'rush_att', 'rush_yards', 'rush_tds'
"""
tmp = (
df
.query('play_type == "run"')
.rename(columns={'rusher_player_id': 'player_id', 'rusher_player_name': 'player'})
.groupby(['game_id', 'player_id', 'player'])
.agg(rush_att=('rush_attempt', 'sum'),
rush_yards=('yards_gained', 'sum'),
rush_tds=('rush_touchdown', 'sum'))
.droplevel(0)
.fillna(0)
.astype(int)
)
if add_success:
s = rushing_success_rate(df)
tmp = tmp.join(s.drop('rushes', axis=1), how='left')
return tmp
def rushing_success(row):
"""Determines whether rushing play was success
Based on Chase Stuart / Football perspectives
Returns:
int: 1 if success, 0 otherwise
"""
success = 0
if row.down == 1:
if row.yards_gained >= 6:
success = 1
elif row.yards_gained >= row.ydstogo * .4:
success = 1
elif row.down == 2:
if row.yards_gained >= 6:
success = 1
elif row.yards_gained >= row.ydstogo * .5:
success = 1
    elif row.down > 2:
if row.yards_gained >= row.ydstogo:
success = 1
return success
def rushing_success_rate(df):
"""Calculates rushing success rate"""
df['success'] = df.apply(rushing_success, axis=1)
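    # Exclude 3rd/4th-down carries with more than 5 yards to go from the success-rate sample.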
criteria = (df.down > 2) & (df.ydstogo > 5)
return (
df
.loc[~criteria, :]
.rename(columns={'rusher_player_id': 'player_id', 'rusher_player_name': 'player'})
.groupby(['game_id', 'player_id', 'player'])
.agg(successes=('success', 'sum'),
rushes=('rush_attempt', 'sum'))
.assign(success_rate=lambda df_: df_.successes / df_.rushes)
.droplevel(0)
)
def schedule(fn=None):
"""Gets schedule"""
if not fn:
fn = DATA_DIR / 'schedule.parquet'
return | pd.read_parquet(fn) | pandas.read_parquet |
import pytest
import pandas as pd
import altair as alt
from pdvega.tests import utils
def test_line_simple():
df = pd.DataFrame({"x": [1, 4, 2, 3, 5], "y": [6, 3, 4, 5, 2]})
plot = df.vgplot.line()
utils.validate_vegalite(plot)
assert plot.mark == "line"
utils.check_encodings(plot, x="index", y="value",
color="variable")
data = plot.data
assert set(pd.unique(data["variable"])) == {"x", "y"}
def test_line_xy():
df = pd.DataFrame({"x": [1, 4, 2, 3, 5], "y": [6, 3, 4, 5, 2], "z": range(5)})
plot = df.vgplot.line(x="x", y="y")
utils.validate_vegalite(plot)
assert plot.mark == "line"
utils.check_encodings(plot, x="x", y="value",
color="variable", order="index")
data = plot.data
assert set(pd.unique(data["variable"])) == {"y"}
def test_series_line():
ser = pd.Series([3, 2, 3, 2, 3])
plot = ser.vgplot.line()
utils.validate_vegalite(plot)
assert plot.mark == "line"
utils.check_encodings(plot, x="index", y="0")
def test_scatter_simple():
df = pd.DataFrame({"x": [1, 4, 2, 3, 5], "y": [6, 3, 4, 5, 2]})
plot = df.vgplot.scatter(x="x", y="y")
utils.validate_vegalite(plot)
assert plot.mark == "point"
utils.check_encodings(plot, x="x", y="y")
def test_scatter_color_size():
df = pd.DataFrame(
{"x": [1, 4, 2, 3, 5], "y": [6, 3, 4, 5, 2], "c": range(5), "s": range(5)}
)
plot = df.vgplot.scatter(x="x", y="y", c="c", s="s")
utils.validate_vegalite(plot)
assert plot.mark == "point"
utils.check_encodings(plot, x="x", y="y", color="c", size="s")
def test_scatter_common_columns():
df = pd.DataFrame({"x": [1, 4, 2, 3, 5], "y": [6, 3, 4, 5, 2]})
plot = df.vgplot.scatter(x="x", y="y", c="y")
utils.validate_vegalite(plot)
assert plot.mark == "point"
utils.check_encodings(plot, x="x", y="y", color="y")
def test_bar_simple():
df = pd.DataFrame({"x": [1, 4, 2, 3, 5], "y": [6, 3, 4, 5, 2]})
plot = df.vgplot.bar()
utils.validate_vegalite(plot)
assert plot.mark == "bar"
utils.check_encodings(
plot, x="index", y="value", color="variable",
opacity=utils.IGNORE
)
data = plot.data
assert set(pd.unique(data["variable"])) == {"x", "y"}
assert plot["encoding"]["y"]["stack"] is None
def test_bar_stacked():
df = pd.DataFrame({"x": [1, 4, 2, 3, 5], "y": [6, 3, 4, 5, 2]})
plot = df.vgplot.bar(stacked=True)
utils.validate_vegalite(plot)
assert plot.mark == "bar"
utils.check_encodings(plot, x="index", y="value", color="variable")
data = plot.data
assert set(pd.unique(data["variable"])) == {"x", "y"}
assert plot["encoding"]["y"]["stack"] == "zero"
def test_bar_xy():
df = pd.DataFrame({"x": [1, 4, 2, 3, 5], "y": [6, 3, 4, 5, 2]})
plot = df.vgplot.bar(x="x", y="y")
utils.validate_vegalite(plot)
assert plot.mark == "bar"
utils.check_encodings(plot, x="x", y="value", color="variable")
data = plot.data
assert set(pd.unique(data["variable"])) == {"y"}
assert plot["encoding"]["y"]["stack"] is None
def test_bar_xy_stacked():
df = pd.DataFrame({"x": [1, 4, 2, 3, 5], "y": [6, 3, 4, 5, 2]})
plot = df.vgplot.bar(x="x", y="y", stacked=True)
utils.validate_vegalite(plot)
assert plot.mark == "bar"
utils.check_encodings(plot, x="x", y="value", color="variable")
data = plot.data
assert set(pd.unique(data["variable"])) == {"y"}
assert plot["encoding"]["y"]["stack"] == "zero"
def test_series_bar():
ser = pd.Series([4, 5, 4, 5], index=["A", "B", "C", "D"])
plot = ser.vgplot.bar()
utils.validate_vegalite(plot)
assert plot.mark == "bar"
utils.check_encodings(plot, x="index", y="0")
def test_barh_simple():
df = pd.DataFrame({"x": [1, 4, 2, 3, 5], "y": [6, 3, 4, 5, 2]})
plot = df.vgplot.barh()
utils.validate_vegalite(plot)
assert plot.mark == "bar"
utils.check_encodings(
plot, y="index", x="value", color="variable",
opacity=utils.IGNORE
)
data = plot.data
assert set(pd.unique(data["variable"])) == {"x", "y"}
assert plot["encoding"]["x"]["stack"] is None
def test_barh_stacked():
df = pd.DataFrame({"x": [1, 4, 2, 3, 5], "y": [6, 3, 4, 5, 2]})
plot = df.vgplot.barh(stacked=True)
utils.validate_vegalite(plot)
assert plot.mark == "bar"
utils.check_encodings(plot, y="index", x="value", color="variable")
data = plot.data
assert set(pd.unique(data["variable"])) == {"x", "y"}
assert plot["encoding"]["x"]["stack"] == "zero"
def test_barh_xy():
df = pd.DataFrame({"x": [1, 4, 2, 3, 5], "y": [6, 3, 4, 5, 2]})
plot = df.vgplot.barh(x="x", y="y")
utils.validate_vegalite(plot)
assert plot.mark == "bar"
utils.check_encodings(plot, x="value", y="x", color="variable")
data = plot.data
assert set( | pd.unique(data["variable"]) | pandas.unique |
###################################
##### gather results and plot #####
###################################
import pandas
import yaml
import shutil
import os
import glob
def gather_result(job_name):
"""
    For job_name, return the paths to the directories
    that contain each job result.
"""
dirnames = glob.glob("plot/*")
out_dirnames = []
for dirname in dirnames:
config_file = "{}/config.yml".format(dirname)
try:
with open(config_file) as f:
                try:
                    obj = yaml.load(f, Loader=yaml.Loader)
                except Exception:
                    print("Error in yaml.load. Check config file: {}".format(config_file))
if obj["args"].job_name == job_name:
out_dirnames.append(dirname)
        except Exception:
            pass
return out_dirnames
def job_to_pkl(job_names):
pkl_name = ""
for job_name in job_names:
pkl_name = "{}_{}".format(pkl_name, job_name)
pkl_name = pkl_name + ".pkl"
os.makedirs("result/", exist_ok=True)
return "result/{}".format(pkl_name)
def convert_results_to_pkl(args, index, job_names, log_files, only_gather_out_of_ntk=False,N=1, thres=10):
### set up jobname
### we train nets for last job_name
### for plot
### index.append("max_eig")
dirnames_list = []
for job_name in job_names:
print("job_name:", job_name)
dirnames = gather_result(job_name)
dirnames_list.append(dirnames)
df = | pandas.DataFrame() | pandas.DataFrame |
#from msilib.schema import Directory
import datanog as nog
from gui import Ui_MainWindow
import scipy.signal as signal
import os
import time
import pickle
import sigprocess as sp
from PyQt5 import QtCore as qtc
from PyQt5 import QtWidgets as qtw
from PyQt5 import Qt as qt
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT as Navi
from matplotlib.figure import Figure
import numpy as np
import pandas as pd
import gc
root = os.getcwd()
class MatplotlibCanvas(FigureCanvasQTAgg):
def __init__(self,parent=None, dpi = 50):
self.fig = Figure(figsize=(6,6), tight_layout=True, dpi=dpi)
self.axes = self.fig.add_subplot(111)
super(MatplotlibCanvas,self).__init__(self.fig)
self.fig.tight_layout()
class Worker(qtc.QRunnable):
def __init__(self, fn):
super(Worker, self).__init__()
self.fn = fn
@qtc.pyqtSlot()
def run(self):
try:
result = self.fn()
except Exception as e:
print(e)
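# A minimal sketch of how the Worker/QThreadPool pattern above can be used to run
# a callable off the GUI thread; the callable here is a placeholder.
def _example_worker_usage():
    pool = qtc.QThreadPool.globalInstance()
    pool.start(Worker(lambda: print('running off the GUI thread')))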
class appnog(qtw.QMainWindow):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.msg = ""
self.ui.startbutton.clicked.connect(self.collect)
self.ui.stopbutton.clicked.connect(self.interrupt)
self.ui.openbttn.clicked.connect(self.getFile)
self.ui.calibutton.clicked.connect(self.calibration)
self.ui.linkSensor.clicked.connect(self.linkSens)
self.ui.linkSensor.setEnabled(False)
self.ui.calibutton.setEnabled(False)
self.ui.initbttn.clicked.connect(self.initDevices)
self.ui.loadbttn.clicked.connect(self.loadTF)
self.ui.combo_TF.currentIndexChanged.connect(self.plotTF)
self.datacache = []
self.threadpool = qtc.QThreadPool()
print("Multithreading with maximum %d threads" % self.threadpool.maxThreadCount())
self.toolbar = None
self.canv = MatplotlibCanvas(self)
#self.ui.vLayout_plot.addWidget(self.canv)
self.toolbarTF = None
self.canvTF = MatplotlibCanvas(self)
#self.ui.vLayout_TF.addWidget(self.canvTF)
def initDevices(self):
global dn, fs, dt
dn = nog.daq()
self.devsens={}
'''
try:
with open(root+'sensors.data', 'rb') as f:
dn.dev = pickle.load(f)
print(root+'sensors.data loaded')
except:
print('no previous sensor data')
for _dev in dn.dev:
self.devsens[str(_dev[0])] = str(_dev[-1])
'''
self.loadDevices()
self.ui.linkSensor.setEnabled(True)
self.ui.calibutton.setEnabled(True)
def pull(self):
for _dev in self.devsens:
dn.config(_dev[0])
time.sleep(dn.dt)
dn.savedata(dn.pulldata(self.ui.label.text()))
self.ui.startbutton.setEnabled(True)
def collect(self):
self.ui.startbutton.setEnabled(False)
worker = Worker(self.pull)
self.threadpool.start(worker)
def loadDevices(self):
try:
self.ui.comboBox.clear()
        except Exception:
pass
for _sens in dn.dev:
self.devsens[str(_sens[0])] = str(_sens[-1])
self.ui.comboBox.addItem(str(_sens[0])+'--'+str(_sens[-1]))
print(self.devsens)
def interrupt(self):
dn.state = 0
def getFile(self):
""" This function will get the address of the csv file location
also calls a readData function
"""
os.chdir(root)
try:
os.chdir('DATA')
        except Exception:
pass
self.filename = qtw.QFileDialog.getOpenFileName()[0]
print("File :", self.filename)
try:
self.readData()
except Exception:
pass
def calib(self):
dn.calibrate(dn.dev[self.ui.comboBox.currentIndex()])
def calibrate(self):
workal = Worker(self.calib)
self.threadpool.start(workal)
def linkSens(self):
os.chdir(root)
try:
os.chdir('sensors')
        except Exception:
pass
self.filename = qtw.QFileDialog.getOpenFileName(directory='home/pi/gordata/sensors')[0]
print("File :", self.filename)
ii = self.ui.comboBox.currentIndex()
dn.dev[ii][-1] = self.filename[25:]
self.loadDevices()
with open(root+'sensors.data', 'wb') as f:
pickle.dump(dn.dev, f)
os.chdir(root)
np.save('devsens.npy', self.devsens)
def readData(self):
self.datacache = pd.read_csv(self.filename, index_col='t')
self.updatePlot(self.datacache)
def loadTF(self):
os.chdir(root)
try:
os.chdir('DATA')
        except Exception:
pass
self.filename = qtw.QFileDialog.getOpenFileName(directory='home/pi/gordata/DATA')[0]
print("File :", self.filename)
self.datacache = pd.read_csv(self.filename, index_col='t')
try:
self.ui.combo_TF.clear()
        except Exception:
pass
for item in self.datacache.columns:
self.ui.combo_TF.addItem(item)
#self.ui.combo_TF.setCurrentIndex(0)
#self.plotTF()
def plotTF(self):
frame = str(self.ui.combo_TF.currentText())
data = self.datacache[[frame]]
plt.clf()
try:
self.ui.vLayout_TF.removeWidget(self.canvTF)
self.ui.hLayout_TF.removeWidget(self.toolbarTF)
self.toolbarTF = None
self.canvTF = None
except Exception as e:
print('warning =>> '+str(e))
pass
self.canvTF = MatplotlibCanvas(self)
self.toolbarTF = Navi(self.canvTF,self.ui.tab_TF)
self.ui.vLayout_TF.addWidget(self.canvTF,10)
self.ui.hLayout_TF.addWidget(self.toolbarTF)
self.canvTF.axes.cla()
t, f, S_db = sp.spect(data, 1666, print=False)
self.canvTF.axes.set_xlabel('Time')
self.canvTF.axes.set_ylabel('Frequency')
#self.canvTF.axes.set_title('Time-Frequency - {}'.format(frame))
try:
#self.canvTF.axes.pcolormesh(t, f, S_db, shading='gouraud', cmap='turbo')
self.canvTF.axes.imshow(np.flip(S_db,axis=0), aspect='auto', cmap='turbo', interpolation='gaussian', extent=[t[0], t[-1], f[0], f[-1]])
except Exception as e:
print('warning =>> '+str(e))
pass
self.canvTF.draw()
self.canvTF.fig.tight_layout()
def updatePlot(self, plotdata):
plt.clf()
try:
self.ui.vLayout_plot.removeWidget(self.canv)
self.ui.hLayout_plot.removeWidget(self.toolbar)
self.toolbar = None
self.canv = None
except Exception as e:
            print('warning =>> ' + str(e))
pass
self.canv = MatplotlibCanvas(self)
self.toolbar = Navi(self.canv,self.ui.tab_plot)
self.ui.hLayout_plot.addWidget(self.toolbar)
self.ui.vLayout_plot.addWidget(self.canv)
self.canv.axes.cla()
try:
self.canv.axes.plot(plotdata)
self.canv.axes.legend(plotdata.columns)
except Exception as e:
print('==>',e)
self.canv.draw()
def showmessage(self, msg):
msgBox = qtw.QMessageBox()
msgBox.setIcon(qtw.QMessageBox.Information)
msgBox.setText(msg)
msgBox.setWindowTitle("Calibration")
msgBox.setStandardButtons(qtw.QMessageBox.Ok | qtw.QMessageBox.Cancel)
return msgBox.exec()
def calibration(self):
os.chdir(root)
if 'sensors' not in os.listdir():
os.mkdir('sensors')
os.chdir('sensors')
device = dn.dev[self.ui.comboBox.currentIndex()]
msg, ok = qtw.QInputDialog().getText(self,
'Name your IMU',
'Type the name of your IMU for calibration: ',
qtw.QLineEdit.Normal)
if ok and msg:
sensor ={'name': msg}
_path = 'rawdata_{}.csv'.format(sensor['name'])
else:
print('cancelled')
return
NS, ok = qtw.QInputDialog().getInt(self, 'Sample Length',
                                           'Number of seconds per Position: ',
5, 1, 10, 1)
if ok:
self.NS = NS*dn.fs
print(self.NS)
else:
print('cancelled')
return
ND, ok = qtw.QInputDialog().getInt(self, 'Sample Length',
                                           'Number of seconds per Rotation: ',
5, 1, 10,1)
if ok:
self.ND = ND*dn.fs
print(self.ND)
else:
print('cancelled')
return
self.calibrationdata = np.zeros((6*self.NS+3*self.ND, 6))
ii=0
i=0
while ii < 6:
ok = self.showmessage('Move your IMU to the '+str(ii+1)+' position')
if ok:
print('collecting position '+ str(ii+1))
ti = tf = time.perf_counter()
while i<(ii+1)*self.NS:
tf=time.perf_counter()
if tf-ti>=dn.dt:
ti = tf
try:
self.calibrationdata[i,:] = np.array(dn.pull(device))
i+=1
except Exception as e:
print(e)
self.calibrationdata[i,:] = 6*(0,)
else:
print('cancelled')
return
ii+=1
print(i)
ii=0
while ii <3:
ok = self.showmessage('Rotate Cube Around Axis '+str(ii+1))
if ok:
print('collecting rotation '+ str(ii+1))
ti = tf = time.perf_counter()
while i<(6*self.NS+((ii+1)*self.ND)):
tf=time.perf_counter()
if tf-ti>=dn.dt:
ti = tf
try:
self.calibrationdata[i,:] = np.array(dn.pull(device))
i+=1
except Exception as e:
print(e)
self.calibrationdata[i,:] = 6*(0,)
else:
print('cancelled')
return
ii+=1
self.calibrationdata = np.array(self.calibrationdata)
df = | pd.DataFrame(self.calibrationdata) | pandas.DataFrame |
import pandas as pd
import numpy as np
def load_and_process(url_or_path_to_csv_file):
wdata = (
pd.read_csv(url_or_path_to_csv_file)
.rename(columns={'FISCAL_YEAR':'year', 'HEALTH_AUTHORITY': 'h_a', 'HOSPITAL_NAME':'hosp', 'PROCEDURE_GROUP': 'prcd', 'WAITING':'waiting', 'COMPLETED': 'comp', 'COMPLETED_50TH_PERCENTILE': 'wait_med', 'COMPLETED_90TH_PERCENTILE':'wait_90'})
.dropna()
)
wdata['waiting'] = (
wdata['waiting'].str.replace(',','')
.str.replace('<5','3')
)
wdata['comp'] = (
wdata['comp'].str.replace(',','')
.str.replace('<5','3')
)
wdata['waiting'] = | pd.to_numeric(wdata['waiting']) | pandas.to_numeric |
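# A minimal usage sketch of load_and_process(); the path below is hypothetical and
# stands for any CSV with the BC surgical wait-times columns renamed above.
def _example_load_and_process():
    wait_times = load_and_process("data/raw/bc_surgical_wait_times.csv")
    return wait_times.head()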
# This script converts an input list of variables from World Bank data into a panel dataset
# Importing required modules
import pandas as pd
# Specifying the location of the World Bank data file
fp = input('Filepath for World Bank data file: ')
# Reading in the data
wb = | pd.read_csv(fp) | pandas.read_csv |
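# A sketch of the wide-to-long reshape such a panel conversion typically needs,
# assuming the standard World Bank export columns ('Country Name', 'Country Code',
# 'Series Name', 'Series Code' plus one column per year); the column names and the
# '..' missing-value marker are assumptions, not taken from the script above.
def _to_panel_sketch(wb_wide):
    id_cols = ['Country Name', 'Country Code', 'Series Name', 'Series Code']
    long = wb_wide.melt(id_vars=id_cols, var_name='year', value_name='value')
    long['value'] = pd.to_numeric(long['value'], errors='coerce')  # '..' becomes NaN
    return long.pivot_table(index=['Country Name', 'Country Code', 'year'],
                            columns='Series Name', values='value').reset_index()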
""" Getting final clusters data
:Author: <NAME> <<EMAIL>>
:Date: 2019-09-30
:License: MIT
"""
# Import Libraries
import pandas as pd
import numpy as np
import random
from collections import Counter
def main():
# clusters group user id
clusters_users = pd.read_csv('clusters_final.csv')
# clusters group user id
clusters_users_names = pd.read_csv('clusters_group_names.csv')
# user average sentiment
users_sentiment = pd.read_csv('users_ave_sentiment.csv')
# user favorite book
users_favbook = pd.read_csv('user_book_fav.csv')
# map user idx with user id to extract from this df
ratings_books = pd.read_csv('ratings_books_u80_b10.csv')
# map user id with user goodreads id
user_id_map = pd.read_csv('user_id_map.csv')
# Mapping user idx for clustering to user id from csv
user_idx_id = ratings_books.drop(ratings_books.columns[[1, 2, 3, 4, 5, 6, 8]], axis=1)
user_id_list = list(set(user_idx_id['user_id']))
user_idx_list = list(set(user_idx_id['user_idx']))
user_idx_id = user_idx_id.drop_duplicates()
user_idx_id.columns = ['user_id_csv', 'user_idx']
# map user id with user id gr
user_dict = user_id_map.set_index('user_id_csv').T.to_dict('list')
user_idx_id['user_id_gr'] = user_idx_id['user_id_csv'].map(user_dict)
user_idx_id['user_id_gr'] = user_idx_id['user_id_gr'].str.get(0)
# map user sentiment with user id gr
user_dict = user_id_map.set_index('user_id').T.to_dict('list')
users_sentiment.columns = ['user_id_gr', 'Average sentiment']
# merging DataFrames of user idx and sentiment
test = pd.merge(user_idx_id, users_sentiment, on='user_id_gr')
# merging with clustering DataFrame
test2 = pd.merge(test, clusters_users, on='user_idx')
# merging with users favorite books
users_favbook.columns = ['user_id_csv', 'book_id', 'title']
test3 = pd.merge(test2, users_favbook, on='user_id_csv')
# merging with users names
final = | pd.merge(test3, clusters_users_names, on='user_idx') | pandas.merge |
#!/usr/bin/env python
__copyright__ = "Copyright 2018-2019, The PICRUSt Project"
__license__ = "GPL"
__version__ = "2.2.0-b"
import sys
from collections import defaultdict
from joblib import Parallel, delayed
from os import path
import scipy.stats
import pandas as pd
import numpy as np
import copy
from picrust2.util import (system_call_check, check_files_exist,
make_output_dir, read_seqabun)
from picrust2.metagenome_pipeline import (strat_funcs_by_samples,
metagenome_contributions,
contrib_to_unstrat)
class PathwaysDatabase:
'''Holds all of the reactions/pathways data from the file provided.
This class was taken from HUMAnN2 v0.11.1 and was modified only slightly so
that it could be used without additional classes and functions defined in
HUMAnN2.'''
def _is_optional_reaction(self, item, reaction_names=[]):
'''Check if this reaction is optional.'''
# If item matches reaction name perfectly then it can't be optional.
        # Similarly, some reactions can begin with "--"; if a reaction begins
        # with that, it likely would not be optional.
if item in reaction_names or item[0:2] == "--":
return False
elif item[0] == "-":
# Otherwise call it as optional if the item starts with "-".
return True
else:
return False
def _find_reaction_list_and_key_reactions(self, items,
reaction_names=None):
'''Find the reactions in the pathways items and also the key
reactions.'''
reaction_list = []
key_reactions = []
for item in items:
# Ignore items that are not reactions as they are part of pathway
# structure.
if item not in ["(", ")", "", "+", ","]:
# Check if this reaction is optional and remove the first
# character if so.
if self._is_optional_reaction(item, reaction_names):
item = item[1:]
else:
# Record that this is a key reaction (i.e. not optional).
key_reactions.append(item)
reaction_list.append(item)
return reaction_list, key_reactions
def _find_structure(self, items, reaction_names=None):
'''Find the structure of the pathway from the string.'''
# Initialize dictionary that will contain pathway structure (which can
# include multiple levels).
structure = [" "]
levels = {0: structure}
current_level = 0
# Loop through items (which can include reactions and syntax of the
# pathway structure)
for item in items:
if item:
# Check if the item name indicates an optional reaction.
# If so, remove the "-" at the beginning of the name.
if self._is_optional_reaction(item, reaction_names):
item = item[1:]
# Check if this is the start of a list.
if item == "(":
# Initialize empty list at next level.
new_list = [" "]
levels[current_level + 1] = new_list
# Add the new list to the structure.
levels[current_level].append(new_list)
# Update the current level.
current_level += 1
# Check if this is the end of a list.
elif item == ")":
# If so, update the current level to close the list.
current_level -= 1
# Check if this is a delimiter
elif item in ["+", ","]:
# Update the delimiter at the beginning of the list.
levels[current_level][0] = item
else:
levels[current_level].append(item)
return structure
def _set_pathways_structure(self, reactions, reaction_names=None):
'''Determine the pathways structure from the input string.'''
for pathway in reactions:
# Check if the item is a list of items.
if isinstance(reactions[pathway], list):
reactions[pathway] = " ".join(reactions[pathway])
# Split the reactions information by the structured pathways
# delimiter.
reactions[pathway] = reactions[pathway].split(" ")
# Find and store the structure for the pathway.
structure = self._find_structure(reactions[pathway],
reaction_names)
self.__pathways_structure[pathway] = structure
# Find the list of reactions and the key reactions.
reaction_list, key_reactions = self._find_reaction_list_and_key_reactions(reactions[pathway],
reaction_names)
# Store the list of key reactions for the pathway.
self.__key_reactions[pathway] = key_reactions
# Update the reactions dictionary to contain the list of reactions
# instead of the structure string.
reactions[pathway] = reaction_list
return reactions
def _store_pathways(self, reactions):
'''Create the dictionaries of reactions to pathways and pathways to
reactions.'''
for pathway in reactions:
for reaction in reactions[pathway]:
self.__pathways_to_reactions[pathway] = self.__pathways_to_reactions.get(pathway, []) + [reaction]
self.__reactions_to_pathways[reaction] = self.__reactions_to_pathways.get(reaction, []) + [pathway]
def __init__(self, database=None, reaction_names=[]):
'''Load in the pathways data from the database file.'''
self.__pathways_to_reactions = {}
self.__reactions_to_pathways = {}
self.__pathways_structure = {}
self.__key_reactions = {}
if database is not None:
# Check that database file exists.
check_files_exist([database])
file_handle = open(database, "rt")
line = file_handle.readline()
# The database is expected to contain a single line per pathway.
# This line begins with the pathway name and is followed by all
            # reactions. Alternatively, it could also contain a single pathway
# and reaction link per line if the pathways aren't structured.
reactions = defaultdict(list)
structured_pathway = False
while line:
data = line.strip().split("\t")
if len(data) > 1:
# Remove pathway from this list.
pathway = data.pop(0)
# Add new key-value pair in reactions dict of pathway to
# all reactions.
reactions[pathway] += data
# Check to see if this pathway has structure.
if "(" in data[0]:
structured_pathway = True
line = file_handle.readline()
file_handle.close()
# If this is a structured pathways set, then store the structure.
if structured_pathway:
reactions = self._set_pathways_structure(reactions,
reaction_names)
self._store_pathways(reactions)
def is_structured(self):
'''Return True if this is a set of structured pathways.'''
if self.__pathways_structure:
return True
else:
return False
def add_pathway_structure(self, pathway, structure,
reactions_database=None):
'''Add the string structure for a pathway.'''
reaction_names = None
if reactions_database is not None:
reaction_names = reactions_database.reaction_list()
reactions = self._set_pathways_structure({pathway: structure},
reaction_names)
self._store_pathways(reactions)
def add_pathway(self, pathway, reactions):
'''Add the unstructured pathway.'''
self._store_pathways({pathway: reactions})
def get_structure_for_pathway(self, pathway):
'''Return the structure for a pathway.'''
return copy.deepcopy(self.__pathways_structure.get(pathway, []))
def get_key_reactions_for_pathway(self, pathway):
'''Return the key reactions for a pathway.'''
return copy.copy(self.__key_reactions.get(pathway, []))
def find_reactions(self, pathway):
'''Return the list of reactions associated with the pathway.'''
return copy.copy(self.__pathways_to_reactions.get(pathway, []))
def find_pathways(self, reaction):
'''Return the list of pathways associated with the reaction.'''
return copy.copy(self.__reactions_to_pathways.get(reaction, []))
def reaction_list(self):
'''Return the list of reactions included in the database.'''
return list(self.__reactions_to_pathways.keys())
def pathway_list(self):
'''Return the list of pathways included in the database.'''
return list(self.__pathways_to_reactions.keys())
def get_database(self, min_reactions=1):
'''Return the database as a flat file with a single pathway per
line.'''
data = []
for pathway in sorted(self.__pathways_to_reactions):
reactions = self.__pathways_to_reactions[pathway]
if len(reactions) < min_reactions:
continue
else:
data.append(pathway + " " + " ".join(reactions))
return "\n".join(data)
def pathway_pipeline(inputfile,
mapfile,
out_dir,
proc=1,
run_minpath=True,
coverage=False,
no_regroup=False,
regroup_mapfile=None,
gap_fill_on=True,
per_sequence_contrib=False,
per_sequence_abun=None,
per_sequence_function=None,
wide_table=False,
print_cmds=False):
    '''Full pipeline for reading input files, making
calls to functions to run MinPath and calculate pathway abundances and
coverages. Will return: (1) unstratified pathway abundances, (2)
unstratified pathway coverages, (3) stratified pathway abundances, (4)
stratified pathway coverages, (5) pathway abundance predictions per
sequence, (6) pathway coverage predictions per sequence, (7) unstratified
    pathway abundances based on per-sequence predictions. A value of None
    will be returned for any non-applicable value.'''
# If no regrouping flag set then set input regrouping mapfile to be None.
if no_regroup:
regroup_mapfile = None
# Read in table of gene family abundances and determine if in unstratified,
# stratified, or contribution format.
in_metagenome, in_format = read_metagenome_input(inputfile)
# Basic checks if --per_sequence_contrib set.
if per_sequence_contrib:
# Throw error if --per_sequence_contrib set, but --per_sequence_abun
# and/or --per_sequence_function not set.
if not per_sequence_abun or not per_sequence_function:
sys.exit("Error: \"--per_sequence_contrib\" option set, but at "
"least one of \"per_sequence_abun\" or "
"\"--per_sequence_function\" were not set. These input "
"arguments need to be specified when "
"\"--per_sequence_contrib\" is used.")
check_files_exist([per_sequence_abun, per_sequence_function])
# Throw error file-format and wide table setting not compatible.
if in_format == "strat" and not wide_table:
sys.exit("Error: stratified table input (deprecated format), but "
"\"--wide_table\" option not set. You should input either an "
"unstratified or contributional table if you do not require "
"a wide-format table.")
if in_format == "contrib" and wide_table and not per_sequence_contrib:
sys.exit("Error: contributional table input, but \"--wide_table\" "
"option set. This option specifies that deprecated "
"wide-format stratified tables should be output, which "
"is only allowed when a wide-format stratified table is "
"input or the --per_sequence_contrib option is set.")
# Remove 'description' column if it exists.
if "description" in in_metagenome.columns:
in_metagenome.drop("description", axis=1, inplace=True)
# Get list of sample ids.
if in_format == "contrib":
samples = in_metagenome['sample'].unique()
else:
samples = [col for col in in_metagenome.columns
if col not in ["function", "sequence"]]
# Initialize reactions to be empty unless regroup mapfile given.
reactions = []
# Regroup functions in input table to different ids if regroup mapfile is
# provided.
if regroup_mapfile:
reactions = read_reaction_names(regroup_mapfile)
in_metagenome = regroup_func_ids(in_metagenome, in_format,
regroup_mapfile, proc)
regrouped_outfile = path.join(out_dir, "regrouped_infile.tsv")
in_metagenome.to_csv(path_or_buf=regrouped_outfile, sep="\t",
index=False)
# Read in pathway structures.
pathways_in = PathwaysDatabase(database=mapfile, reaction_names=reactions)
# Write out mapfile with all structure removed.
if run_minpath:
minpath_mapfile = path.join(out_dir, "parsed_mapfile.tsv")
with open(minpath_mapfile, "w") as out_map:
out_map.write(pathways_in.get_database())
else:
minpath_mapfile = None
# Subset input table of reactions to only those found in pathway database.
in_metagenome = in_metagenome[in_metagenome.function.isin(pathways_in.reaction_list())]
# Initialize output objects to be None (expect for unstratified abundance).
path_cov_unstrat = None
path_cov_strat = None
path_abun_strat = None
path_cov_by_seq = None
path_abun_by_seq = None
path_abun_unstrat_by_seq = None
minpath_out_dir = path.join(out_dir, "minpath_running")
make_output_dir(minpath_out_dir)
if in_format == "contrib":
# Get unstratified and stratified pathway levels.
# Note that stratified tables will only be returned by this step (and
# the "strat" option below) if per_sequence_contrib=False (extra step
# required below).
path_out_raw = Parallel(n_jobs=proc)(delayed(contrib_format_pathway_levels)(
sample_id,
in_metagenome.loc[in_metagenome['sample'] == sample_id],
minpath_mapfile,
minpath_out_dir,
pathways_in,
run_minpath,
coverage,
gap_fill_on,
per_sequence_contrib,
print_cmds)
for sample_id in samples)
elif in_format == "strat":
path_out_raw = Parallel(n_jobs=proc)(delayed(basic_strat_pathway_levels)(
sample_id,
in_metagenome[["function", "sequence", sample_id]],
minpath_mapfile,
minpath_out_dir,
pathways_in,
run_minpath,
coverage,
gap_fill_on,
per_sequence_contrib,
print_cmds)
for sample_id in samples)
# Otherwise the data is in unstratified format, which is more straight-
# forward to process.
else:
path_out_raw = Parallel(n_jobs=proc)(delayed(
unstrat_pathway_levels)(
sample_id,
in_metagenome[["function", sample_id]],
minpath_mapfile,
minpath_out_dir,
pathways_in,
run_minpath,
coverage,
gap_fill_on,
print_cmds)
for sample_id in samples)
# Prep output unstratified DataFrames.
path_raw_abun_unstrat = []
path_raw_cov_unstrat = []
for sample_output in path_out_raw:
path_raw_abun_unstrat += [sample_output[0]]
path_raw_cov_unstrat += [sample_output[1]]
path_abun_unstrat = prep_pathway_df_out(path_raw_abun_unstrat)
path_abun_unstrat.columns = samples
path_abun_unstrat.sort_index(axis=0, inplace=True)
if coverage:
path_cov_unstrat = prep_pathway_df_out(path_raw_cov_unstrat,
num_digits=10)
path_cov_unstrat.columns = samples
path_cov_unstrat.sort_index(axis=0, inplace=True)
else:
path_cov_unstrat = None
# If --per_sequence_contrib not set then prep output stratified
# table the same as the unstratified tables.
if not per_sequence_contrib and in_format != "unstrat":
path_raw_abun_strat = []
for sample_output in path_out_raw:
path_raw_abun_strat += [sample_output[2]]
if in_format == "strat":
path_abun_strat = prep_pathway_df_out(path_raw_abun_strat,
strat_index=True)
path_abun_strat.columns = ["pathway", "sequence"] + samples
path_abun_strat.sort_values(['pathway', 'sequence'], inplace=True)
elif in_format == "contrib":
path_abun_strat = pd.concat(path_raw_abun_strat)
path_abun_strat.sort_values(['sample', 'function', 'taxon'],
inplace=True)
# Calculate pathway levels for each individual sequence (in parallel)
# and then multiply this table by the abundance of each sequence
# within each sample (using same approach as in metagenome pipeline).
if per_sequence_contrib:
per_seq_out_dir = path.join(out_dir, "minpath_running_per_seq")
make_output_dir(per_seq_out_dir)
path_abun_strat, \
path_cov_strat, \
path_abun_by_seq, \
path_cov_by_seq = per_sequence_contrib_levels(sequence_abun=per_sequence_abun,
sequence_func=per_sequence_function,
minpath_map=minpath_mapfile,
per_seq_out_dir=per_seq_out_dir,
pathway_db=pathways_in,
run_minpath=run_minpath,
calc_coverage=coverage,
gap_fill_on=gap_fill_on,
nproc=proc,
regroup_map=regroup_mapfile,
wide_table=wide_table,
print_opt=print_cmds)
if wide_table:
path_abun_unstrat_by_seq = strat_to_unstrat_counts(strat_df=path_abun_strat,
func_col="pathway")
else:
path_abun_unstrat_by_seq = contrib_to_unstrat(contrib_table=path_abun_strat,
sample_order=list(path_abun_unstrat.columns.values))
return(path_abun_unstrat, path_cov_unstrat, path_abun_strat,
path_cov_strat, path_abun_by_seq, path_cov_by_seq,
path_abun_unstrat_by_seq)
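# A minimal call sketch for pathway_pipeline(); the file names are placeholders,
# not actual PICRUSt2 defaults, and regrouping is skipped to keep the example short.
def _example_pathway_pipeline():
    return pathway_pipeline(inputfile="pred_metagenome_unstrat.tsv.gz",
                            mapfile="pathway_map.tsv",
                            out_dir="pathways_out",
                            proc=1,
                            no_regroup=True)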
def prep_pathway_df_out(in_tab, strat_index=False, num_digits=4):
'''Takes in lists of pathway abundances or coverages in series format and
converts them into pandas dataframe to be output. If index labels are in
stratified format then convert them to columns.'''
# Convert these returned lists of series into pandas dataframes.
in_tab_df = | pd.concat(in_tab, axis=1, sort=True) | pandas.concat |
"""
Functions to extract information from local data.
"""
import pathlib
import re
from typing import Union
import fiona
import geopandas as gpd
import numpy as np
import pandas as pd
def _extract_year(x: Union[str, pathlib.Path]) -> int:
"""
Extracts a four-digit valid year (1900-2099) from a string or path.
If a path is given, the year will be extracted from the stem and
all other parts of the path will be ignored. If there are multiple
four-digit valid years on the string, the first occurrence is
returned.
Parameters
----------
x : str
String to extract the year from.
Returns
-------
int
Four-digit integer representing the year.
"""
if isinstance(x, pathlib.Path):
x = x.stem
expr = r"(?:19|20)\d{2}"
matches = re.findall(expr, x)
if matches:
year = matches[0]
else:
raise Exception("The string does not have any valid year.")
return int(year)
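# Illustrative examples for _extract_year(); the file names are made up.
def _example_extract_year():
    assert _extract_year("landcover_2014_v2.tif") == 2014
    # For a Path only the stem is inspected, so the year comes from "forest_1990":
    assert _extract_year(pathlib.Path("/data/2008/forest_1990.shp")) == 1990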
def _get_nearest_year(
dates: pd.Series,
reference_years: Union[list, tuple],
direction: str,
default_year: str = None,
) -> pd.Series:
"""
Get the nearest year for each row in a Series from a given list of
years.
Parameters
----------
dates : Series
Series with datetime-like objects.
reference_years : list or tuple
List of years to round each row to.
direction : str
Whether to search for prior, subsequent, or closest matches. Can
be
- 'backward'
- 'nearest'
- 'forward'
For more information go to:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.merge_asof.html
default_year : str
Default year to take for records that do not have a collection
date or whose collection data did not match with any year. Can be:
- 'last': takes the earliest year in the historical data
- 'fist': takes the latest year in the historical data
- 'none': skips a default year assignation. Keep in mind that
records without a collection date won't be validated.
Returns
-------
Series
Series with the nearest years.
"""
dates = dates.copy()
if not dates.name:
dates.name = "__date"
reference_years = sorted(reference_years)
has_date = dates.notna()
years = pd.to_datetime(dates[has_date]).dt.year
years = years.sort_values()
dummy_df = pd.DataFrame({years.name: reference_years, "__year": reference_years})
result = | pd.merge_asof(years, dummy_df, on=years.name, direction=direction) | pandas.merge_asof |
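# A minimal usage sketch of _get_nearest_year(); the dates and reference years are
# made up, and 'backward' picks the closest prior reference year for each record.
def _example_get_nearest_year():
    dates = pd.Series(["2003-05-10", "2011-07-01", None], name="eventDate")
    return _get_nearest_year(dates, reference_years=[2000, 2005, 2010],
                             direction="backward")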
# -*- coding: utf-8 -*-
"""
Nisupply: A repository for finding and managing files in
(unstructured) neuroimaging datasets.
@author: Johannes.Wiesner
"""
import numpy as np
import pandas as pd
import os
import gzip
import shutil
import re
import pathlib
from warnings import warn
# TO-DO: File extensions should be optional = just return all files you can find
# TO-DO: Searching for a specific order of directories should be included
# (e.g. search for files that contain '\session_1\anat\)
# For that maybe this stackoverflow post helps:
# https://stackoverflow.com/questions/5141437/filtering-os-walk-dirs-and-files
# TO-DO: Allow user also to define regex. You might want to use the pathmatcher
# module for that. For example, this is the offical CAT12
# regex that is also used in CAT12 to find .xml files: ^cat_.*\.xml$
# Using regex instead (or in combination) with/of file_prefix + file_extension
# might be more 'fail-safe' to find exactly the files, that the user is looking for
def find_files(src_dir,file_suffix='.nii.gz',file_prefix=None,preceding_dirs=None,
case_sensitive=True):
'''Find files in a single source directory. Files are found based on a
specified file suffix. Optionally, the function can filter for files
using an optional file prefix and a list of preceding directories that must be
part of the filepath.
Parameters
----------
src_dir: path
A directory that should be searched for files.
file_suffix: str, tuple of strs
One or multiple strings on which the end of the filepath should match.
Default: '.nii.gz'
file_prefix: str, tuple of strs
One or multiple strings on which the beginning of the filepath should match.
Default: None
preceding_dirs: str, list of strs or None
Single name of a directory or list of directories that must be
components of each filepath
Default: None
case_sensitive: Boolean
If True, matching is done by the literal input of file suffixes and
prefixes and files. If False, both the inputs and the files are converted
to lowercase first in order to match on characters only regardless of lower-
or uppercase-writing.
Returns
-------
filepath_list: list
A list containing filepaths for the found files.
'''
# change provided scr_dir path to os-specific slash type
src_dir = os.path.normpath(src_dir)
filepath_list = []
    if not case_sensitive:
        # Only lower-case string filters; a None (or tuple) filter is left as-is.
        if isinstance(file_suffix, str):
            file_suffix = file_suffix.lower()
        if isinstance(file_prefix, str):
            file_prefix = file_prefix.lower()
# search for files that match the given file extension.
# if prefix is defined, only append files that match the given prefix
for (paths, dirs, files) in os.walk(src_dir):
for file in files:
if not case_sensitive:
file = file.lower()
if file.endswith(file_suffix):
if file_prefix:
if file.startswith(file_prefix):
filepath_list.append(os.path.join(paths,file))
else:
filepath_list.append(os.path.join(paths,file))
# Filter list of found files by deleting filepaths from list whose path
# components do not match any of the given preceding directories
if preceding_dirs:
# if only one preceding_dirs is provided as string, convert to list
# of single string
if isinstance(preceding_dirs,str):
preceding_dirs = [preceding_dirs]
tagged_files_indices = []
for idx,filepath in enumerate(filepath_list):
if not any(path_component in filepath.split(os.sep) for path_component in preceding_dirs):
tagged_files_indices.append(idx)
for idx in sorted(tagged_files_indices,reverse=True):
del filepath_list[idx]
    # Warn if no files were found
    if len(filepath_list) == 0:
        warn('No files that match the given filter(s) were found within this directory: {}'.format(src_dir))
return filepath_list
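# A minimal usage sketch of find_files(); the directory and filters are hypothetical.
def _example_find_files():
    return find_files(src_dir='/data/bids_dataset',
                      file_suffix='.nii.gz',
                      file_prefix='sub-',
                      preceding_dirs=['anat', 'func'])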
def get_participant_id(filepath,pattern='(sub-)([a-zA-Z0-9]+)',regex_group=2):
'''Extract a participant ID from a filepath using a regex-match'''
match = re.search(pattern,filepath)
if match:
if regex_group:
return match.group(regex_group)
else:
return match.group()
else:
warn(f"Could not extract participant ID from {filepath}")
return np.nan
# FIXME: If scr_dir is list-like, perform sanity check and ensure that
# each participant id is mapped on one and only one source directory (aka.
# both arrays must be the same length).
# FIXME: Both particpant_ids and list-like src_dir should be checked for NaNs.
def get_filepath_df(src_dirs,participant_ids=None,id_pattern='(sub-)([a-zA-Z0-9]+)',regex_group=2,**kwargs):
'''Find files for multiple participants in one or multiple source directories.
Parameters
----------
src_dirs: list-like of str, str
If provided without participant IDS, the function will map each found file
to a participant ID using the given regex pattern.
If a list of directories is provided together with a list of participant
IDs, it is assumed that each directory only contains files for that
particular participant, thus the files are mapped to the respective
participant ID without a regex match.
If provided as a single directory together with a list of participant
IDs, it is assumed that all files of the participants are in the same directory.
The function will then map each found file to one of the given participant IDs using a
specified regex pattern.
participant_ids: list-like, None
A list-like object of unique participant IDs or None
Default: None
id_pattern: regex-pattern
The regex-pattern that is used to extract the participant ID from
each filepath. By default, this pattern uses a BIDS-compliant regex-pattern.
Default: '(sub-)([a-zA-Z0-9]+)'
regex_group: int, or None
If int, match the int-th group of the regex match. If none, just return
match.group(). Default: 2
kwargs: key, value mappings
Other keyword arguments are passed to :func:`nisupply.find_files`
Returns
-------
filepath_df: pd.DataFrame
A data frame with the provided participant IDs in the first column
and all corresponding filepaths in the second column.
'''
if not isinstance(participant_ids,(str,list,pd.Series)):
if isinstance(src_dirs,str):
filepath_list = find_files(src_dir=src_dirs,**kwargs)
participant_ids = [get_participant_id(filepath,id_pattern,regex_group) for filepath in filepath_list]
filepath_df = pd.DataFrame({'participant_id':participant_ids,'filepath':filepath_list})
if isinstance(src_dirs,(list,pd.Series)):
filepath_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import traceback
import numpy as np
import pandas as pd
import tushare as ts
from Database.DatabaseEntry import DatabaseEntry
class StockCalendar:
def __init__(self):
self.__calendar = pd.DataFrame({'Date': [], 'IsOpen': []})
def inited(self) -> bool:
pass
# Fetch data from internet.
def fetch_data(self, **kw) -> bool:
df = ts.trade_cal()
if df is None:
return False
df.rename(columns={'calendarDate': 'Date', 'isOpen': 'IsOpen'}, inplace=True)
df.index.name = 'Serial'
self.__calendar = df
return True
# Auto check and update data to DB. Depends on Collector's implementation.
def check_update(self, **kw) -> bool:
return self.force_update()
# Force update all data in DB.
def force_update(self, **kw) -> bool:
return self.fetch_data() and self.dump_to_db()
# Flush memory data to DB
def dump_to_db(self, **kw) -> bool:
if self.__calendar is None:
return False
return DatabaseEntry().get_utility_db().DataFrameToDB('StockCalendar', self.__calendar.reset_index())
def load_from_db(self, **kw) -> bool:
self.__calendar = DatabaseEntry().get_utility_db().DataFrameFromDB('StockCalendar', ["Date", "IsOpen"])
if self.__calendar is not None:
return True
else:
self.__calendar = | pd.DataFrame({'Date': [], 'IsOpen': []}) | pandas.DataFrame |
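# A minimal usage sketch, assuming a configured DatabaseEntry backend and network
# access for tushare; error handling is reduced to a simple status check.
def _example_stock_calendar():
    calendar = StockCalendar()
    if not calendar.check_update():   # fetches the trade calendar and writes it to the DB
        print('calendar update failed')
    return calendar.load_from_db()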