from . import webcat, geo, prices
import pandas as pd
import aljpy
from pathlib import Path
DECISIONS = Path('data/decisions.json')
CUTS = {
'park': 10,
'town': 10,
'propvalue': 10000,
'friends': 45,
'aerial': 30,
'central': 60}
@aljpy.autocache(disk=False, memory=True)
def map_layers():
base = webcat.basemap()
maps = aljpy.dotdict({
'park': geo.green_spaces(base),
'town': geo.town_centers(base),
'propvalue': prices.layer(base)})
if geo.LOCATIONS:
maps.update({
'aerial': geo.aggtim(geo.LOCATIONS['aerial'].values(), 'min'),
'central': geo.aggtim(geo.LOCATIONS['central'].values(), 'mean', interval=10),
'friends': geo.aggtim(geo.LOCATIONS['friends'].values(), 'mean', interval=10)})
return maps
def dataframe(listings):
# Why is the API returning strings sometimes?
listings['rental_prices.per_month'] = pd.to_numeric(listings['rental_prices.per_month'])
listings = (listings
.loc[lambda df: pd.to_numeric(df['num_bedrooms']) <= 2]
.loc[lambda df: pd.to_numeric(df['num_bedrooms'])
from rest_framework import permissions, status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from rest_framework.views import APIView
from datetime import date, datetime, timedelta
from django.forms.models import model_to_dict
from django.db.models import Q, Count, F, Sum
from django.db.models.functions import TruncWeek, TruncMonth, TruncYear
from django.apps import apps
from django.core.files.storage import default_storage
from .serializers import *
from .models import *
from .content_based_recommender import ContentBasedRecommender
from .utils import *
from pathlib import Path
from google.analytics.data_v1beta import BetaAnalyticsDataClient
from google.analytics.data_v1beta.types import DateRange
from google.analytics.data_v1beta.types import Dimension
from google.analytics.data_v1beta.types import Metric
from google.analytics.data_v1beta.types import RunReportRequest
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
from slugify import slugify
import pandas as pd
import random
import json
import uuid
import os
import pydash
import urllib3
import dotenv
# Read configuration files
base_dir = Path(__file__).resolve().parent.parent
module_dir = os.path.dirname(__file__)
mapping_template_file_path = os.path.join(module_dir, 'configuration/mapping_template.json')
schema_table_file_path = os.path.join(module_dir, 'configuration/schema_table.json')
schema_detail_file_path = os.path.join(module_dir, 'configuration/schema_detail.json')
ga4_json = os.path.join(module_dir, 'configuration/ga4.json')
ua_json = os.path.join(module_dir, 'configuration/ua.json')
# Initialize environment variables
dotenv.load_dotenv(os.path.join(base_dir, '.env'))
# Global variables
API_KEY = os.environ['API_KEY']
IP_DOMAIN = os.environ['IP_DOMAIN']
scope = 'https://www.googleapis.com/auth/analytics.readonly'
dimensions = ['date', 'eventName', 'pageLocation', 'browser', 'deviceCategory', 'operatingSystem', 'country']
metrics = ['eventCount', 'sessions']
ua_dimensions = ['ga:date', 'ga:eventCategory', 'ga:pagePath', 'ga:browser', 'ga:deviceCategory', 'ga:operatingSystem', 'ga:country']
ua_metrics = ['ga:totalEvents', 'ga:sessions']
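# A minimal sketch (not part of the original module) showing how the GA4 dimension/metric names
# above can be turned into a report request with the google-analytics-data client imported earlier.
# The property id is a placeholder; credentials are resolved by the client library
# (e.g. GOOGLE_APPLICATION_CREDENTIALS), depending on deployment.
def _example_ga4_run_report(property_id='properties/000000000'):
    client = BetaAnalyticsDataClient()
    request = RunReportRequest(
        property=property_id,
        dimensions=[Dimension(name=name) for name in dimensions],
        metrics=[Metric(name=name) for name in metrics],
        date_ranges=[DateRange(start_date='7daysAgo', end_date='today')],
    )
    # Each returned row pairs dimension_values and metric_values in the order requested above
    return client.run_report(request)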
@api_view(['GET'])
def home(request):
try:
# Initialize KPI reports
web_activity_report = []
event_report = []
product_report = []
traffics = {}
# Total number of web activities (interactions)
web_activities_file = len(Interaction_f.objects.all())
web_activities_ga = Interaction_ga.objects.all().aggregate(Sum('event_count'))['event_count__sum']
if (web_activities_ga is None):
web_activities_ga = 0
web_activities = web_activities_file + web_activities_ga
# Total number of sessions (a session includes multiple interactions)
sessions_file = len(Interaction_f.objects.values('session_id').distinct())
sessions_ga = Interaction_ga.objects.all().aggregate(Sum('session_count'))['session_count__sum']
if (sessions_ga is None):
sessions_ga = 0
sessions = sessions_file + sessions_ga
# Total number of web activities by page location
pages_file = Interaction_f.objects.all().values('page_location').annotate(total=Count('page_location'))
pages_ga = Interaction_ga.objects.all().values('page_location').annotate(total=Sum('event_count'))
pages = list(pages_file) + list(pages_ga)
if (len(pages)):
pages = pd.DataFrame(pages).groupby(['page_location'], as_index=False).sum().to_dict('r')
pages = sorted(pages, key=lambda k : k['total'], reverse=True)
# Total number of web activities by device categories
device_categories_file = Interaction_f.objects.all().values('device_category').annotate(total=Count('device_category'))
device_categories_ga = Interaction_ga.objects.all().values('device_category').annotate(total=Sum('event_count'))
device_categories = list(device_categories_ga) + list(device_categories_file)
for category in list(device_categories):
type = category['device_category']
if (type not in traffics):
traffics[type] = 0
traffics[type] += category['total']
# Web activities report - Total number of web activities by event name
web_activity_data_file = Interaction_f.objects.all().values('event_name').annotate(total=Count('event_name'))
web_activity_data_ga = Interaction_ga.objects.all().values('event_name').annotate(total=Sum('event_count'))
web_activity_data = list(web_activity_data_file) + list(web_activity_data_ga)
if (len(web_activity_data)):
web_activity_data = pd.DataFrame(web_activity_data).groupby(['event_name'], as_index=False).sum().to_dict('r')
web_activity_report = [(item['event_name'], item['total']) for item in list(web_activity_data)]
# Cultural event report - Total number of cultural events by event type
event_data = Events.objects.all().values('event_type').annotate(total=Count('event_type'))
event_report = [(item['event_type'], item['total']) for item in list(event_data)]
# Cultural product report - Total number of cultural products by product type
product_data = Products.objects.all().values('product_type').annotate(total=Count('product_type'))
product_report = [(item['product_type'], item['total']) for item in list(product_data)]
# Add info for report to generate charts
reports = [
{
'id': 'activity-chart',
'title': 'Statistiques d’activités Web par types',
'data': web_activity_report,
'type': 'pie',
},
{
'id': 'event-chart',
'title': 'Statistiques d’événements par types',
'data': event_report,
'type': 'column'
},
{
'id': 'product-chart',
'title': 'Statistiques d’articles par types',
'data': product_report,
'type': 'column'
},
]
return Response({'reports': reports,
'sessions': sessions,
'webActivities': web_activities,
'traffic': traffics,
'pages': pages}, status=status.HTTP_200_OK)
except Exception as exception:
return Response({'message': str(exception)})
class ItemList(APIView):
# Get list of items (all rows) from a table
def get(self, request, item_type):
try:
import_id = request.GET.get('importId', None)
# Read config file
item_list_schema = get_json_info(schema_table_file_path, item_type)
# Get info (model_name of item, list required fields to show, ...)
model_name = item_list_schema['model_name']
fields = item_list_schema['fields']
view_detail = item_list_schema['view_detail']
Model = apps.get_model(app_label='dimadb', model_name=model_name)
if (import_id is not None):
items = Model.objects.filter(import_id=import_id).values(*fields)
else:
items = Model.objects.all().values(*fields)
return Response({
'items': items,
'isViewDetail': view_detail,
}, status=status.HTTP_200_OK)
except Exception as exception:
return Response({'message': str(exception)})
class ItemDetail(APIView):
# Get item detail (detail of a row) from a table
def get(self, request, item_type, pk, format=None):
try:
# Read config file
item_detail_schema = get_json_info(schema_detail_file_path, item_type)
item_detail = get_item_detail_form(pk, item_detail_schema)
return Response(item_detail)
except Exception as exception:
return Response({'message': str(exception)})
# Update info
def put(self, request, item_type, pk, format=None):
try:
item_form = json.loads(request.body)
update_item_info(item_form)
return Response({'message': 'Updated successfully'}, status=status.HTTP_200_OK)
except Exception as exception:
return Response({'message': str(exception)})
# Delete info
def delete(self, request, item_type, pk, format=None):
try:
item_form = json.loads(request.body)
delete_item_info(item_form)
return Response({'message': 'Deleted successfully'}, status=status.HTTP_200_OK)
except Exception as exception:
return Response({'message': str(exception)})
# New info
def post(self, request, item_type, pk, format=None):
try:
item_form = json.loads(request.body)
update_item_info(item_form)
return Response({'message': 'Created successfully'}, status=status.HTTP_200_OK)
except Exception as exception:
return Response({'message': str(exception)})
# Get data(row) from a table(model)
def get_model_object(model_name, pk):
if (pk != 'form'):
try:
Model = apps.get_model(app_label='dimadb', model_name=model_name)
event = Model.objects.get(id=pk)
return model_to_dict(event)
except Model.DoesNotExist:
return {}
else:
return {}
# Get all information of an object from several tables (event information coming from event, geolocation, ...)
def get_item_detail_form(pk, schema_detail):
form_attributes = {}
# Get info from schema_detail
model_name = schema_detail['model_name']
fields = schema_detail['fields']
m2m_tables = []
o2m_tables = []
if ('m2m_tables' in schema_detail.keys()):
m2m_tables = schema_detail['m2m_tables']
if ('o2m_tables' in schema_detail.keys()):
o2m_tables = schema_detail['o2m_tables']
# Query item from db
Model = apps.get_model(app_label='dimadb', model_name=model_name)
obj = get_model_object(model_name, pk)
if ('id' in obj.keys()):
obj_id = obj['id']
else:
obj_id = None
# List of attributes consists of the field names in the primary table
for field in fields:
form_attributes[field] = {}
attribute_type = Model._meta.get_field(field).get_internal_type()
attribute_choices = Model._meta.get_field(field).choices
# Assign value for each field of item
if (field in obj.keys()):
form_attributes[field]['value'] = obj[field]
else:
form_attributes[field]['value'] = ''
# Assign data type for each field of item
if (attribute_choices != None):
form_attributes[field]['type'] = 'select'
form_attributes[field]['choices'] = [
value for (value, name) in attribute_choices]
else:
if (attribute_type == 'IntegerField'):
form_attributes[field]['type'] = 'integer'
elif (attribute_type == 'DecimalField'):
form_attributes[field]['type'] = 'decimal'
elif (attribute_type == 'TextField'):
form_attributes[field]['type'] = 'textarea'
elif (attribute_type == 'DateTimeField' or attribute_type == 'DateField'):
form_attributes[field]['type'] = 'date'
if form_attributes[field]['value'] == '' or form_attributes[field]['value'] is None:
form_attributes[field]['value'] = ''
else:
form_attributes[field]['value'] = form_attributes[field]['value'].strftime(
"%Y-%m-%d")
else:
form_attributes[field]['type'] = 'text'
# List of o2m tables consists of additional info of the item (geolocation, resource, etc.)
# Ex: event - eventpreference (o2m)
for o2m_table in o2m_tables:
o2m_display_name = o2m_table['display_name']
connected_field = o2m_table['connected_field']
# Get list of rows in o2m table
form_attributes[o2m_display_name] = {}
form_attributes[o2m_display_name]['type'] = 'o2m'
form_attributes[o2m_display_name]['value'] = get_o2m_items(o2m_table, obj_id)
element_attributes = get_item_detail_form('form', o2m_table)
element_attributes['connected_field'] = connected_field
form_attributes[o2m_display_name]['elementAttributes'] = element_attributes
form_info = {
'type': 'object',
'id': uuid.uuid4(),
'attributes': form_attributes,
'removed': False,
'status': 'new' if pk == 'form' else 'created',
'name': model_name
}
# List of m2m tables consists of additional info of the item (geolocation, resource, etc.)
# Ex: event - eventlocation (connected_table, which holds the primary keys of both tables) - geolocation (m2m)
for m2m_table in m2m_tables:
# Get config info
m2m_display_name = m2m_table['display_name']
connected_table = m2m_table['connected_table']
connected_field1 = m2m_table['connected_field1']
connected_field2 = m2m_table['connected_field2']
# Get list of rows in m2m table
form_attributes[m2m_display_name] = {}
form_attributes[m2m_display_name]['type'] = 'm2m'
form_attributes[m2m_display_name]['value'] = get_m2m_items(m2m_table, obj_id)
# Create an empty form info for m2m table
element_attributes = get_item_detail_form('form', m2m_table)
element_attributes['connectedAttributes'] = get_item_detail_form('form', connected_table)
element_attributes['connectedAttributes']['connected_field1'] = connected_field1
element_attributes['connectedAttributes']['connected_field2'] = connected_field2
form_attributes[m2m_display_name]['elementAttributes'] = element_attributes
return form_info
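# Illustrative only: a form_info dict returned above might look like the following
# (attribute and display names are hypothetical, the real ones come from schema_detail.json):
# {
#     'type': 'object',
#     'id': UUID('...'),
#     'name': 'events',
#     'status': 'created',   # 'new' when pk == 'form'
#     'removed': False,
#     'attributes': {
#         'event_name': {'type': 'text', 'value': 'Concert'},
#         'Dates': {'type': 'o2m', 'value': [...], 'elementAttributes': {...}},
#         'Locations': {'type': 'm2m', 'value': [...], 'elementAttributes': {...}},
#     },
# }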
# Update item based on form sent from GUI
def update_item_info(form_info, connected_field1_id=None):
status = form_info['status']
obj_id = form_info['attributes']['id']['value']
obj_info = filter_form_object_info(form_info['attributes'])
model_name = form_info['name']
Model = apps.get_model(app_label='dimadb', model_name=model_name)
if ('connected_field' in form_info.keys()):
connected_field = form_info['connected_field']
obj_info[connected_field] = connected_field1_id
if (status == 'new'): # If new info created
new_obj = Model(**obj_info)
new_obj.save()
update_multiple_items('m2m', form_info['attributes'], new_obj.id)
update_multiple_items('o2m', form_info['attributes'], new_obj.id)
if ('connectedAttributes' in form_info.keys()):
connected_field2_id = new_obj.id
create_connected_object(form_info['connectedAttributes'], connected_field1_id, connected_field2_id)
elif (status == 'created'): # If info updated
Model.objects.filter(id=obj_id).update(**obj_info)
updated_obj = Model.objects.get(id=obj_id)
update_multiple_items('m2m', form_info['attributes'], updated_obj.id)
update_multiple_items('o2m', form_info['attributes'], updated_obj.id)
if ('connectedAttributes' in form_info.keys()):
update_item_info(form_info['connectedAttributes'])
else: # If info deleted
delete_item_info(form_info)
# Delete row from database
def delete_item_info(form_info):
obj_id = form_info['attributes']['id']['value']
if (obj_id != ''):
model_name = form_info['name']
Model = apps.get_model(app_label='dimadb', model_name=model_name)
Model.objects.filter(id=obj_id).delete()
delete_multiple_items('m2m', form_info['attributes'])
delete_multiple_items('o2m', form_info['attributes'])
if ('connectedAttributes' in form_info.keys()):
delete_item_info(form_info['connectedAttributes'])
# Get all items in m2m table
def get_m2m_items(m2m_table, connected_field1_id):
m2m_forms = []
if connected_field1_id:
# Get config info
connected_table = m2m_table['connected_table']
connected_field1 = m2m_table['connected_field1']
connected_field2 = m2m_table['connected_field2']
connected_model_name = connected_table['model_name']
# Get connected model objects to query connected_field2_id
ConnectedModel = apps.get_model(app_label='dimadb', model_name=connected_model_name)
filter_params = {connected_field1: connected_field1_id}
connected_objects = list(ConnectedModel.objects.filter(**filter_params))
connected_objects = [model_to_dict(connected_obj) for connected_obj in connected_objects]
# For each connected object (row) in connected table, query and create form for that connected object + foreign object
for connected_obj in connected_objects:
connected_form = get_item_detail_form(connected_obj['id'], connected_table)
m2m_form = get_item_detail_form(connected_obj[connected_field2], m2m_table)
m2m_form['connectedAttributes'] = connected_form
m2m_form['connectedAttributes']['connected_field1'] = connected_field1
m2m_form['connectedAttributes']['connected_field2'] = connected_field2
m2m_forms.append(m2m_form)
return m2m_forms
# Get all items in o2m table
def get_o2m_items(o2m_table, connected_field_id):
o2m_forms = []
if connected_field_id:
# Get config info
o2m_model_name = o2m_table['model_name']
connected_field = o2m_table['connected_field']
# Get o2m model objects
O2MModel = apps.get_model(app_label='dimadb', model_name=o2m_model_name)
filter_params = {connected_field: connected_field_id}
o2m_objects = list(O2MModel.objects.filter(**filter_params))
o2m_objects = [model_to_dict(obj) for obj in o2m_objects]
# Create o2m item form (row)
for o2m_obj in o2m_objects:
o2m_form = get_item_detail_form(o2m_obj['id'], o2m_table)
o2m_form['connected_field'] = connected_field
o2m_forms.append(o2m_form)
return o2m_forms
# Update or create items in m2m/o2m tables, one by one
def update_multiple_items(table_type, obj, connected_field1_id=None):
for attribute in obj.keys():
if attribute != 'id':
if obj[attribute]['type'] == table_type:
list_values = obj[attribute]['value']
for value in list_values:
update_item_info(value, connected_field1_id)
# Delete items in m2m/o2m tables, one by one
def delete_multiple_items(table_type, obj):
for attribute in obj.keys():
if attribute != 'id':
if obj[attribute]['type'] == table_type:
list_values = obj[attribute]['value']
for value in list_values:
delete_item_info(value)
# Create object in connected table (eventlocation, eventresource, etc)
def create_connected_object(form_info, connected_field1_id, connected_field2_id):
connected_field1 = form_info['connected_field1']
connected_field2 = form_info['connected_field2']
model_name = form_info['name']
obj_info = filter_form_object_info(form_info['attributes'])
obj_info[connected_field1] = connected_field1_id
obj_info[connected_field2] = connected_field2_id
Model = apps.get_model(app_label='dimadb', model_name=model_name)
obj = Model(**obj_info)
obj.save()
# Map data from an imported file onto the data model
def mapping_data(data, template, source_name):
try:
total = 0 # Total object rows in imported data
count = 0 # Total object rows saved in database
if isinstance(data, list):
total = len(data)
# Store history of import
import_info = ImportInfo(table_name=template['model_name'], source_name=source_name)
import_info.save()
# Get info from schema_detail
model_name = template['model_name']
fields = template['fields']
m2m_tables = []
o2m_tables = []
if ('m2m_tables' in template.keys()):
m2m_tables = template['m2m_tables']
if ('o2m_tables' in template.keys()):
o2m_tables = template['o2m_tables']
#Mapping
for obj in data:
obj_info = filter_imported_object_info(fields, obj)
if obj_info:
# Store obj in primary table
obj_info['import_id'] = import_info.id
Model = apps.get_model(app_label='dimadb', model_name=model_name)
new_obj = Model(**obj_info)
new_obj.save()
# Store additional objs in m2m tables
for m2m_table in m2m_tables:
m2m_model_name = m2m_table['model_name']
m2m_sources = m2m_table['sources']
for source in m2m_sources:
m2m_objs = []
if 'array' not in source:
m2m_objs.append(obj)
else:
if (pydash.get(obj, source['array'])):
m2m_objs = pydash.get(obj, source['array'])
for m2m_obj in m2m_objs:
m2m_obj_info = filter_imported_object_info(source['fields'], m2m_obj)
if (m2m_obj_info):
m2m_obj_info['import_id'] = import_info.id
M2MModel = apps.get_model(app_label='dimadb', model_name=m2m_model_name)
new_m2m_obj = M2MModel(**m2m_obj_info)
new_m2m_obj.save()
# Store obj in connected table
# Read configure info
connected_table = source['connected_table']
connected_field1 = source['connected_field1']
connected_field2 = source['connected_field2']
connected_model_name = connected_table['model_name']
connected_obj_info = filter_imported_object_info(connected_table['fields'], m2m_obj)
connected_obj_info[connected_field1] = new_obj.id
connected_obj_info[connected_field2] = new_m2m_obj.id
connected_obj_info['import_id'] = import_info.id
ConnectedModel = apps.get_model(app_label='dimadb', model_name=connected_model_name)
new_connected_obj = ConnectedModel(**connected_obj_info)
new_connected_obj.save()
# Store additional objs in o2m tables
for o2m_table in o2m_tables:
o2m_model_name = o2m_table['model_name']
sources = o2m_table['sources']
for source in sources:
o2m_objs = []
if 'array' not in source:
o2m_objs.append(obj)
else:
if (pydash.get(obj, source['array'])):
o2m_objs = pydash.get(obj, source['array'])
for o2m_obj in o2m_objs:
o2m_obj_info = filter_imported_object_info(source['fields'], o2m_obj)
if (o2m_obj_info):
connected_field = source['connected_field']
o2m_obj_info[connected_field] = new_obj.id
o2m_obj_info['import_id'] = import_info.id
O2MModel = apps.get_model(app_label='dimadb', model_name=o2m_model_name)
new_o2m_obj = O2MModel(**o2m_obj_info)
new_o2m_obj.save()
count += 1
return {'message': 'Imported successfully.\n' + 'Imported ' + str(count) + '/' + str(total) + ' object(s).'}
else:
return {'message': 'Wrong json format'}
except Exception as error:
return {'message': 'There is an error (duplication, ...).\n' + 'Imported ' + str(count) + '/' + str(total) + ' object(s).'}
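# Illustrative only: the shape of a mapping template read from mapping_template.json, inferred from
# the keys accessed above (model, field and key names below are hypothetical examples):
# {
#     "event": {
#         "default": {
#             "is_reformat": false,
#             "model_name": "events",
#             "fields": {...},
#             "o2m_tables": [
#                 {"model_name": "eventdate",
#                  "sources": [{"fields": {...}, "connected_field": "event_id"}]}
#             ],
#             "m2m_tables": [
#                 {"model_name": "geolocation",
#                  "sources": [{"fields": {...}, "array": "locations",
#                               "connected_table": {"model_name": "eventlocation", "fields": {...}},
#                               "connected_field1": "event_id", "connected_field2": "location_id"}]}
#             ]
#         }
#     }
# }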
# Some imported JSON files need to be reformatted before mapping
def reformated_data(json_data, item_type, template_type):
try:
reformated_json_data = []
# Each item type & each template type => reformat differently
if (item_type == 'web-activity' and template_type == 'default'):
list_required_attributes = ['event_date', 'event_timestamp', 'items', 'event_name', 'device', 'geo', 'user_id', 'traffic_source']
list_required_event_params = ['ga_session_id', 'page_title', 'page_location']
for obj in json_data:
new_obj = {}
for attribute in list_required_attributes:
if attribute == 'event_date':
date = pydash.get(obj, attribute)
format_date = date[:4] + '-' + date[4:6] + '-' + date[6:8]
new_obj[attribute] = format_date
elif attribute == 'event_timestamp':
new_obj[attribute] = int(pydash.get(obj, attribute))
else:
new_obj[attribute] = pydash.get(obj, attribute)
for param in obj['event_params']:
key = param['key']
values = param['value']
if (key in list_required_event_params):
for value in values:
if values[value] != None:
new_obj[key] = values[value]
else:
continue
for item in new_obj['items']:
item['item_eventname'] = new_obj['event_name']
reformated_json_data.append(new_obj)
elif (item_type == 'google-analytic' and template_type == 'default'):
list_required_attributes = ['date', 'eventName', 'deviceCategory', 'country', 'pageLocation', 'eventCount', 'sessions', 'operatingSystem', 'browser']
for obj in json_data:
new_obj = {}
for attribute in list_required_attributes:
if attribute == 'date':
date = pydash.get(obj, attribute)
format_date = date[:4] + '-' + date[4:6] + '-' + date[6:8]
new_obj[attribute] = format_date
else:
new_obj[attribute] = pydash.get(obj, attribute)
reformated_json_data.append(new_obj)
return reformated_json_data
except Exception as exception:
return exception
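# Illustrative only: with the 'web-activity' default template, a GA4/BigQuery-export event such as
#   {"event_date": "20220131", "event_name": "page_view", "items": [...],
#    "event_params": [{"key": "page_location", "value": {"string_value": "https://example.org/a"}}], ...}
# is flattened above into
#   {"event_date": "2022-01-31", "event_name": "page_view", "page_location": "https://example.org/a",
#    "items": [{"item_eventname": "page_view", ...}], ...}
# before being handed to mapping_data().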
@api_view(['POST'])
@authentication_classes([])
@permission_classes([])
def import_json_file(request, item_type):
try:
# Get request info
files = request.FILES.getlist('files[]')
file = files[0]
json_data = json.load(file)
# Get template configuration info
template_type = request.POST.get('template')
if (template_type is None or template_type == ''):
template_type = 'default'
template = get_json_info(mapping_template_file_path, item_type + '.' + template_type)
is_reformat = template['is_reformat']
# Check reformat
if is_reformat:
json_data = reformated_data(json_data, item_type, template_type)
#Mapping and saving in database
mapping_result = mapping_data(json_data, template, file.name)
return Response(mapping_result, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': str(error)})
@api_view(['GET'])
def get_mapping_templates(request, item_type):
try:
list_templates = []
json_file = open(mapping_template_file_path)
json_data = json.load(json_file)
json_file.close()
list_templates = [key for key in json_data[item_type]]
return Response({'listTemplates': list_templates}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': str(error)})
@api_view(['POST'])
@authentication_classes([])
@permission_classes([])
def import_api(request, item_type):
try:
# Get request info
request_body = json.loads(request.body)
url = request_body['url']
bearer_token = request_body['bearerToken']
template_type = request_body['template']
# Get data from url
http = urllib3.PoolManager()
header = {'Accept': '*/*'}
if (bearer_token != ''):
header['Authorization'] = 'Bearer ' + bearer_token
if (template_type is None or template_type == ''):
template_type = 'default'
response = http.request('GET', url, headers=header)
response_body = json.loads(response.data)
response_data = response_body['data']
# Import
mapping_template = get_json_info(mapping_template_file_path, item_type + '.' + template_type)
mapping_result = mapping_data(response_data, mapping_template, url)
return Response(mapping_result, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': str(error)})
@api_view(['GET'])
def get_import_info(request, item_type):
try:
tables = {
"event": "events",
"article": "products",
"web-activity": "interaction_f",
"google-analytic-report": "interaction_ga",
}
snippets = ImportInfo.objects.filter(table_name=tables[item_type])
serializer = ImportInfoSerializer(snippets, many=True)
return Response({'items': serializer.data}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': str(error)})
@api_view(['DELETE'])
def delete_imported_items(request, item_type, pk):
try:
tables = {
"event": ["events", "businessentity", "entityeventrole", "eventdate"],
"article": ["products", "businessentity", "entityproductrole"],
"web-activity": ["interaction_f"],
"google-analytic-report": ["interaction_ga"]
}
for table in tables[item_type]:
Model = apps.get_model(app_label='dimadb', model_name=table)
Model.objects.filter(import_id=pk).delete()
ImportInfo.objects.filter(id=pk).delete()
return Response({}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': str(error)})
# Generate the recommendation API URL used to retrieve recommendations
def generate_recommend_api(level, item_type, recommend_type, quantity, domain, item_url):
api = IP_DOMAIN + '/dimadb/get-list-recommend/?'
api += 'itemType=' + item_type
api += '&level=' + level
api += '&quantity=' + quantity
if (recommend_type):
api += '&recommendType=' + recommend_type
if (domain):
api += '&domain=' + domain
if (item_url):
api += '&itemUrl=' + item_url
return api
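# For example (illustrative values), generate_recommend_api('Homepage', 'events', 'Most popular', '4', None, None)
# returns a URL of the form:
#   <IP_DOMAIN>/dimadb/get-list-recommend/?itemType=events&level=Homepage&quantity=4&recommendType=Most popular
# Note that parameters are concatenated as-is (spaces are not URL-encoded).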
recommend_display_fields = {
'events': ['event_id', 'event_name', 'event_type', 'next_date', 'url', 'img', 'location_name'],
'products': ['product_id', 'product_name', 'product_type', 'url', 'img']
}
# Get upcoming recommendation
def get_upcoming(table_name, quantity=1, domain=None):
Model = apps.get_model(app_label='dimadb', model_name=table_name)
display_fields = recommend_display_fields[table_name]
list_recommend_items = []
filter_params = {}
if (domain is not None):
if (table_name == 'events'):
filter_params['event_type'] = domain
elif (table_name == 'products'):
filter_params['product_type'] = domain
list_objs = Model.objects.filter(Q(**filter_params))
list_objs = [model_to_dict(obj) for obj in list(list_objs)]
if (table_name == 'events'):
list_filtered_obj = []
today = datetime.today()
EventDateModel = apps.get_model(app_label='dimadb', model_name='eventdate')
for obj in list_objs:
list_event_dates = EventDateModel.objects.filter(event_id=obj['id'], date__gte=today).order_by('date')
list_event_dates = [model_to_dict(obj) for obj in list(list_event_dates)]
if (len(list_event_dates)):
obj['next_date'] = list_event_dates[0]['date']
list_filtered_obj.append(obj)
if (len(list_filtered_obj)):
list_objs = sorted(list_filtered_obj, key=lambda x: x['next_date'])
else:
list_objs = []
for i in range(0, int(quantity)):
if (i < len(list_objs)):
obj = list_objs[i]
recommend_item = {}
for field in list(display_fields):
recommend_item[field] = obj[field]
list_recommend_items.append(recommend_item)
return list_recommend_items
# Order items by popularity score (weighted count of web activities)
def order_by_score(table_name, list_objs):
if (len(list_objs)):
list_interactions_f = Interaction_f.objects.filter(page_location__in=[obj['url'] for obj in list_objs])
list_interactions_f = [model_to_dict(obj) for obj in list_interactions_f]
if (len(list_interactions_f)):
list_interactions_f = pd.DataFrame(list_interactions_f).groupby(['page_location', 'event_name'], as_index=False)['id'].count().rename(columns={'id':'event_count'}).to_dict('r')
list_interactions_ga = list(Interaction_ga.objects.filter(page_location__in=[obj['url'] for obj in list_objs]).values('page_location', 'event_name', 'event_count'))
list_interactions = list_interactions_f + list_interactions_ga
if (len(list_interactions)):
list_interactions = pd.DataFrame(list_interactions).groupby(['page_location', 'event_name'], as_index=False).sum().to_dict('r')
list_objs_weight = {}
for interaction in list_interactions:
page_location = interaction['page_location']
event_name = interaction['event_name']
event_count = interaction['event_count']
activity_weight = 0
try:
activity_type_info = model_to_dict(WebActivityType.objects.get(name=event_name))
activity_weight = activity_type_info['value']
except WebActivityType.DoesNotExist:
activity_weight = 1
if page_location not in list_objs_weight:
list_objs_weight[page_location] = 0
list_objs_weight[page_location] += event_count * activity_weight
for obj in list_objs:
if obj['url'] in list_objs_weight:
obj['popular_score'] = list_objs_weight[obj['url']]
else:
obj['popular_score'] = 0
if (len(list_objs)):
list_objs = sorted(list_objs, key=lambda d: d['popular_score'], reverse=True)
else:
list_objs = []
return list_objs
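# The popularity score computed above is, for each item URL:
#   popular_score(url) = sum over event_name of event_count(url, event_name) * weight(event_name)
# where weight(event_name) comes from WebActivityType (defaulting to 1 for unknown types) and
# event_count merges file-based interactions (Interaction_f) with Google Analytics ones (Interaction_ga).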
# Get most popular recommendation
def get_most_popular(table_name, quantity=1, domain=None):
Model = apps.get_model(app_label='dimadb', model_name=table_name)
display_fields = recommend_display_fields[table_name]
list_recommend_items = []
filter_params = {}
list_interactions = []
if (domain is not None):
if (table_name == 'events'):
filter_params['event_type'] = domain
elif (table_name == 'products'):
filter_params['product_type'] = domain
list_objs = Model.objects.filter(Q(**filter_params))
list_objs = [model_to_dict(obj) for obj in list(list_objs)]
# list_objs = order_by_score(table_name, list_objs)
if (table_name == 'events'):
list_filtered_obj = []
today = datetime.today()
EventDateModel = apps.get_model(app_label='dimadb', model_name='eventdate')
for obj in list_objs:
list_event_dates = EventDateModel.objects.filter(event_id=obj['id'], date__gte=today).order_by('date')
list_event_dates = [model_to_dict(obj) for obj in list(list_event_dates)]
if (len(list_event_dates)):
obj['next_date'] = list_event_dates[0]['date']
list_filtered_obj.append(obj)
if (len(list_filtered_obj)):
list_objs = sorted(list_filtered_obj, key=lambda x: x['next_date'])
else:
list_objs = []
list_objs = order_by_score(table_name, list_objs)
for i in range(0, int(quantity)):
if (i < len(list_objs)):
obj = list_objs[i]
recommend_item = {}
for field in list(display_fields):
recommend_item[field] = obj[field]
recommend_item['popular_score'] = obj['popular_score']
list_recommend_items.append(recommend_item)
if (len(list_recommend_items) == 0):
list_recommend_items = get_upcoming(table_name, quantity)
return list_recommend_items
# Get similarity recommendation
def get_similar(table_name, quantity=1, item_url=None, recommend_type=None):
Model = apps.get_model(app_label='dimadb', model_name=table_name)
display_fields = recommend_display_fields[table_name]
list_recommend_items = []
item_id = Model.objects.get(url=item_url).id
list_similar_items = ContentBasedRecommender.recommend_items_by_items(table_name=table_name, items_id=item_id)
if (table_name == 'events'):
list_filtered_obj = []
today = datetime.today()
EventDateModel = apps.get_model(app_label='dimadb', model_name='eventdate')
for obj in list_similar_items:
list_event_dates = EventDateModel.objects.filter(event_id=obj['id'], date__gte=today).order_by('date')
list_event_dates = [model_to_dict(obj) for obj in list(list_event_dates)]
if (len(list_event_dates)):
obj['next_date'] = list_event_dates[0]['date']
list_filtered_obj.append(obj)
if (len(list_filtered_obj)):
list_similar_items = sorted(list_filtered_obj, key=lambda x: x['similarity_score'], reverse=True)
else:
list_similar_items = []
if (recommend_type == 'Similar combined with Most popular'):
list_similar_items = order_by_score(table_name, list_similar_items)
for i in range(0, int(quantity)):
if (i < len(list_similar_items)):
similar_obj = list_similar_items[i]
obj = Model.objects.get(id=similar_obj['id'])
obj = model_to_dict(obj)
recommend_item = {}
for field in list(display_fields):
if field in obj:
recommend_item[field] = obj[field]
if (table_name == 'events'):
recommend_item['next_date'] = similar_obj['next_date']
if (recommend_type == 'Similar combined with Most popular'):
recommend_item['popular_score'] = similar_obj['popular_score']
recommend_item['similarity_score'] = similar_obj['similarity_score']
list_recommend_items.append(recommend_item)
if (len(list_recommend_items) == 0):
list_recommend_items = get_upcoming(table_name, quantity)
return list_recommend_items
# Get list of recommend items
def get_recommend_items(level, item_type, recommend_type, quantity, domain, item_url):
list_recommend_items = []
if (level == 'Homepage'):
if (recommend_type == 'Upcoming'):
if (item_type == 'events'):
list_recommend_items = get_upcoming(table_name=item_type, quantity=quantity)
if (recommend_type == 'Most popular'):
if (item_type == 'events'):
list_recommend_items = get_most_popular(table_name=item_type, quantity=quantity)
elif (item_type == 'products'):
list_recommend_items = get_most_popular(table_name=item_type, quantity=quantity)
elif (level == 'Domain'):
if (recommend_type == 'Upcoming'):
if (item_type == 'events'):
list_recommend_items = get_upcoming(table_name=item_type, quantity=quantity, domain=domain)
if (recommend_type == 'Most popular'):
if (item_type == 'events'):
list_recommend_items = get_most_popular(table_name=item_type, quantity=quantity, domain=domain)
elif (item_type == 'products'):
list_recommend_items = get_most_popular(table_name=item_type, quantity=quantity, domain=domain)
else:
if (item_type == 'events'):
list_recommend_items = get_similar(table_name=item_type, quantity=quantity, item_url=item_url, recommend_type=recommend_type)
elif (item_type == 'products'):
list_recommend_items = get_similar(table_name=item_type, quantity=quantity, item_url=item_url, recommend_type=recommend_type)
return list_recommend_items
@api_view(['GET'])
@authentication_classes([])
@permission_classes([])
def get_list_recommend(request):
try:
# Authorization
bearer_token = request.headers.get('Authorization')
if (bearer_token == 'Bearer ' + API_KEY):
# Read request info
level = request.GET.get('level', None)
item_type = request.GET.get('itemType', None)
recommend_type = request.GET.get('recommendType', None)
quantity = request.GET.get('quantity', None)
domain = request.GET.get('domain', None)
item_url = request.GET.get('itemUrl', None)
list_recommend_items = get_recommend_items(level, item_type, recommend_type, quantity, domain, item_url)
return Response({'itemType': item_type, 'recommendType': recommend_type, 'items': list_recommend_items}, status=status.HTTP_200_OK)
else:
return Response({'message': 'Authorization failed'}, status=status.HTTP_401_UNAUTHORIZED)
except Exception as error:
return Response({'message': str(error)})
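# Example request (illustrative values), authenticated with the API key configured in .env:
#   curl -H "Authorization: Bearer <API_KEY>" \
#        "<IP_DOMAIN>/dimadb/get-list-recommend/?level=Homepage&itemType=events&recommendType=Most popular&quantity=4"
# The response body contains {'itemType': ..., 'recommendType': ..., 'items': [...]}.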
def get_embedded_link(api, recommend_type, is_gui=False):
recommendItems = ''
if (recommend_type == 'Upcoming'):
recommendItems = 'upComingItems'
elif (recommend_type == 'Most popular'):
recommendItems = 'popularItems'
elif (recommend_type == 'Similar'):
recommendItems = 'similarItems'
elif (recommend_type == 'Similar combined with Most popular'):
recommendItems = 'popularSimilarItems'
else:
recommendItems = 'upComingItems'
embedded_link = ''
css_link = '<link rel="stylesheet" href="' + IP_DOMAIN + '/static/dimadb/css/recommender.css">'
div_link = '<div id="' + recommendItems + '"></div>'
js_link = '<script src="' + IP_DOMAIN + '/static/dimadb/js/recommender.js' + '"></script>'
recommend_link = '<script>' + '\n'
recommend_link += '\tvar ' + recommendItems + ' = getRecommend("' + api + '", "' + API_KEY + '");' + '\n'
recommend_link += '\t' + recommendItems +'.then(res => {' + '\n'
recommend_link += '\t\t//Handle recommend items here' + '\n'
if (is_gui):
recommend_link += '\t\t//Below code shows recommendation GUI' + '\n'
recommend_link += '\t\tgetListView("' + recommendItems + '", res);' + '\n'
else:
recommend_link += '\t\t//Below code shows recommendation results' + '\n'
recommend_link += '\t\tconsole.log(res);' + '\n'
recommend_link += '\t});' + '\n'
recommend_link += '</script>'
embedded_link = css_link + '\n' + div_link + '\n' + js_link + '\n' + recommend_link
return embedded_link
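# Illustrative output of get_embedded_link() for recommend_type='Most popular' and is_gui=False:
#   <link rel="stylesheet" href="<IP_DOMAIN>/static/dimadb/css/recommender.css">
#   <div id="popularItems"></div>
#   <script src="<IP_DOMAIN>/static/dimadb/js/recommender.js"></script>
#   <script>
#       var popularItems = getRecommend("<api>", "<API_KEY>");
#       popularItems.then(res => {
#           //Handle recommend items here
#           //Below code shows recommendation results
#           console.log(res);
#       });
#   </script>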
@api_view(['POST'])
def get_recommend_api(request):
try:
# Read request info
body = json.loads(request.body)
level = body['level']
item_type = body['itemType']
recommend_type = body['recommendType']
quantity = body['quantity']
domain = body['domain']
item_url = body['itemUrl']
#Get recommend api + recommend list
api = generate_recommend_api(level, item_type, recommend_type, quantity, domain, item_url)
list_recommend_items = get_recommend_items(level, item_type, recommend_type, quantity, domain, item_url)
embedded_links = [
{
"name": "Script dynamique et intégré dans chaque page (sans la génération des interfaces)",
"link": get_embedded_link(api, recommend_type, is_gui=False),
}, {
"name": "Script dynamique et intégré dans chaque page (avec la génération des interfaces)",
"link": get_embedded_link(api, recommend_type, is_gui=True),
}
]
return Response({
'items': list_recommend_items,
'api': api, 'apiKey': API_KEY,
'embeddedDynamicLinks': embedded_links,
}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': str(error)})
@api_view(['POST'])
def train_similar_recommend(request):
try:
# Read request info
body = json.loads(request.body)
item_type = body['itemType']
# Training
ContentBasedRecommender.train_items_by_items(table_name=item_type)
# Get similarity recommendation training info
similar_train_info = get_similar_train_info()
return Response({'similarTrainInfo': similar_train_info}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': str(error)})
@api_view(['GET'])
def get_recommend_info(request):
try:
# Recommend info
recommend_types = [
{
"name": "Upcoming",
"displayName": "À venir"
}, {
"name": "Most popular",
"displayName": "Les plus populaires"
}, {
"name": "Similar",
"displayName": "Produits similaires"
}, {
"name": "Similar combined with Most popular",
"displayName": "Produits similaires combinés avec les plus populaires"
}
]
recommend_levels = {
"Homepage": {
"displayName": "Page d'accueil",
"algorithms": [recommend_types[0], recommend_types[1]]
},
"Domain": {
"displayName": "Domaine",
"algorithms": [recommend_types[0], recommend_types[1]]
},
"Item": {
"displayName": "Produit",
"algorithms": [recommend_types[2], recommend_types[3]]
}
}
# Get lists of items and domains (item types)
event_snippets = Events.objects.all()
event_serializer = EventSerializer(event_snippets, many=True)
event_types = Events.objects.values('event_type').distinct()
event_types = [item['event_type'] for item in list(event_types)]
article_snippets = Products.objects.all()
article_serializer = ArticleSerializer(article_snippets, many=True)
article_types = Products.objects.values('product_type').distinct()
article_types = [item['product_type'] for item in list(article_types)]
list_item_infos = {
"events": {
"name": "Événements",
"items": event_serializer.data,
"types": event_types
},
"products": {
"name": "Articles",
"items": article_serializer.data,
"types": article_types
}
}
embedded_links = [
{
"name": "Script fixé et intégré dans la page d'accueil (sans la génération des interfaces)",
"link": get_embedded_recommendation(is_gui=False),
},
{
"name": "Script fixé et intégré dans la page d'accueil (avec la génération des interfaces)",
"link": get_embedded_recommendation(is_gui=True)
}
]
return Response({'embeddedFixedLinks': embedded_links,
'recommendLevels': recommend_levels,
'listItemInfos': list_item_infos}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': str(error)})
# Get history of similarity recommendation training
def get_similar_train_info():
try:
list_item_types = [{'name': 'Événement', 'value': 'events'},
{'name': 'Article', 'value': 'products'}]
for item_type in list_item_types:
Model = apps.get_model(app_label='dimadb', model_name=item_type['value'])
item_type['number_items'] = len(Model.objects.all())
# Get total number of trained items
if (LdaSimilarityVersion.objects.filter(item_type=item_type['value']).exists()):
obj = LdaSimilarityVersion.objects.filter(item_type=item_type['value']).latest('created_at')
item_type['latest_training_at'] = str(obj)
item_type['number_trained_items'] = model_to_dict(obj)['n_products']
else:
item_type['latest_training_at'] = ''
item_type['number_trained_items'] = 0
return list_item_types
except Exception as error:
return Response({'message': str(error)})
@api_view(['GET'])
def get_configure_info(request):
try:
similar_train_info = get_similar_train_info()
web_activity_types_f = Interaction_f.objects.values('event_name').distinct()
web_activity_types_f = [item['event_name'] for item in list(web_activity_types_f)]
web_activity_types_ga = Interaction_ga.objects.values('event_name').distinct()
web_activity_types_ga = [item['event_name'] for item in list(web_activity_types_ga)]
web_activity_types = list(dict.fromkeys(web_activity_types_f + web_activity_types_ga))
existed_web_activity_types = WebActivityType.objects.values('name').distinct()
existed_web_activity_types = [item['name'] for item in list(existed_web_activity_types)]
web_activity_types = web_activity_types + existed_web_activity_types
web_activity_types = list(dict.fromkeys(web_activity_types))
web_activity_types = [type for type in web_activity_types if type in ['user_engagement', 'scroll', 'page_view']]
web_activities_info = {}
for activity_type in web_activity_types:
try:
activity_type_obj = WebActivityType.objects.get(name=activity_type)
activity_type_obj = model_to_dict(activity_type_obj)
web_activities_info[activity_type] = activity_type_obj['value']
except:
web_activities_info[activity_type] = 0
return Response({'similarTrainInfo': similar_train_info, 'webActivityInfo': web_activities_info}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': str(error)})
@api_view(['POST'])
def update_activity_weight(request):
try:
# Read request info
body = json.loads(request.body)
web_activity_types = body['webActivityInfo']
# Update/create web activity types
for type in web_activity_types:
try:
web_activities = list(WebActivityType.objects.filter(name=type))
# Check whether type exists in WebActivityType table
if (len(web_activities)):
web_activity = web_activities[0]
web_activity.value = web_activity_types[type]
web_activity.save()
else:
new_activity_type = WebActivityType(name=type, value=web_activity_types[type])
new_activity_type.save()
except:
new_activity_type = WebActivityType(name=type, value=web_activity_types[type])
new_activity_type.save()
return Response({}, status=status.HTTP_200_OK)
except Exception as error:
return Response({'message': str(error)})
# Generate report object (info, name, title, data)
def create_report(name, title, data, chart_type, is_change):
return {
'name': name,
'title': title,
'data': data,
'type': chart_type,
'isChange': is_change,
'random': name + str(random.randint(0, 1000)),
}
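# Illustrative only: create_report('session_report', 'Statistiques de sessions Web', sessions, 'column', False)
# returns a dict such as
#   {'name': 'session_report', 'title': 'Statistiques de sessions Web', 'data': [...],
#    'type': 'column', 'isChange': False, 'random': 'session_report123'}
# where 'random' is presumably used by the front end to force chart re-rendering.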
@api_view(['GET'])
def get_reports(request):
try:
start_date = request.GET.get('startDate', date.today())
end_date = request.GET.get('endDate', date.today())
group_type = request.GET.get('groupBy', 'daily')
reports = []
# Sessions
if (group_type == 'none'):
sessions_file = Interaction_f.objects.filter(
visit_date__range=[start_date, end_date]).values('session_id').distinct().count()
sessions_ga = Interaction_ga.objects.filter(
date__range=[start_date, end_date]).aggregate(Sum('session_count'))['session_count__sum'] or 0
sessions = [{'type': 'all', 'sum': sessions_file + sessions_ga}]
elif (group_type == 'daily'):
sessions_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).values(
day=F('visit_date')).annotate(sum=Count('session_id', distinct=True))
sessions_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).values(
day=F('date')).annotate(sum=Sum('session_count'))
sessions = list(sessions_file) + list(sessions_ga)
if (len(sessions)):
sessions = pd.DataFrame(sessions).groupby(['day'], as_index=False).sum().to_dict('r')
sessions = sorted(sessions, key=lambda k : k['day'])
elif (group_type == 'weekly'):
sessions_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
week=TruncWeek('visit_date')).values('week').annotate(sum=Count('session_id', distinct=True))
sessions_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
week=TruncWeek('date')).values('week').annotate(sum=Sum('session_count'))
sessions = list(sessions_file) + list(sessions_ga)
if (len(sessions)):
sessions = pd.DataFrame(sessions).groupby(['week'], as_index=False).sum().to_dict('r')
sessions = sorted(sessions, key=lambda k : k['week'])
elif (group_type == 'monthly'):
sessions_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
month=TruncMonth('visit_date')).values('month').annotate(sum=Count('session_id', distinct=True))
sessions_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
month=TruncMonth('date')).values('month').annotate(sum=Sum('session_count'))
sessions = list(sessions_file) + list(sessions_ga)
if (len(sessions)):
sessions = pd.DataFrame(sessions).groupby(['month'], as_index=False).sum().to_dict('r')
sessions = sorted(sessions, key=lambda k : k['month'])
else:
sessions_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
year=TruncYear('visit_date')).values('year').annotate(sum=Count('session_id', distinct=True))
sessions_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
year=TruncYear('date')).values('year').annotate(sum=Sum('session_count'))
sessions = list(sessions_file) + list(sessions_ga)
if (len(sessions)):
sessions = pd.DataFrame(sessions).groupby(['year'], as_index=False).sum().to_dict('r')
sessions = sorted(sessions, key=lambda k : k['year'])
reports.append(create_report('session_report', 'Statistiques de sessions Web',
sessions, 'column', group_type == 'none'))
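# The same pattern is repeated for every report below: query Interaction_f (one row per interaction,
# aggregated with Count) and Interaction_ga (pre-aggregated, summed over event_count), concatenate the
# two result lists, then merge them per day/week/month/year with a pandas groupby().sum().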
# Web_activities:
if (group_type == 'none'):
web_activities_file = Interaction_f.objects.filter(
visit_date__range=[start_date, end_date]).all().count()
web_activities_ga = Interaction_ga.objects.filter(
date__range=[start_date, end_date]).aggregate(Sum('event_count'))['event_count__sum'] or 0
web_activities = [{'type': 'all', 'sum': web_activities_file + web_activities_ga}]
elif (group_type == 'daily'):
web_activities_file = Interaction_f.objects.filter(visit_date__range=[
start_date, end_date]).values(day=F('visit_date')).annotate(sum=Count('id'))
web_activities_ga = Interaction_ga.objects.filter(date__range=[
start_date, end_date]).values(day=F('date')).annotate(sum=Sum('event_count'))
web_activities = list(web_activities_file) + list(web_activities_ga)
if (len(web_activities)):
web_activities = pd.DataFrame(web_activities).groupby(['day'], as_index=False).sum().to_dict('r')
web_activities = sorted(web_activities, key=lambda k : k['day'])
elif (group_type == 'weekly'):
web_activities_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
week=TruncWeek('visit_date')).values('week').annotate(sum=Count('id'))
web_activities_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
week=TruncWeek('date')).values('week').annotate(sum=Sum('event_count'))
web_activities = list(web_activities_file) + list(web_activities_ga)
if (len(web_activities)):
web_activities = pd.DataFrame(web_activities).groupby(['week'], as_index=False).sum().to_dict('r')
web_activities = sorted(web_activities, key=lambda k : k['week'])
elif (group_type == 'monthly'):
web_activities_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
month=TruncMonth('visit_date')).values('month').annotate(sum=Count('id'))
web_activities_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
month=TruncMonth('date')).values('month').annotate(sum=Sum('event_count'))
web_activities = list(web_activities_file) + list(web_activities_ga)
if (len(web_activities)):
web_activities = pd.DataFrame(web_activities).groupby(['month'], as_index=False).sum().to_dict('r')
web_activities = sorted(web_activities, key=lambda k : k['month'])
else:
web_activities_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
year=TruncYear('visit_date')).values('year').annotate(sum=Count('id'))
web_activities_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
year=TruncYear('date')).values('year').annotate(sum=Sum('event_count'))
web_activities = list(web_activities_file) + list(web_activities_ga)
if (len(web_activities)):
web_activities = pd.DataFrame(web_activities).groupby(['year'], as_index=False).sum().to_dict('r')
web_activities = sorted(web_activities, key=lambda k : k['year'])
reports.append(create_report('web_activities_report',
'Statistiques d’activités Web', web_activities, 'column', group_type == 'none'))
# Web Activities device_category:
if (group_type == 'none'):
web_activities_device_file = Interaction_f.objects.filter(visit_date__range=[
start_date, end_date]).values(type=F('device_category')).annotate(sum=Count('id'))
web_activities_device_ga = Interaction_ga.objects.filter(date__range=[
start_date, end_date]).values(type=F('device_category')).annotate(sum=Sum('event_count'))
web_activities_device = list(web_activities_device_file) + list(web_activities_device_ga)
if (len(web_activities_device)):
web_activities_device = pd.DataFrame(web_activities_device).groupby(['type'], as_index=False).sum().to_dict('r')
elif (group_type == 'daily'):
web_activities_device_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).values(
day=F('visit_date'), type=F('device_category')).annotate(sum=Count('id'))
web_activities_device_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).values(
day=F('date'), type=F('device_category')).annotate(sum=Sum('event_count'))
web_activities_device = list(web_activities_device_file) + list(web_activities_device_ga)
if (len(web_activities_device)):
web_activities_device = pd.DataFrame(web_activities_device).groupby(['day', 'type'], as_index=False).sum().to_dict('r')
web_activities_device = sorted(web_activities_device, key=lambda k : k['day'])
elif (group_type == 'weekly'):
web_activities_device_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
week=TruncWeek('visit_date')).values('week', type=F('device_category')).annotate(sum=Count('id'))
web_activities_device_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
week=TruncWeek('date')).values('week', type=F('device_category')).annotate(sum=Sum('event_count'))
web_activities_device = list(web_activities_device_file) + list(web_activities_device_ga)
if (len(web_activities_device)):
web_activities_device = pd.DataFrame(web_activities_device).groupby(['week', 'type'], as_index=False).sum().to_dict('r')
web_activities_device = sorted(web_activities_device, key=lambda k : k['week'])
elif (group_type == 'monthly'):
web_activities_device_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
month=TruncMonth('visit_date')).values('month', type=F('device_category')).annotate(sum=Count('id'))
web_activities_device_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
month=TruncMonth('date')).values('month', type=F('device_category')).annotate(sum=Sum('event_count'))
web_activities_device = list(web_activities_device_file) + list(web_activities_device_ga)
if (len(web_activities_device)):
web_activities_device = pd.DataFrame(web_activities_device).groupby(['month', 'type'], as_index=False).sum().to_dict('r')
web_activities_device = sorted(web_activities_device, key=lambda k : k['month'])
else:
web_activities_device_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
year=TruncYear('visit_date')).values('year', type=F('device_category')).annotate(sum=Count('id'))
web_activities_device_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
year=TruncYear('date')).values('year', type=F('device_category')).annotate(sum=Sum('event_count'))
web_activities_device = list(web_activities_device_file) + list(web_activities_device_ga)
if (len(web_activities_device)):
web_activities_device = pd.DataFrame(web_activities_device).groupby(['year', 'type'], as_index=False).sum().to_dict('r')
web_activities_device = sorted(web_activities_device, key=lambda k : k['year'])
reports.append(create_report('session_device_report', 'Statistiques d’activités Web par types d’appareils',
web_activities_device, 'column', group_type == 'none'))
# Web Activities browser:
if (group_type == 'none'):
web_activities_browser_file = Interaction_f.objects.filter(visit_date__range=[
start_date, end_date]).values(type=F('browser')).annotate(sum=Count('id'))
web_activities_browser_ga = Interaction_ga.objects.filter(date__range=[
start_date, end_date]).values(type=F('browser')).annotate(sum=Sum('event_count'))
web_activities_browser = list(web_activities_browser_file) + list(web_activities_browser_ga)
if (len(web_activities_browser)):
web_activities_browser = pd.DataFrame(web_activities_browser).groupby(['type'], as_index=False).sum().to_dict('r')
elif (group_type == 'daily'):
web_activities_browser_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).values(
day=F('visit_date'), type=F('browser')).annotate(sum=Count('id'))
web_activities_browser_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).values(
day=F('date'), type=F('browser')).annotate(sum=Sum('event_count'))
web_activities_browser = list(web_activities_browser_file) + list(web_activities_browser_ga)
if (len(web_activities_browser)):
web_activities_browser = pd.DataFrame(web_activities_browser).groupby(['day', 'type'], as_index=False).sum().to_dict('r')
web_activities_browser = sorted(web_activities_browser, key=lambda k : k['day'])
elif (group_type == 'weekly'):
web_activities_browser_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
week=TruncWeek('visit_date')).values('week', type=F('browser')).annotate(sum=Count('id'))
web_activities_browser_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
week=TruncWeek('date')).values('week', type=F('browser')).annotate(sum=Sum('event_count'))
web_activities_browser = list(web_activities_browser_file) + list(web_activities_browser_ga)
if (len(web_activities_browser)):
web_activities_browser = pd.DataFrame(web_activities_browser).groupby(['week', 'type'], as_index=False).sum().to_dict('r')
web_activities_browser = sorted(web_activities_browser, key=lambda k : k['week'])
elif (group_type == 'monthly'):
web_activities_browser_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
month=TruncMonth('visit_date')).values('month', type=F('browser')).annotate(sum=Count('id'))
web_activities_browser_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
month=TruncMonth('date')).values('month', type=F('browser')).annotate(sum=Sum('event_count'))
web_activities_browser = list(web_activities_browser_file) + list(web_activities_browser_ga)
if (len(web_activities_browser)):
web_activities_browser = pd.DataFrame(web_activities_browser).groupby(['month', 'type'], as_index=False).sum().to_dict('r')
web_activities_browser = sorted(web_activities_browser, key=lambda k : k['month'])
else:
web_activities_browser_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
year=TruncYear('visit_date')).values('year', type=F('browser')).annotate(sum=Count('id'))
web_activities_browser_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
year=TruncYear('date')).values('year', type=F('browser')).annotate(sum=Sum('event_count'))
web_activities_browser = list(web_activities_browser_file) + list(web_activities_browser_ga)
if (len(web_activities_browser)):
web_activities_browser = pd.DataFrame(web_activities_browser).groupby(['year', 'type'], as_index=False).sum().to_dict('r')
web_activities_browser = sorted(web_activities_browser, key=lambda k : k['year'])
reports.append(create_report('session_browser_report', 'Statistiques d’activités Web par navigateurs Web',
web_activities_browser, 'column', group_type == 'none'))
# Web Activities os:
if (group_type == 'none'):
web_activities_os_file = Interaction_f.objects.filter(visit_date__range=[
start_date, end_date]).values(type=F('operating_system')).annotate(sum=Count('id'))
web_activities_os_ga = Interaction_ga.objects.filter(date__range=[
start_date, end_date]).values(type=F('operating_system')).annotate(sum=Sum('event_count'))
web_activities_os = list(web_activities_os_file) + list(web_activities_os_ga)
if (len(web_activities_os)):
web_activities_os = pd.DataFrame(web_activities_os).groupby(['type'], as_index=False).sum().to_dict('r')
elif (group_type == 'daily'):
web_activities_os_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).values(
day=F('visit_date'), type=F('operating_system')).annotate(sum=Count('id'))
web_activities_os_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).values(
day=F('date'), type=F('operating_system')).annotate(sum=Sum('event_count'))
web_activities_os = list(web_activities_os_file) + list(web_activities_os_ga)
if (len(web_activities_os)):
web_activities_os = pd.DataFrame(web_activities_os).groupby(['day', 'type'], as_index=False).sum().to_dict('r')
web_activities_os = sorted(web_activities_os, key=lambda k : k['day'])
elif (group_type == 'weekly'):
web_activities_os_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
week=TruncWeek('visit_date')).values('week', type=F('operating_system')).annotate(sum=Count('id'))
web_activities_os_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
week=TruncWeek('date')).values('week', type=F('operating_system')).annotate(sum=Sum('event_count'))
web_activities_os = list(web_activities_os_file) + list(web_activities_os_ga)
if (len(web_activities_os)):
web_activities_os = pd.DataFrame(web_activities_os).groupby(['week', 'type'], as_index=False).sum().to_dict('r')
web_activities_os = sorted(web_activities_os, key=lambda k : k['week'])
elif (group_type == 'monthly'):
web_activities_os_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
month=TruncMonth('visit_date')).values('month', type=F('operating_system')).annotate(sum=Count('id'))
web_activities_os_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
month=TruncMonth('date')).values('month', type=F('operating_system')).annotate(sum=Sum('event_count'))
web_activities_os = list(web_activities_os_file) + list(web_activities_os_ga)
if (len(web_activities_os)):
web_activities_os = pd.DataFrame(web_activities_os).groupby(['month', 'type'], as_index=False).sum().to_dict('r')
web_activities_os = sorted(web_activities_os, key=lambda k : k['month'])
else:
web_activities_os_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
year=TruncYear('visit_date')).values('year', type=F('operating_system')).annotate(sum=Count('id'))
web_activities_os_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
year=TruncYear('date')).values('year', type=F('operating_system')).annotate(sum=Sum('event_count'))
web_activities_os = list(web_activities_os_file) + list(web_activities_os_ga)
if (len(web_activities_os)):
web_activities_os = pd.DataFrame(web_activities_os).groupby(['year', 'type'], as_index=False).sum().to_dict('r')
web_activities_os = sorted(web_activities_os, key=lambda k : k['year'])
reports.append(create_report('session_os_report', 'Statistiques d’activités Web par systèmes d’exploitation',
web_activities_os, 'column', group_type == 'none'))
# Web Activities type:
if (group_type == 'none'):
web_activities_type_file = Interaction_f.objects.filter(visit_date__range=[
start_date, end_date]).values(type=F('event_name')).annotate(sum=Count('id'))
web_activities_type_ga = Interaction_ga.objects.filter(date__range=[
start_date, end_date]).values(type=F('event_name')).annotate(sum=Sum('event_count'))
web_activities_type = list(web_activities_type_file) + list(web_activities_type_ga)
if (len(web_activities_type)):
web_activities_type = pd.DataFrame(web_activities_type).groupby(['type'], as_index=False).sum().to_dict('r')
elif (group_type == 'daily'):
web_activities_type_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).values(
day=F('visit_date'), type=F('event_name')).annotate(sum=Count('id'))
web_activities_type_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).values(
day=F('date'), type=F('event_name')).annotate(sum=Sum('event_count'))
web_activities_type = list(web_activities_type_file) + list(web_activities_type_ga)
if (len(web_activities_type)):
web_activities_type = pd.DataFrame(web_activities_type).groupby(['day', 'type'], as_index=False).sum().to_dict('r')
web_activities_type = sorted(web_activities_type, key=lambda k : k['day'])
elif (group_type == 'weekly'):
web_activities_type_file = Interaction_f.objects.filter(visit_date__range=[start_date, end_date]).annotate(
week=TruncWeek('visit_date')).values('week', type=F('event_name')).annotate(sum=Count('id'))
web_activities_type_ga = Interaction_ga.objects.filter(date__range=[start_date, end_date]).annotate(
week=TruncWeek('date')).values('week', type=F('event_name')).annotate(sum=Sum('event_count'))
web_activities_type = list(web_activities_type_file) + list(web_activities_type_ga)
if (len(web_activities_type)):
web_activities_type = | pd.DataFrame(web_activities_type) | pandas.DataFrame |
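# The device / browser / OS / event-type blocks above repeat one aggregation
# pattern. A minimal sketch of a shared helper (an illustration only, not part of
# the original view; it relies on the `pandas as pd` import at the top of the
# module): `file_qs` and `ga_qs` stand for the two already-filtered annotated
# querysets, `keys` for the grouping columns, e.g. ['week', 'type'].
def merge_interaction_counts(file_qs, ga_qs, keys):
    rows = list(file_qs) + list(ga_qs)
    if not rows:
        return []
    merged = pd.DataFrame(rows).groupby(list(keys), as_index=False).sum().to_dict('r')
    # Sort by the period column (day/week/month/year) when the data is grouped by one.
    return sorted(merged, key=lambda row: row[keys[0]]) if len(keys) > 1 else merged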
import cv2
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
from PIL import Image
from skimage.transform import resize
from random import shuffle
from random import randint
import math
import random
from io import BytesIO
import jpeg4py as jpeg
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
list_paths = []
for subdir, dirs, files in os.walk("/home/gasimov_aydin/ieee"):
for file in files:
#print os.path.join(subdir, file)
filepath = subdir + os.sep + file
list_paths.append(filepath)
list_train = [filepath for filepath in list_paths if "train_new/" in filepath]
shuffle(list_train)
list_test = [filepath for filepath in list_paths if "test/" in filepath]
list_train = list_train
list_test = list_test
index = [os.path.basename(filepath) for filepath in list_test]
list_classes = list(set([os.path.dirname(filepath).split(os.sep)[-1] for filepath in list_paths if "train" in filepath]))
list_classes = ['Sony-NEX-7',
'Motorola-X',
'HTC-1-M7',
'Samsung-Galaxy-Note3',
'Motorola-Droid-Maxx',
'iPhone-4s',
'iPhone-6',
'LG-Nexus-5x',
'Samsung-Galaxy-S4',
'Motorola-Nexus-6']
img_size_1 = 512
img_size_2 = 512
def get_class_from_path(filepath):
return os.path.dirname(filepath).split(os.sep)[-1]
MANIPULATIONS = ['jpg70', 'jpg90', 'gamma0.8', 'gamma1.2', 'bicubic0.5', 'bicubic0.8', 'bicubic1.5', 'bicubic2.0']
def random_manipulation(img, manipulation=None):
if manipulation is None:
    manipulation = random.choice(MANIPULATIONS)
if manipulation.startswith('jpg'):
quality = int(manipulation[3:])
out = BytesIO()
im = Image.fromarray(img)
im.save(out, format='jpeg', quality=quality)
im_decoded = jpeg.JPEG(np.frombuffer(out.getvalue(), dtype=np.uint8)).decode()
del out
del im
elif manipulation.startswith('gamma'):
gamma = float(manipulation[5:])
# alternatively use skimage.exposure.adjust_gamma
# img = skimage.exposure.adjust_gamma(img, gamma)
im_decoded = np.uint8(cv2.pow(img / 255., gamma)*255.)
elif manipulation.startswith('bicubic'):
scale = float(manipulation[7:])
im_decoded = cv2.resize(img,(0,0), fx=scale, fy=scale, interpolation = cv2.INTER_CUBIC)
else:
assert False
return im_decoded
def crop(img):
width, height = img.size # Get dimensions
left = (width - 128) / 2
top = (height - 128) / 2
right = (width + 128) / 2
bottom = (height + 128) / 2
#center = randint(300, 1200)
#left = center - 299
#top = center -299
#right = center + 299
#bottom = center + 299
return np.array(img.crop((left, top, right, bottom)))
def get_crop(img, crop_size, random_crop=True):
center_x, center_y = img.shape[1] // 2, img.shape[0] // 2
half_crop = crop_size // 2
pad_x = max(0, crop_size - img.shape[1])
pad_y = max(0, crop_size - img.shape[0])
if (pad_x > 0) or (pad_y > 0):
img = np.pad(img, ((pad_y//2, pad_y - pad_y//2), (pad_x//2, pad_x - pad_x//2), (0,0)), mode='wrap')
center_x, center_y = img.shape[1] // 2, img.shape[0] // 2
if random_crop:
freedom_x, freedom_y = img.shape[1] - crop_size, img.shape[0] - crop_size
if freedom_x > 0:
center_x += np.random.randint(math.ceil(-freedom_x/2), freedom_x - math.floor(freedom_x/2) )
if freedom_y > 0:
center_y += np.random.randint(math.ceil(-freedom_y/2), freedom_y - math.floor(freedom_y/2) )
return img[center_y - half_crop : center_y + crop_size - half_crop, center_x - half_crop : center_x + crop_size - half_crop]
def read_and_resize(filepath):
#im_array = np.array(Image.open(filepath).convert('RGB'), dtype="uint8")
im_array = np.array(Image.open(filepath), dtype="uint8")
#pil_im = Image.fromarray(im_array)
#im_array = np.array(cv2.imread(filepath))
#new_img = pil_im.resize(( , img_size_2))
#w, h = pil_im.size
#w, h = pil_im.size
#img = pil_im.crop((w // 2 - 128, h // 2 - 128, w // 2 + 128, h // 2 + 128))
#new_array = np.array(crop(pil_im))
#print(new_array.shape)
#img = crop_img(pil_im,0,np.random.randint(0, 5))
#img_result = []
#for i in range(0,10):
# img_result.append(crop(pil_im))
new_array = np.array(get_crop(im_array, 512, True))
#new_array = np.array(img.resize((img_size_1 , img_size_2)))
#new_array = np.array(img)
return new_array
def read_and_resize_test(filepath):
manip = 0
#print(filepath)
if ('_manip.tif' in filepath):
manip = 1
#print(1)
else:
manip = 0
#print(0)
#im_array = np.array(Image.open(filepath).convert('RGB'), dtype="uint8")
im_array = np.array(Image.open(filepath), dtype="uint8")
#pil_im = Image.fromarray(im_array)
#im_array = np.array(cv2.imread(filepath))
#w, h = pil_im.size
#new_img = pil_im.resize(( , img_size_2))
#w, h = pil_im.size
if (manip == 1):
img2 = random_manipulation(im_array)
new_array = np.array(get_crop(img2,512,True))
else:
new_array = im_array #np.array(get_crop(im_array,224,True))
#print(new_array.shape)
#img2 = img2.crop((w // 2 - 128, h // 2 - 128, w // 2 + 128, h // 2 + 128))
#img = crop_img(pil_im,0,np.random.randint(0, 5))
#new_array = np.array(pil_im.resize((img_size_1 , img_size_2)))
#new_array = np.array(get_crop(img2,128,True))
#new_array = np.array(img)
#print(new_array.shape)
return new_array
def label_transform(labels):
labels = pd.get_dummies(pd.Series(labels))
label_index = labels.columns.values
return labels, label_index
#X_train = np.array([read_and_resize(filepath) for filepath in list_train])
#X_test = np.array([read_and_resize_test(filepath) for filepath in list_test])
X_train_data = []
X_test_data = []
X_labels = []
for i in range(0,len(list_train)-14330):
#print(list_train[i],' ', len(list_train),' - ', y)
xu = read_and_resize(list_train[i])
if (xu.shape == (512,512,3)):
X_train_data.append(xu)
X_labels.append(get_class_from_path(list_train[i]))
#print(xu.shape, '-', get_class_from_path(list_train[i]))
#print(np.array(X_train_data).shape, '-', len(list_train))
X_train = np.array(X_train_data)
print('Train shape: ', X_train.shape)
for i in range(0,len(list_test)-2630):
X_test_data.append(read_and_resize_test(str(list_test[i])))
#print(list_test[i],' ', len(list_test),' file: ', filepath)
#print(np.array(X_test_data).shape)
X_test = np.array(X_test_data)
print('Test shape: ', X_test.shape)
#labels = [get_class_from_path(filepath) for filepath in list_train]
y, label_index = label_transform(X_labels)
y = np.array(y)
from keras.models import Model
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard
from keras import optimizers, losses, activations, models
from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, GlobalMaxPool2D, Concatenate, GlobalMaxPooling2D
from keras.applications.xception import Xception
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.applications.mobilenet import MobileNet
from keras.optimizers import Adam
inp = Input(shape=(img_size_1, img_size_2, 3), name="X_1") #(img_size_1, img_size_2, 3)
nclass = len(label_index)
def get_model():
base_model = InceptionResNetV2(include_top=False, weights='imagenet', input_tensor=inp, classes=nclass)
x = base_model.output
x1 = GlobalMaxPooling2D()(x)
merge_one = Dense(1024, activation='relu', name='fc2')(x1)
merge_one = Dropout(0.5)(merge_one)
merge_one = Dense(256, activation='relu', name='fc3')(merge_one)
merge_one = Dropout(0.5)(merge_one)
predictions = Dense(nclass, activation='softmax')(merge_one)
model = Model(input=base_model.input, output=predictions)
#model.load_weights('weightsV2.best.hdf5')
sgd = Adam(lr=1e-4, decay=1e-5)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
return model
model = get_model()
#print('Train shape: ',X_train.shape)
file_path="weightsV2.best.hdf5"
model.summary()
model.load_weights('weightsV2.best.hdf5')
checkpoint = ModelCheckpoint(file_path, monitor='val_acc', verbose=1, save_best_only=True,mode='max')
early = EarlyStopping(monitor="val_loss", patience=15)
callbacks_list = [checkpoint, early] #early
model.fit(X_train, y, validation_split=0.1, epochs=100, shuffle=True, verbose=1, batch_size = 22,
callbacks=callbacks_list)
#print(history)
#model.save_weights('my_model_weights2.h5')
#model.load_weights(file_path)
predicts = model.predict(X_test)
predicts = np.argmax(predicts, axis=1)
predicts = [label_index[p] for p in predicts]
df = | pd.DataFrame(columns=['fname', 'camera']) | pandas.DataFrame |
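# A hedged sketch of the usual final step (assumed continuation, not shown in the
# snippet above): fill the empty frame with one row per test image and write a
# submission file. The output name 'submission.csv' is an assumption.
df['fname'] = index[:len(predicts)]
df['camera'] = predicts
df.to_csv('submission.csv', index=False)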
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could allow it in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
self.assertTrue(td.__add__(other) is NotImplemented)
self.assertTrue(td.__sub__(other) is NotImplemented)
self.assertTrue(td.__truediv__(other) is NotImplemented)
self.assertTrue(td.__mul__(other) is NotImplemented)
self.assertTrue(td.__floordiv__(td) is NotImplemented)
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with tm.assertRaises(TypeError):
l + r
with tm.assertRaises(TypeError):
l > r
with tm.assertRaises(TypeError):
l == r
with tm.assertRaises(TypeError):
l != r
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
self.assertEqual(result, expected)
result = td.to_frame().mean()
self.assertEqual(result[0], expected)
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
self.assertEqual(result, expected)
result = td.median()
expected = to_timedelta('00:00:09')
self.assertEqual(result, expected)
result = td.to_frame().median()
self.assertEqual(result[0], expected)
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
self.assertEqual(result, expected)
result = td.to_frame().sum()
self.assertEqual(result[0], expected)
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
self.assertEqual(result, expected)
result = td.to_frame().std()
self.assertEqual(result[0], expected)
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
self.assertRaises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
self.assertEqual(s.diff().median(), timedelta(days=4))
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
self.assertEqual(s.diff().median(), timedelta(days=6))
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
self.assertEqual(result, expected_add)
result = base - offset
self.assertEqual(result, expected_sub)
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
self.assertEqual(actual, scalar2)
actual = scalar2 - scalar1
self.assertEqual(actual, scalar1)
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = scalar1 + s1
assert_series_equal(actual, s2)
actual = s2 - scalar1
assert_series_equal(actual, s1)
actual = -scalar1 + s2
assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 + NA
assert_series_equal(actual, sn)
actual = NA + s1
assert_series_equal(actual, sn)
actual = s1 - NA
assert_series_equal(actual, sn)
actual = -NA + s1
assert_series_equal(actual, sn)
actual = s1 + pd.NaT
assert_series_equal(actual, sn)
actual = s2 - pd.NaT
assert_series_equal(actual, sn)
actual = s1 + df1
assert_frame_equal(actual, df2)
actual = s2 - df1
assert_frame_equal(actual, df1)
actual = df1 + s1
assert_frame_equal(actual, df2)
actual = df2 - s1
assert_frame_equal(actual, df1)
actual = df1 + df1
assert_frame_equal(actual, df2)
actual = df2 - df1
assert_frame_equal(actual, df1)
actual = df1 + scalar1
assert_frame_equal(actual, df2)
actual = df2 - scalar1
assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 + NA
assert_frame_equal(actual, dfn)
actual = df1 - NA
assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
assert_frame_equal(actual, dfn)
def test_compare_timedelta_series(self):
# regression test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_compare_timedelta_ndarray(self):
# GH11835
periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
arr = np.array(periods)
result = arr[0] > arr
expected = np.array([False, False])
self.assert_numpy_array_equal(result, expected)
class TestSlicing(tm.TestCase):
def test_tdi_ops_attributes(self):
rng = timedelta_range('2 days', periods=5, freq='2D', name='x')
result = rng + 1
exp = timedelta_range('4 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
result = rng - 2
exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
result = rng * 2
exp = timedelta_range('4 days', periods=5, freq='4D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '4D')
result = rng / 2
exp = timedelta_range('1 days', periods=5, freq='D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'D')
result = -rng
exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '-2D')
rng = pd.timedelta_range('-2 days', periods=5, freq='D', name='x')
result = abs(rng)
exp = TimedeltaIndex(['2 days', '1 days', '0 days', '1 days',
'2 days'], name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, None)
def test_add_overflow(self):
# see gh-14068
msg = "too (big|large) to convert"
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta(106580, 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
Timestamp('2000') + to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta([106580], 'D') + Timestamp('2000')
with tm.assertRaisesRegexp(OverflowError, msg):
Timestamp('2000') + to_timedelta([106580], 'D')
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta([_NaT]) - Timedelta('1 days')
with tm.assertRaisesRegexp(OverflowError, msg):
to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with tm.assertRaisesRegexp(OverflowError, msg):
(to_timedelta([_NaT, '5 days', '1 hours']) -
to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = | to_timedelta([pd.NaT]) | pandas.to_timedelta |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 28 22:50:43 2018
@author: kennedy
"""
"""
Credit:
https://www.quantopian.com/posts/technical-analysis-indicators-without-talib-code
Bug fix by Kennedy:
Works when imported as a library and returns only the column holding each
indicator's result, so every indicator can be used directly as a predictor
for forecasting stock returns with machine-learning models.
Configured to build multiple predictive features at once.
"""
import pandas as pd
import numpy as np
class TechnicalIndicators:
def moving_average(df, n):
"""Calculate the moving average for the given data.
:param df: pandas.DataFrame
:param n: window
:return: pandas.DataFrame
"""
MA = pd.Series(df['Close'].rolling(n, min_periods=n).mean(), name='MA_{}'.format(n))
return MA
def exponential_moving_average(df, n):
"""
:param df: pandas.DataFrame
:param n: window for the exponential moving average
:return: pandas.DataFrame
"""
EMA = pd.Series(df['Close'].ewm(span=n, min_periods=n).mean(), name='EMA_' + str(n))
return EMA
def momentum(df, n):
"""
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
M = pd.Series(df['Close'].diff(n), name='Momentum_' + str(n))
return M
def rate_of_change(df, n):
"""
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
M = df['Close'].diff(n - 1)
N = df['Close'].shift(n - 1)
ROC = pd.Series(M / N, name='ROC_' + str(n))
return ROC
def average_true_range(df, n):
"""
:param df: pandas.DataFrame
:param n: data window
:return: pandas.DataFrame
"""
i = 0
TR_l = [0]
while i < df.index[-1]:
TR = max(df.loc[i + 1, 'High'], df.loc[i, 'Close']) - min(df.loc[i + 1, 'Low'], df.loc[i, 'Close'])
TR_l.append(TR)
i = i + 1
TR_s = | pd.Series(TR_l) | pandas.Series |
import pandas as p
from pandas import DataFrame as df
import matplotlib.pyplot as pl
from sklearn.linear_model import LinearRegression
data=p.read_csv('cost_revenue_clean.csv')
x= | df(data,columns=['production_budget']) | pandas.DataFrame |
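# A possible continuation (a sketch only; the snippet above stops after building `x`):
# take revenue as the target, fit the regression and plot the fitted line.
# The column name 'worldwide_gross' is an assumption about cost_revenue_clean.csv.
y = df(data, columns=['worldwide_gross'])
reg = LinearRegression()
reg.fit(x, y)
pl.scatter(x, y, alpha=0.3)
pl.plot(x, reg.predict(x), color='red', linewidth=3)
pl.show()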
# %% imports and settings
from pandarallel import pandarallel
import datar.all as r
from datar import f
import plotnine as p9
import os
import numpy as np
import pandas as pd
import seaborn as sns
sns.set()
pd.set_option("max_colwidth", 250) # column最大宽度
pd.set_option("display.width", 250) # dataframe宽度
pd.set_option("display.max_columns", None)
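# pandarallel is imported above but never initialized in this snippet. A minimal
# usage sketch (my addition, not from the original): start the worker pool, then
# swap DataFrame.apply for parallel_apply on a toy frame.
pandarallel.initialize(progress_bar=False)
demo = pd.DataFrame({"x": np.arange(8)})
demo["x_squared"] = demo.parallel_apply(lambda row: row["x"] ** 2, axis=1)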
# Module: Preprocess
# Author: <NAME> <<EMAIL>>
# License: MIT
import pandas as pd
import numpy as np
import ipywidgets as wg
from IPython.display import display
from ipywidgets import Layout
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin, clone
from sklearn.impute._base import _BaseImputer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
from sklearn.cross_decomposition import PLSRegression
from sklearn.manifold import TSNE
from sklearn.decomposition import IncrementalPCA
from sklearn.preprocessing import KBinsDiscretizer
from pyod.models.knn import KNN
from pyod.models.iforest import IForest
from pyod.models.pca import PCA as PCA_od
from sklearn import cluster
from scipy import stats
from sklearn.ensemble import RandomForestClassifier as rfc
from sklearn.ensemble import RandomForestRegressor as rfr
from lightgbm import LGBMClassifier as lgbmc
from lightgbm import LGBMRegressor as lgbmr
import sys
import gc
from sklearn.pipeline import Pipeline
from sklearn import metrics
from datetime import datetime
import calendar
from sklearn.preprocessing import LabelEncoder
from collections import defaultdict
from typing import Optional, Union
from pycaret.internal.logging import get_logger
from pycaret.internal.utils import infer_ml_usecase
from sklearn.utils.validation import check_is_fitted, check_X_y, check_random_state
from sklearn.utils.validation import _deprecate_positional_args
from sklearn.utils import _safe_indexing
from sklearn.exceptions import NotFittedError
pd.set_option("display.max_columns", 500)
# -*- coding: utf-8 -*-
"""
Retrieve students' emails with attachments (1:1)
=================================================
Retrieves files attached to students' emails, such as a project submission.
The program assumes there is only one attachment per student and that all the
emails have been archived in a single folder of a mailbox, here :epkg:`gmail`.
The folder contents must be deleted to refresh the whole set of projects.
Otherwise, the code is designed to update the folder from the latest
emails recorded in the *mails.txt* file.
Retrieval usually happens in two steps.
The first step fetches every email and builds a first archive
without taking groups into account. An :epkg:`Excel` file is then created
which looks roughly like this:
.. runpython::

    from pandas import DataFrame
    df = DataFrame(dict(groupe=[1, 1, 2], mail=['a.a@m', 'b.b@m', 'c.c@m'],
                        sujet=['sub1', 'sub1', 'sub2']))
    print(df)

Everything except this file is then deleted and all projects are fetched a
second time so that only one folder per group is created.
.. _script-fetch-students-projets-py:
"""
#########################################
# import
import sys
import os
import pandas
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import keyring
#################################
# Retrieval parameters: all emails must live in the same
# folder of the mailbox.
server = "imap.gmail.com"
school = "ASSAS"
date = "1-May-2018"
pattern = "Python_{0}_Projet_2018"
group_def = "groupes.xlsx"
col_subject, col_group, col_mail, col_student = "sujet", "groupe", "mail", "Nom"
if school == 'ENSAE':
do_mail = True
mailfolder = ["ensae/ENSAE_1A"]
dest_folder = os.path.normpath(os.path.abspath(os.path.join(
*([os.path.dirname(__file__)] + ([".."] * 5) + ["_data", "ecole", "ENSAE", "2017-2018", "1A_projet"]))))
print("dest", dest_folder)
elif school == 'ASSAS':
do_mail = True
mailfolder = ["ensae/assas"]
dest_folder = os.path.normpath(os.path.abspath(os.path.join(
*([os.path.dirname(__file__)] + ([".."] * 5) + ["_data", "ecole", "assas", "2017-2018", "projet"]))))
print("dest", dest_folder)
else:
raise NotImplementedError()
###########################
# End of customization.
path_df = os.path.join(dest_folder, group_def)
if os.path.exists(path_df):
df_group = pandas.read_excel(path_df)
if col_subject not in df_group.columns:
raise Exception('{0} not in {1}'.format(
col_subject, list(df_group.columns)))
if col_mail not in df_group.columns:
raise Exception('{0} not in {1}'.format(
col_mail, list(df_group.columns)))
if col_group not in df_group.columns:
raise Exception('{0} not in {1}'.format(
col_group, list(df_group.columns)))
else:
df_group = None
basename = pattern.format(mailfolder[0].split("/")[-1])
filename_zip = os.path.join(dest_folder, basename + ".zip")
convert_files = True
filename_mails = os.path.join(dest_folder, "emails.txt")
filename_excel = os.path.join(dest_folder, basename + ".xlsx")
#########################################
# Creates the folder if it does not exist.
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
#########################################
# Logging and import of the functions we need.
from pyquickhelper.loghelper import fLOG # fetch_student_projects_from_gmail
fLOG(OutputPrint=True)
from ensae_teaching_cs.automation_students import ProjectsRepository, grab_addresses
from pyquickhelper.filehelper import encrypt_stream
from pymmails import MailBoxImap, EmailMessageRenderer, EmailMessageListRenderer
from pymmails.render.email_message_style import template_email_html_short
###########
# Credentials. We use :epkg:`keyring` to retrieve the passwords.
user = keyring.get_password("gmail", os.environ["COMPUTERNAME"] + "user")
pwd = keyring.get_password("gmail", os.environ["COMPUTERNAME"] + "pwd")
password = keyring.get_password("enc", os.environ["COMPUTERNAME"] + "pwd")
if user is None or pwd is None or password is None:
print("ERROR: password or user or crypting password is empty, you should execute:")
print(
'keyring.set_password("gmail", os.environ["COMPUTERNAME"] + "user", "..")')
print(
'keyring.set_password("gmail", os.environ["COMPUTERNAME"] + "pwd", "..")')
print(
'keyring.set_password("enc", os.environ["COMPUTERNAME"] + "pwd", "..")')
print("Exit")
sys.exit(0)
password = bytes(password, "ascii")
###########
# Addresses to skip...
skip_address = {
'<EMAIL>',
'<EMAIL>',
}
###############
# Gathers mails and creates a dataframe if it does not exist.
fLOG("[fetch_student_projects_from_gmail] start")
if df_group is not None:
if os.path.exists(filename_mails):
with open(filename_mails, "r", encoding="utf8") as f:
lines = f.readlines()
emails = [l.strip("\r\t\n ") for l in lines]
emails = [_ for _ in emails if _ not in skip_address]
else:
box = MailBoxImap(user, pwd, server, ssl=True, fLOG=fLOG)
box.login()
emails = grab_addresses(box, mailfolder, date, fLOG=fLOG)
box.logout()
emails = list(sorted(set([_.strip("<>").lower()
for _ in emails if _ not in skip_address])))
with open(filename_mails, "w", encoding="utf8") as f:
f.write("\n".join(emails))
else:
emails = [_ for _ in df_group[col_mail] if _ not in skip_address]
#####################
# Creates a dataframe.
if df_group is None:
import pandas
rows = [{col_mail: mail, col_subject: "?", col_group: i + 1}
        for i, mail in enumerate(emails)]
df = pandas.DataFrame(rows)
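#####################
# The module docstring describes writing this first pass out as an Excel file so
# the groups can be edited by hand. A hedged sketch of that step (my addition;
# the excerpt stops before it):
df.to_excel(path_df, index=False)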
# Dependencies
import warnings
warnings.filterwarnings("ignore")
warnings.simplefilter('ignore', UserWarning)
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import sys
import argparse
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
import itertools
from scipy import stats
from sklearn.metrics import auc, accuracy_score, roc_curve, precision_score, recall_score, f1_score, roc_auc_score
from lightgbm import LGBMClassifier
import lightgbm as lgb
import matplotlib.gridspec as gridspec
import seaborn as sns
import pylab as plot
import pandas
import time
from statsmodels.stats.outliers_influence import variance_inflation_factor
from joblib import Parallel, delayed
# Calculate the Variance Inflation Factor for a pandas DataFrame, iteratively
# dropping the most collinear column until every remaining VIF falls below `thresh`.
def calculate_vif_(X, thresh=5.0):
variables = [X.columns[i] for i in range(X.shape[1])]
dropped=True
while dropped:
dropped=False
print(len(variables))
vif = Parallel(n_jobs=1,verbose=5)(delayed(variance_inflation_factor)(X[variables].values, ix) for ix in range(len(variables)))
print(vif)
maxloc = vif.index(max(vif))
if max(vif) > thresh:
print(time.ctime() + ' dropping \'' + X[variables].columns[maxloc] + '\' at index: ' + str(maxloc))
variables.pop(maxloc)
dropped=True
print('Remaining variables:')
print([variables])
return X[[i for i in variables]]
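# Usage sketch for calculate_vif_ (my addition, not in the original script): a
# small frame with one nearly collinear column that the routine should drop.
# thresh=5.0 follows the common rule of thumb and is an assumption here.
def _vif_demo():
    rng = np.random.RandomState(0)
    demo = pd.DataFrame(rng.normal(size=(200, 3)), columns=["a", "b", "c"])
    demo["d"] = demo["a"] * 2 + rng.normal(scale=0.01, size=200)  # nearly collinear
    return calculate_vif_(demo, thresh=5.0)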
def parse_args():
parser = argparse.ArgumentParser(description = "", epilog = "")
parser.add_argument("-df", "--dataFolder", help="Path to where the training data (TCGA, DepMap, Embedding) is stored (REQUIRED).", dest="dataFolder")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
available_samples = ["s1"]
cancer_type_list = ["liver","breast","bladder", "colon", "ovarian", "kidney", "leukemia","pancreatic","lung"]
orderFeatures = ["essentiality","mutation","expression", "e0", "e1", "e2", "e3", "e4", "e5", "e6", "e7", "e8", "e9", "e10", "e11", "e12", "e13", "e14", "e15", "e16", "e17", "e18", "e19", "e20", "e21", "e22", "e23", "e24", "e25", "e26", "e27", "e28", "e29", "e30", "e31", "label"]
features = ["essentiality","mutation","expression", "e0", "e1", "e2", "e3", "e4", "e5", "e6", "e7", "e8", "e9", "e10", "e11", "e12", "e13", "e14", "e15", "e16", "e17", "e18", "e19", "e20", "e21", "e22", "e23", "e24", "e25", "e26", "e27", "e28", "e29", "e30", "e31"]
for cancer_type in cancer_type_list:
for inx, sampleNumber in enumerate(available_samples):
# Load dataset
data = pandas.read_csv(args.dataFolder + cancer_type.capitalize() + "/" + cancer_type + "_training_data_" + sampleNumber + ".dat", header=0, sep=",")
data.drop("gene", axis=1, inplace=True)
data = data[data['label'] != 2]
dataframePositive = data[data['label'] == 1]
dataframeNegative = data[data['label'] == 0]
positiveSize = dataframePositive.shape[0]
negativeSize = dataframeNegative.shape[0]
# Set them the same size
if(positiveSize > negativeSize):
dataframePositive = dataframePositive.head(-(positiveSize-negativeSize))
elif(negativeSize > positiveSize):
dataframeNegative = dataframeNegative.head(-(negativeSize-positiveSize))
data = dataframePositive.copy()
data = pd.concat([dataframePositive, dataframeNegative])
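# Hedged sketch (my addition; the original excerpt stops at the concat): shuffle
# the balanced frame and feed it to the StratifiedKFold / LGBMClassifier that are
# imported above. Fold count and random_state values are assumptions.
data = data.sample(frac=1, random_state=0).reset_index(drop=True)
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
for train_idx, test_idx in skf.split(data[features], data["label"]):
    model = LGBMClassifier().fit(data[features].iloc[train_idx], data["label"].iloc[train_idx])
    preds = model.predict(data[features].iloc[test_idx])
    print(accuracy_score(data["label"].iloc[test_idx], preds))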
"""
A set of classes for aggregation of TERA data sources into common formats.
"""
from rdflib import Graph, Namespace, Literal, URIRef, BNode
from rdflib.namespace import RDF, OWL, RDFS
UNIT = Namespace('http://qudt.org/vocab/unit#')
import pandas as pd
import validators
import glob
import math
from tqdm import tqdm
import warnings
import copy
import tera.utils as ut
nan_values = ['nan', float('nan'),'--','-X','NA','NC',-1,'','sp.', -1,'sp,','var.','variant','NR','sp','ssp','ssp.','ssp,']
class DataObject:
def __init__(self, namespace = 'http://www.example.org/', verbose = True, name = 'Data Object'):
"""
Base class for aggregation of data.
Parameters
----------
namespace : str
Base URI for the data set.
verbose : bool
"""
self.graph = Graph()
self.namespace = Namespace(namespace)
self.name = name
self.verbose = verbose
def __add__(self, other):
c = copy.deepcopy(self)
c.graph += other.graph
return c
def __str__(self):
return self.name
def __dict__(self):
return {
'namespace':self.namespace,
'num_triples':len(self.graph)
}
def __del__(self):
self.graph = Graph()
def save(self, path):
"""Save graph to file.
Parameters
----------
path : str
ex: file.nt
"""
self.graph.serialize(path, format=path.split('.').pop(-1))
def replace(self, converted):
"""Replace old entities with new in data object.
Usefull after converting between datasets.
Parameters
----------
converted : list
list of (old, new) tuples.
"""
if len(converted) < 1:
warnings.warn('Empty mapping list.')
return
tmp = set()
for old, new in converted:
triples = self.graph.triples((old,None,None))
tmp |= set([(new,p,o) for _,p,o in triples])
triples = self.graph.triples((None, None, old))
tmp |= set([(s,p,new) for s,p,_ in triples])
self.graph.remove((old,None,None))
self.graph.remove((None,None,old))
for t in tmp:
self.graph.add(t)
def apply_func(self, func, dataframe, cols, sub_bar=False):
pbar = None
if self.verbose and not sub_bar:
pbar = tqdm(total=len(dataframe.index),desc=self.name)
for row in zip(*[dataframe[c] for c in cols]):
func(row)
if pbar: pbar.update(1)
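# Minimal usage sketch for DataObject (my addition, not part of the library):
# instantiate it, add one triple straight onto the underlying rdflib graph, and
# serialize. Only constructor arguments shown above are used; the output path is
# a placeholder.
def _dataobject_demo(path='example.nt'):
    obj = DataObject(namespace='http://www.example.org/', name='Example')
    obj.graph.add((obj.namespace['thing/1'], RDFS.label, Literal('a label')))
    obj.save(path)
    return obj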
class Taxonomy(DataObject):
def __init__(self,
namespace = 'https://www.ncbi.nlm.nih.gov/taxonomy/',
name = 'NCBI Taxonomy',
verbose = True,
directory = None):
"""
Aggregation of the NCBI Taxonomy.
Parameters
----------
directory : str
Path to data set. Downloaded from ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/new_taxdump/new_taxdump.zip
"""
super(Taxonomy, self).__init__(namespace, verbose, name)
if directory:
self._load_ncbi_taxonomy(directory)
self.verbose = verbose
def _add_subproperties(self, uri, pref = False):
self.graph.add((uri,OWL.subPropertyOf,RDFS.label))
if pref:
self.graph.add((uri,OWL.subPropertyOf,URIRef('http://www.w3.org/2004/02/skos/core#prefLabel')))
def _load_ncbi_taxonomy(self, directory):
self._load_hierarchy(directory+'nodes.dmp')
self._load_divisions(directory+'division.dmp')
self._load_names(directory+'names.dmp')
self._add_domain_and_range_triples()
self._add_disjoint_axioms()
def _load_hierarchy(self, path):
df = pd.read_csv(path, sep='|', usecols=[0,1,2,4], names=['child','parent','rank','division'], na_values = nan_values, dtype = str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
c,p,r,d = row
c = self.namespace['taxon/'+str(c)]
rc = r
r = r.replace(' ','_')
if r != 'no_rank':
self.graph.add((c, self.namespace['rank'], self.namespace['rank/'+r]))
self.graph.add((self.namespace['rank/'+r], RDFS.label, Literal(rc)))
self.graph.add((self.namespace['rank/'+r], RDF.type, self.namespace['Rank']))
p = self.namespace['taxon/'+str(p)]
d = str(d).replace(' ','_')
d = self.namespace['division/'+str(d)]
if r == 'species': #species are treated as instances
self.graph.add((c,RDF.type, p))
self.graph.add((c, RDF.type, d))
else:
self.graph.add((c,RDFS.subClassOf, p))
self.graph.add((c, RDFS.subClassOf, d))
self.apply_func(func, df, ['child','parent','rank','division'])
def _load_names(self, path):
df = pd.read_csv(path, sep='|', usecols=[0,1,2,3], names=['taxon','name','unique_name','name_type'],na_values = nan_values,dtype = str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
c,n,un,nt = row
c = self.namespace['taxon/'+str(c)]
n = Literal(n)
un = Literal(un)
if len(un) > 0:
self.graph.add((c, self.namespace['uniqueName'], un))
self._add_subproperties(self.namespace['uniqueName'], pref=True)
if len(n) > 0:
ntl = Literal(nt)
nt = self.namespace[nt.replace(' ','_')]
self._add_subproperties(nt,pref=False)
self.graph.add((c,nt,n))
self.graph.add((nt,RDFS.label,ntl))
self.graph.add((nt,RDFS.domain,self.namespace['Taxon']))
self.apply_func(func, df, ['taxon','name','unique_name','name_type'])
def _load_divisions(self, path):
df = pd.read_csv(path, sep='|', usecols=[0,1,2], names=['division','acronym','name'], na_values = nan_values, dtype = str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
d,a,n = row
d = self.namespace['division/'+str(d)]
self.graph.add((d,RDF.type,self.namespace['Division']))
self.graph.add((d,RDFS.label,Literal(n)))
#self.graph.add((d,RDFS.label,Literal(a)))
self.apply_func(func, df, ['division','acronym','name'])
def _add_domain_and_range_triples(self):
self.graph.add((self.namespace['rank'],RDFS.domain,self.namespace['Taxon']))
self.graph.add((self.namespace['rank'],RDFS.range,self.namespace['Rank']))
def _add_disjoint_axioms(self):
for d in [self.namespace['division/1'], #Invertebrates
self.namespace['division/2'], #Mammals
self.namespace['division/4'], #Plants and Fungi
self.namespace['division/5'], #Primates
self.namespace['division/6'], #Rodents
self.namespace['division/9'], #Viruses
self.namespace['division/10']]: #Vertebrates
self.graph.add((self.namespace['division/0'], #Bacteria
OWL.disjointWith, d))
for d in [self.namespace['division/2'], #Mammals
self.namespace['division/4'], #Plants and Fungi
self.namespace['division/5'], #Primates
self.namespace['division/6'], #Rodents
self.namespace['division/9'], #Viruses
self.namespace['division/10']]: #Vertebrates
self.graph.add((self.namespace['division/1'], #Invertebrates
OWL.disjointWith, d))
for d in [self.namespace['division/4'], #Plants and Fungi
self.namespace['division/9'], #Viruses
self.namespace['division/10']]: #Vertebrates
self.graph.add((self.namespace['division/2'], #Mammals
OWL.disjointWith, d))
for d in [self.namespace['division/2'], #Mammals
self.namespace['division/4'], #Plants and Fungi
self.namespace['division/5'], #Primates
self.namespace['division/6'], #Rodents
self.namespace['division/10']]: #Vertebrates
self.graph.add((self.namespace['division/3'], #Phages
OWL.disjointWith, d))
for d in [self.namespace['division/2'], #Mammals
self.namespace['division/5'], #Primates
self.namespace['division/6'], #Rodents
self.namespace['division/10']]: #Vertebrates
self.graph.add((self.namespace['division/4'], #Plants and Fungi
OWL.disjointWith, d))
for d in [self.namespace['division/1']]: #Invertebrates
self.graph.add((self.namespace['division/5'], #Primates
OWL.disjointWith, d))
for d in [self.namespace['division/1']]: #Invertebrates
self.graph.add((self.namespace['division/6'], #Rodents
OWL.disjointWith, d))
for d in [self.namespace['division/1'], #Invertebrates
self.namespace['division/0'], #Bacteria
self.namespace['division/2'], #Mammals
self.namespace['division/4'], #Plants and Fungi
self.namespace['division/5'], #Primates
self.namespace['division/6'], #Rodents
self.namespace['division/10']]: #Vertebrates
self.graph.add((self.namespace['division/9'], #Viruses
OWL.disjointWith, d))
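# Usage sketch for Taxonomy (my addition): point it at an unpacked NCBI
# new_taxdump directory and serialize the result. The directory path is a
# placeholder assumption; it must contain nodes.dmp, names.dmp and division.dmp.
def _taxonomy_demo(directory='./taxdump/', out='ncbi_taxonomy.nt'):
    taxonomy = Taxonomy(directory=directory)
    taxonomy.save(out)
    return taxonomy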
class Traits(DataObject):
def __init__(self,
namespace = 'https://eol.org/pages/',
name = 'EOL Traits',
verbose = True,
directory = None):
"""
Encyclopedia of Life Traits.
Parameters
----------
directory : str
Path to data set. See https://opendata.eol.org/dataset/all-trait-data-large
"""
super(Traits, self).__init__(namespace, verbose, name)
if directory:
self._load_eol_traits(directory)
def _load_eol_traits(self, directory):
self._load_traits(directory+'trait_bank/traits.csv')
self._load_desc(directory+'trait_bank/terms.csv')
for f in glob.glob(directory+'eol_rels/*.csv'):
self._load_eol_subclasses(f)
def _load_traits(self, path):
df = pd.read_csv(path, sep=',', usecols=['page_id','predicate','value_uri'], na_values = nan_values, dtype=str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
s,p,o = row
s = self.namespace[s]
try:
val = validators.url(o)
o = URIRef(o)
except TypeError:
o = Literal(o)
val = True
if validators.url(s) and validators.url(p) and val:
self.graph.add((URIRef(s),URIRef(p),o))
self.apply_func(func, df, ['page_id','predicate','value_uri'])
def _load_literal_traits(self,path):
df = pd.read_csv(path, sep=',', usecols=['page_id','predicate','measurement','units_uri'], na_values = nan_values, dtype=str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
s,p,o,u = row
s = self.namespace[s]
try:
o = Literal(o)
u = URIRef(u)
bnode = BNode()
self.graph.add((bnode,RDF.value,o))
self.graph.add((bnode,UNIT.units,u))
self.graph.add((URIRef(s),URIRef(p),bnode))
except TypeError:
pass
self.apply_func(func, df, ['page_id', 'predicate', 'measurement', 'units_uri'])
def _load_desc(self, path):
df = pd.read_csv(path, sep=',', usecols=['uri','name'], na_values = nan_values, dtype=str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
def func(row):
uri,name = row
if validators.url(uri) and name:
self.graph.add((URIRef(uri),RDFS.label,Literal(name)))
self.apply_func(func, df, ['uri','name'])
def _load_eol_subclasses(self, path):
try:
try:
df = pd.read_csv(path,sep=',',usecols=['child','parent'],na_values = nan_values, dtype=str)
df.dropna(inplace=True)
df = df.apply(lambda x: x.str.strip())
except ValueError:
df = pd.read_csv(path, sep=',', header=None, na_values=nan_values, dtype=str)
#!/usr/bin/env python3
# coding: utf-8
# In[3]:
import csv
import pandas as pd
from connected_component import connected_component_subgraphs as ccs
from strongly_connected_component import strongly_connected_components as scc
# In[4]:
'''
df = pd.read_csv("/root/.encrypted/.pythonSai/moreno_highschool/out.moreno_highschool_highschool", sep=" ", header=None, skiprows=2, names=["ndidfr", "ndidto", "weight"])
df = df[["ndidfr", "ndidto"]].dropna()
print(df.head())
'''
# ### Undirected Graph:
# In[5]:
import networkx as nx
import matplotlib.pyplot as plt
# In[6]:
'''
G=nx.Graph()
sdt = []
for index, row in df.iterrows():
if(row['ndidfr'] not in sdt):
G.add_node(row['ndidfr'])
if(row['ndidto'] not in sdt):
G.add_node(row['ndidto'])
for index, row in df.iterrows():
G.add_edges_from([(row['ndidfr'],row['ndidto'])])
plt.figure(num=None, figsize=(20, 20), dpi=80)
plt.axis('off')
fig = plt.figure(1)
nx.draw(G, with_labels=True, font_size=12)
# plt.savefig("/root/gitlabRepos/python/moreno_highschool/g.pdf", bbox_inches="tight")
plt.show()
# In[7]:
'''
import numpy as np
'''
sni = np.zeros(shape=(70,70), dtype=int)
for index, row in df.iterrows():
sni[int(row['ndidfr'])-1][int(row['ndidto'])-1] = 1
np.set_printoptions(threshold=np.inf)
tni = sni.transpose()
print(tni)
# In[8]:
nnd = []
for i in range(0, len(tni)):
tnl = list(tni[i])
for j in range(0, len(tnl)):
if(tnl[j]==1):
nnd.append([i+1, j+1])
# print(nnd)
ndf = pd.DataFrame(data=nnd, columns=['ndideg', 'ndidfr'])
print(ndf)
# In[9]:
G=nx.Graph()
sdt = []
for index, row in ndf.iterrows():
if(row['ndideg'] not in sdt):
G.add_node(row['ndideg'])
if(row['ndidfr'] not in sdt):
G.add_node(row['ndidfr'])
for index, row in ndf.iterrows():
G.add_edges_from([(row['ndidfr'],row['ndideg'])])
plt.figure(num=None, figsize=(20, 20), dpi=80)
plt.axis('off')
fig = plt.figure(1)
nx.draw(G, with_labels=True, font_size=12)
# plt.savefig("C:\\Users\\user\\Documents\\g.pdf", bbox_inches="tight")
plt.show()
# ##### KCore of graph:
# In[10]:
for i in range(0, len(G)):
graphs = ccs(nx.k_core(G,k=i))
if(graphs is None):
break
else:
for g in graphs:
print("This is the " + str(i) + " core of graph")
print(g.edges())
SCG=nx.Graph()
scs = []
for (i,j) in g.edges():
if(i not in scs):
SCG.add_node(i)
scs.append(i)
if(j not in scs):
SCG.add_node(j)
scs.append(j)
for (i,j) in g.edges():
SCG.add_edges_from([(i,j)])
plt.figure(num=None, figsize=(20, 20), dpi=80)
plt.axis('off')
fig = plt.figure(1)
nx.draw(SCG, with_labels=True, font_size=12)
plt.show()
# ### Directed Graph:
# In[11]:
G=nx.DiGraph()
sdt = []
for index, row in df.iterrows():
if(row['ndidfr'] not in sdt):
G.add_node(row['ndidfr'])
if(row['ndidto'] not in sdt):
G.add_node(row['ndidto'])
for index, row in df.iterrows():
G.add_edges_from([(row['ndidfr'],row['ndidto'])])
plt.figure(num=None, figsize=(20, 20), dpi=80)
plt.axis('off')
fig = plt.figure(1)
nx.draw(G, with_labels=True, font_size=12)
#plt.savefig("C:\\Users\\user\\Documents\\g.pdf", bbox_inches="tight")
plt.show()
# In[12]:
import numpy as np
sni = np.zeros(shape=(70,70), dtype=int)
for index, row in df.iterrows():
sni[int(row['ndidfr'])-1][int(row['ndidto'])-1] = 1
np.set_printoptions(threshold=np.inf)
tni = sni.transpose()
print(tni)
# In[13]:
nnd = []
for i in range(0, len(tni)):
tnl = list(tni[i])
for j in range(0, len(tnl)):
if(tnl[j]==1):
nnd.append([i+1, j+1])
# print(nnd)
ndf = pd.DataFrame(data=nnd, columns=['ndideg', 'ndidfr'])
print(ndf)
# In[14]:
G=nx.DiGraph()
sdt = []
for index, row in ndf.iterrows():
if(row['ndideg'] not in sdt):
G.add_node(row['ndideg'])
if(row['ndidfr'] not in sdt):
G.add_node(row['ndidfr'])
for index, row in ndf.iterrows():
G.add_edges_from([(row['ndidfr'],row['ndideg'])])
plt.figure(num=None, figsize=(20, 20), dpi=80)
plt.axis('off')
fig = plt.figure(1)
nx.draw(G, with_labels=True, font_size=12)
# plt.savefig("C:\\Users\\user\\Documents\\g.pdf", bbox_inches="tight")
plt.show()
'''
# #### KCore of directed graph:
# In[15]:
# Copyright (C) 2004-2019 by
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# All rights reserved.
# BSD license.
#
# Authors: <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
"""
Find the k-cores of a graph.
The k-core is found by recursively pruning nodes with degrees less than k.
See the following references for details:
An O(m) Algorithm for Cores Decomposition of Networks
<NAME> and <NAME>, 2003.
https://arxiv.org/abs/cs.DS/0310049
Generalized Cores
<NAME> and <NAME>, 2002.
https://arxiv.org/pdf/cs/0202039
For directed graphs a more general notion is that of D-cores which
looks at (k, l) restrictions on (in, out) degree. The (k, k) D-core
is the k-core.
D-cores: Measuring Collaboration of Directed Graphs Based on Degeneracy
<NAME>, <NAME>, <NAME>, ICDM 2011.
http://www.graphdegeneracy.org/dcores_ICDM_2011.pdf
Multi-scale structure and topological anomaly detection via a new network \
statistic: The onion decomposition
<NAME>, <NAME>, and <NAME>
Scientific Reports 6, 31708 (2016)
http://doi.org/10.1038/srep31708
"""
#import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import not_implemented_for
from networkx.algorithms.shortest_paths \
import single_source_shortest_path_length as sp_length
__all__ = ['core_number', 'find_cores', 'k_core', 'k_shell',
'k_crust', 'k_corona', 'k_truss', 'onion_layers']
def scc_connected_components(G):
preorder={}
isconnected = False
i=0 # Preorder counter
for source in G:
if source not in preorder:
i=i+1
preorder[source]=i
source_nbrs=G[source]
isconnected = False
else:
source_nbrs=G[source]
isconnected = True
for w in source_nbrs:
if w not in preorder:
preorder[w]=i
return i
def strongly_connected_components(G):
preorder={}
lowlink={}
scc_found={}
scc_queue = []
i=0 # Preorder counter
for source in G:
if source not in scc_found:
queue=[source]
while queue:
v=queue[-1]
if v not in preorder:
i=i+1
preorder[v]=i
done=1
v_nbrs=G[v]
for w in v_nbrs:
if w not in preorder:
queue.append(w)
done=0
break
if done==1:
lowlink[v]=preorder[v]
for w in v_nbrs:
if w not in scc_found:
if preorder[w]>preorder[v]:
lowlink[v]=min([lowlink[v],lowlink[w]])
else:
lowlink[v]=min([lowlink[v],preorder[w]])
queue.pop()
if lowlink[v]==preorder[v]:
scc_found[v]=True
scc=[v]
while scc_queue and preorder[scc_queue[-1]]>preorder[v]:
k=scc_queue.pop()
scc_found[k]=True
scc.append(k)
yield scc
else:
scc_queue.append(v)
def strongly_connected_components_recursive(G):
def visit(v,cnt):
root[v]=cnt
visited[v]=cnt
cnt+=1
stack.append(v)
for w in G[v]:
if w not in visited:
for c in visit(w,cnt):
yield c
if w not in component:
root[v]=min(root[v],root[w])
if root[v]==visited[v]:
component[v]=root[v]
tmpc=[v] # hold nodes in this component
while stack[-1]!=v:
w=stack.pop()
component[w]=root[v]
tmpc.append(w)
stack.remove(v)
yield tmpc
visited={}
component={}
root={}
cnt=0
stack=[]
for source in G:
if source not in visited:
for c in visit(source,cnt):
yield c
def strongly_connected_component_subgraphs(G, copy=True):
for comp in strongly_connected_components(G):
if copy:
yield G.subgraph(comp).copy()
else:
yield G.subgraph(comp)
def number_strongly_connected_components(G):
return scc_connected_components(G)
def is_strongly_connected(G):
if len(G)==0:
raise nx.NetworkXPointlessConcept(
"""Connectivity is undefined for the null graph.""")
return len(list(strongly_connected_components(G))[0])==len(G)
def core_number(G):
"""Returns the core number for each vertex.
A k-core is a maximal subgraph that contains nodes of degree k or more.
The core number of a node is the largest value k of a k-core containing
that node.
Parameters
----------
G : NetworkX graph
A graph or directed graph
Returns
-------
core_number : dictionary
A dictionary keyed by node to the core number.
Raises
------
NetworkXError
The k-core is not implemented for graphs with self loops
or parallel edges.
Notes
-----
Not implemented for graphs with parallel edges or self loops.
For directed graphs the node degree is defined to be the
in-degree + out-degree.
References
----------
.. [1] An O(m) Algorithm for Cores Decomposition of Networks
<NAME> and <NAME>, 2003.
https://arxiv.org/abs/cs.DS/0310049
"""
if nx.number_of_selfloops(G) > 0:
msg = ('Input graph has self loops which is not permitted; '
'Consider using G.remove_edges_from(nx.selfloop_edges(G)).')
raise NetworkXError(msg)
# Note: this variant peels on in-degree only (stock networkx uses in-degree + out-degree).
degrees = dict(G.in_degree())
# Sort nodes by degree.
nodes = sorted(degrees, key=degrees.get)
bin_boundaries = [0]
curr_degree = 0
for i, v in enumerate(nodes):
if degrees[v] > curr_degree:
bin_boundaries.extend([i] * (degrees[v] - curr_degree))
curr_degree = degrees[v]
node_pos = {v: pos for pos, v in enumerate(nodes)}
# The initial guess for the core number of a node is its degree.
core = degrees
nbrs = {v: list(G.neighbors(v)) for v in G}
# print(nbrs)
for v in nodes:
for u in nbrs[v]:
if core[u] > core[v]:
# nbrs[u].remove(v)
pos = node_pos[u]
bin_start = bin_boundaries[core[u]]
node_pos[u] = bin_start
node_pos[nodes[bin_start]] = pos
nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start]
bin_boundaries[core[u]] += 1
core[u] -= 1
# print(core)
return core
find_cores = core_number
def _core_subgraph(G, k_filter, k=None, core=None):
"""Returns the subgraph induced by nodes passing filter `k_filter`.
Parameters
----------
G : NetworkX graph
The graph or directed graph to process
k_filter : filter function
This function filters the nodes chosen. It takes three inputs:
A node of G, the filter's cutoff, and the core dict of the graph.
The function should return a Boolean value.
k : int, optional
The order of the core. If not specified use the max core number.
This value is used as the cutoff for the filter.
core : dict, optional
Precomputed core numbers keyed by node for the graph `G`.
If not specified, the core numbers will be computed from `G`.
"""
if core is None:
core = core_number(G)
if k is None:
k = max(core.values())
nodes = (v for v in core if k_filter(v, k, core))
return G.subgraph(nodes).copy()
def k_core(G, k=None, core_number=None):
"""Returns the k-core of G.
A k-core is a maximal subgraph that contains nodes of degree k or more.
Parameters
----------
G : NetworkX graph
A graph or directed graph
k : int, optional
The order of the core. If not specified return the main core.
core_number : dictionary, optional
Precomputed core numbers for the graph G.
Returns
-------
G : NetworkX graph
The k-core subgraph
Raises
------
NetworkXError
The k-core is not defined for graphs with self loops or parallel edges.
Notes
-----
The main core is the core with the largest degree.
Not implemented for graphs with parallel edges or self loops.
For directed graphs the node degree is defined to be the
in-degree + out-degree.
Graph, node, and edge attributes are copied to the subgraph.
See Also
--------
core_number
References
----------
.. [1] An O(m) Algorithm for Cores Decomposition of Networks
<NAME> and <NAME>, 2003.
https://arxiv.org/abs/cs.DS/0310049
"""
def k_filter(v, k, c):
return c[v] >= k
return _core_subgraph(G, k_filter, k, core_number)
def k_shell(G, k=None, core_number=None):
def k_filter(v, k, c):
return c[v] == k
return _core_subgraph(G, k_filter, k, core_number)
def connected_component_subgraphs(G, copy=True):
"""Generate connected components as subgraphs.
Parameters
----------
G : NetworkX graph
An undirected graph.
Returns
-------
comp : generator
A generator of graphs, one for each connected component of G.
copy: bool (default=True)
If True make a copy of the graph attributes
Examples
--------
>>> G = nx.path_graph(4)
>>> G.add_edge(5,6)
>>> graphs = list(nx.connected_component_subgraphs(G))
See Also
--------
connected_components
Notes
-----
For undirected graphs only.
Graph, node, and edge attributes are copied to the subgraphs by default.
"""
for c in scc(G):
if copy:
yield G.subgraph(c).copy()
else:
yield G.subgraph(c)
def no_connected_components(G):
"""Returns the number of connected components.
Parameters
----------
G : NetworkX graph
An undirected graph.
Returns
-------
n : integer
Number of connected components
See Also
--------
connected_components
number_weakly_connected_components
number_strongly_connected_components
Notes
-----
For undirected graphs only.
"""
return sum(1 for cc in scc(G))
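# Usage sketch for the helpers above (my addition): a tiny directed graph and
# its in-degree based core numbers. Every node below has in-degree 1, so all
# core numbers come out as 1 and the 1-core is the whole graph.
def _core_demo():
    H = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4)])
    print(core_number(H))  # {1: 1, 2: 1, 3: 1, 4: 1}
    return k_core(H, k=1)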
# In[16]:
'''
for i in range(0, len(G)):
graphs = connected_component_subgraphs(k_core(G,k=i))
if(graphs is None):
break
else:
for g in graphs:
if(g.edges):
print("This is the " + str(i) + " core of graph")
# print(g.edges())
SCG=nx.DiGraph()
scs = []
for (r,s) in g.edges():
if(r not in scs):
SCG.add_node(r)
scs.append(r)
if(s not in scs):
SCG.add_node(s)
scs.append(s)
for (u,v) in g.edges():
SCG.add_edges_from([(u,v)])
print("The total no. of vertices of graph is: " + str(len(G.nodes())) + ". \nThe total no. of vertices in core graph is:" + str(len(g.nodes())))
plt.figure(num=None, figsize=(20, 20), dpi=80)
plt.axis('off')
fig = plt.figure(1)
nx.draw(SCG, with_labels=True, font_size=12)
plt.show()
# ### Sample Dataset:
# In[17]:
G=nx.Graph()
for i in range(1, 22):
G.add_node(i)
G.add_edges_from([(2,3)])
G.add_edges_from([(3,4)])
G.add_edges_from([(4,5)])
G.add_edges_from([(5,6)])
G.add_edges_from([(6,3)])
G.add_edges_from([(7,8)])
G.add_edges_from([(8,9)])
G.add_edges_from([(8,10)])
G.add_edges_from([(8,11)])
G.add_edges_from([(9,10)])
G.add_edges_from([(10,11)])
G.add_edges_from([(10,12)])
G.add_edges_from([(10,13)])
G.add_edges_from([(10,14)])
G.add_edges_from([(12,13)])
G.add_edges_from([(13,14)])
G.add_edges_from([(12,14)])
G.add_edges_from([(11,15)])
G.add_edges_from([(11,16)])
G.add_edges_from([(11,17)])
G.add_edges_from([(15,16)])
G.add_edges_from([(16,17)])
G.add_edges_from([(15,17)])
G.add_edges_from([(14,18)])
G.add_edges_from([(17,18)])
G.add_edges_from([(14,19)])
G.add_edges_from([(18,19)])
G.add_edges_from([(19,20)])
G.add_edges_from([(19,21)])
plt.figure(num=None, figsize=(20, 20), dpi=80)
plt.axis('off')
fig = plt.figure(1)
nx.draw(G, with_labels=True, font_size=12)
# plt.savefig("C:\\Users\\user\\Documents\\g.pdf", bbox_inches="tight")
plt.show()
# In[18]:
for i in range(0, len(G.nodes)-1):
graphs = list(ccs(nx.k_core(G,k=i)))
if(graphs is None):
break
else:
for g in graphs:
print("This is the " + str(i) + " core of graph")
print(g.edges())
SCG=nx.Graph()
scs = []
for (r,s) in g.edges():
if(r not in scs):
SCG.add_node(r)
scs.append(r)
if(s not in scs):
SCG.add_node(s)
scs.append(s)
for (u,v) in g.edges():
SCG.add_edges_from([(u,v)])
plt.figure(num=None, figsize=(20, 20), dpi=80)
plt.axis('off')
fig = plt.figure(1)
nx.draw(SCG, with_labels=True, font_size=12)
plt.show()
# In[19]:
G=nx.DiGraph()
for i in range(1, 22):
G.add_node(i)
G.add_edges_from([(2,3)])
G.add_edges_from([(3,4)])
G.add_edges_from([(4,5)])
G.add_edges_from([(5,6)])
G.add_edges_from([(6,3)])
G.add_edges_from([(7,8)])
G.add_edges_from([(8,9)])
G.add_edges_from([(8,10)])
G.add_edges_from([(8,11)])
G.add_edges_from([(9,10)])
G.add_edges_from([(10,11)])
G.add_edges_from([(10,12)])
G.add_edges_from([(10,13)])
G.add_edges_from([(10,14)])
G.add_edges_from([(12,13)])
G.add_edges_from([(13,14)])
G.add_edges_from([(12,14)])
G.add_edges_from([(11,15)])
G.add_edges_from([(11,16)])
G.add_edges_from([(11,17)])
G.add_edges_from([(15,16)])
G.add_edges_from([(16,17)])
G.add_edges_from([(15,17)])
G.add_edges_from([(14,18)])
G.add_edges_from([(17,18)])
G.add_edges_from([(14,19)])
G.add_edges_from([(18,19)])
G.add_edges_from([(19,20)])
G.add_edges_from([(19,21)])
plt.figure(num=None, figsize=(20, 20), dpi=80)
plt.axis('off')
fig = plt.figure(1)
nx.draw(G, with_labels=True, font_size=12)
# plt.savefig("C:\\Users\\user\\Documents\\g.pdf", bbox_inches="tight")
plt.show()
'''
# In[ ]:
'''
for i in range(0, len(G)):
graphs = connected_component_subgraphs(k_core(G,k=i))
if(graphs is None):
break
else:
for g in graphs:
if(g.edges):
print("This is the " + str(i) + " core of graph")
# print(g.edges())
SCG=nx.DiGraph()
scs = []
for (r,s) in g.edges():
if(r not in scs):
SCG.add_node(r)
scs.append(r)
if(s not in scs):
SCG.add_node(s)
scs.append(s)
for (u,v) in g.edges():
SCG.add_edges_from([(u,v)])
print("The total no. of vertices of graph is: " + str(len(G.nodes())) + ". \nThe total no. of vertices in core graph is:" + str(len(g.nodes())))
plt.figure(num=None, figsize=(20, 20), dpi=80)
plt.axis('off')
fig = plt.figure(1)
nx.draw(SCG, with_labels=True, font_size=12)
plt.show()
'''
# ### Real Life Dataset:
# In[ ]:
#df = pd.read_csv("/root/.encrypted/.pythonSai/ira_tweets_csv_hashed.csv", sep=" ", header=None, skiprows=1, names=["tweetid", "userid"])
#df = df[["tweetid", "userid"]].dropna()
#print(df.head())
'''
import csv
from itertools import dropwhile, takewhile
def getstuff(filename, criterion):
with open(filename, "r") as csvfile:
datareader = csv.reader(csvfile)
yield next(datareader) # yield the header row
# first row, plus any subsequent rows that match, then stop
# reading altogether
# Python 2: use `for row in takewhile(...): yield row` instead
# instead of `yield from takewhile(...)`.
print(row[18])
print(criterion)
yield from takewhile(
lambda r: r[18] == criterion,
dropwhile(lambda r: r[18] != criterion, datareader))
return
def getdata(filename, criteria):
for criterion in criteria:
for row in getstuff(filename, criterion):
print(row[1])
yield row
import pdb
pdb.set_trace()
count = 0
for row in getdata("/root/.encrypted/.pythonSai/ira_tweets_csv_hashed.csv", "TRUE"):
count+=1
#print()
print(count)
'''
def kcoreDirectedGraph(G, k):
rrf = False
U = G.copy()
gil = list(U.in_degree())
if(k>=0 and type(k)==int):
for (i,j) in gil:
if(j<k):
U.remove_node(i)
rrf = True
if(rrf == True):
return kcoreDirectedGraph(U, k)
else:
S = G.copy()
for i in list(S.nodes()):
if(not [gni for gni in gil if gni[0]==i]):
S.remove_node(i)
if(S.nodes() is not None):
return S
else:
print("Err")
return None
def connected_component_subgraphs(G, copy=True):
for c in scc(G):
if copy:
yield G.subgraph(c).copy()
else:
yield G.subgraph(c)
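# Usage sketch for kcoreDirectedGraph (my addition): prune nodes whose in-degree
# falls below k, then walk the strongly connected component subgraphs of what
# remains. With k=1 the whole toy graph below survives.
def _kcore_demo():
    H = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4)])
    core = kcoreDirectedGraph(H, 1)
    return list(connected_component_subgraphs(core))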
'''
import pdb
#pdb.set_trace()
'''
import time
import pdb
#oba = []
rba = []
aih = False
df = pd.read_csv("/root/.encrypted/.pythonSai/ira_tweets_csv_hashed.csv", sep=",", header=None, usecols=[1,18,19], chunksize=2000, skiprows=1, names=["userid","is_retweet","retweet_userid"])
#df.to_csv('my_parsed.csv', mode='a', header=False)
df_lst = pd.DataFrame(columns=["tweetid","is_retweet", "retweet_userid"])
pd.set_option('display.max_columns', 100)
'''
#Add Retweet Bots to csv file:
for df_ in df:
#pdb.set_trace()
t0 = time.time()
#print("hello")
#t1 = time.time()
#print(t1-t0)
#pdb.set_trace()
#print(df_)
#break
#tmp_df = (df_.rename(columns={col: col.lower() for col in df_.columns}).pipe(lambda x: x[x.is_retweet == "TRUE"] ))
#for index, row in df.iterrows():
#if(row["is_retweet"]==True):
#df_lst = df_lst.append(row, ignore_index=True)
#print(df_lst)
#print(df_lst)
df_lst = df_.loc[df_["is_retweet"].map(lambda x: x==True)]
#for index, row in df_lst.iterrows():
# if(row["retweet_userid"] not in rba):
# rba.append(row["retweet_userid"])
# oba.append(row["retweet_userid"])
#for bui in oba:
#print(type(bui))
# for row in df_.itertuples():
# if(row.userid==bui and bui in oba):
# oba.remove(bui)
#df_lst.append(row)
#df_lst = df[df.columns[df["is_retweet"].isin([True])]]
#print(df_lst.loc[df_lst["is_retweet"].isin([True])])
#df_lst.drop(df_lst.columns[[0, 2]], axis=1)
#if(aih is False):
#df_lst.to_csv('my_parsed3.csv', mode='a', columns=["tweetid","retweet_userid"], header=True)
#aih = True
#else:
df_lst[["userid","retweet_userid"]].to_csv('my_parsed3.csv', mode='a', header=False, index=False)
t1 = time.time()
#print(t1-t0)
'''
'''
def converter(x):
if isinstance(x, pd.Series):
return str(x.values)
else:
return x
'''
'''
#Add originator bots to csv file:
dfa = pd.read_csv("/root/.encrypted/.pythonSai/my_parsed.csv", sep=",", header=None, usecols=[0,1,2,3,4], chunksize=2000, skiprows=1, names=["tweetid","userid","is_retweet","retweet_userid","retweet_tweetid"])
for dfa in df:
#t0 = time.time()
for df_ in df:
#t3 = time.time()
df_lst = dfa.loc[dfa["retweet_userid"].map(lambda x: str(x)==df_["userid"].apply(converter).unique().tostring())]
df_lst.to_csv('my_parsed1.csv', mode='a', header=False)
#t2 = time.time()
#print(t2-t3)
t1 = time.time()
#print(t1-t0)
#df_lst.to_csv('my_parsed1.csv', mode='a', header=False)
#for df_ in df:
# print(type(df_["userid"].apply(converter).unique().tostring()))
'''
'''
for bui in oba:
for df_ in df:
dfa.append(df_.loc[df_["userid"].map(lambda x: x==bui, oba.remove(bui))])
break
'''
'''
dfa = pd.read_csv("/root/.encrypted/.pythonSai/my_parsed.csv", sep=",", header=None, usecols=[0,1,18,19,20], chunksize=2000, skiprows=1, names=["tweetid","userid","is_retweet","retweet_userid","retweet_tweetid"])
for dfn in dfa:
for df_ in df:
dfo = dfn.loc[dfn["retweet_userid"].map(lambda x: )]
dfo.to_csv('my_parsed.csv', mode='a', header=False)
'''
#Constructing the graph:
dfn = pd.read_csv("/root/.encrypted/.pythonSai/my_parsed3.csv", sep=",", header=None, chunksize=2000, names=["userid", "retweet_userid"])
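# Hedged sketch of the graph-construction step (my addition; the excerpt stops
# at the chunked read): add one directed edge per retweet row, chunk by chunk.
# The edge direction, retweeted user -> retweeting user, is an assumption.
RG = nx.DiGraph()
for chunk in dfn:
    RG.add_edges_from(zip(chunk["retweet_userid"], chunk["userid"]))
print(RG.number_of_nodes(), RG.number_of_edges())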
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
try:
import fastparquet as fp
except ImportError: # pragma: no cover
fp = None
from .... import dataframe as md
from .... import tensor as mt
from ...datasource.read_csv import DataFrameReadCSV
from ...datasource.read_sql import DataFrameReadSQL
from ...datasource.read_parquet import DataFrameReadParquet
@pytest.mark.parametrize('chunk_size', [2, (2, 3)])
def test_set_index(setup, chunk_size):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=chunk_size)
expected = df1.set_index('y', drop=True)
df3 = df2.set_index('y', drop=True)
pd.testing.assert_frame_equal(
expected, df3.execute().fetch())
expected = df1.set_index('y', drop=False)
df4 = df2.set_index('y', drop=False)
pd.testing.assert_frame_equal(
expected, df4.execute().fetch())
expected = df1.set_index('y')
df2.set_index('y', inplace=True)
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
def test_iloc_getitem(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1.iloc[1]
df3 = df2.iloc[1]
result = df3.execute(extra_config={'check_series_name': False}).fetch()
pd.testing.assert_series_equal(
expected, result)
# plain index on axis 1
expected = df1.iloc[:2, 1]
df4 = df2.iloc[:2, 1]
pd.testing.assert_series_equal(
expected, df4.execute().fetch())
# slice index
expected = df1.iloc[:, 2:4]
df5 = df2.iloc[:, 2:4]
pd.testing.assert_frame_equal(
expected, df5.execute().fetch())
# plain fancy index
expected = df1.iloc[[0], [0, 1, 2]]
df6 = df2.iloc[[0], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, df6.execute().fetch())
# plain fancy index with shuffled order
expected = df1.iloc[[0], [1, 2, 0]]
df7 = df2.iloc[[0], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, df7.execute().fetch())
# fancy index
expected = df1.iloc[[1, 2], [0, 1, 2]]
df8 = df2.iloc[[1, 2], [0, 1, 2]]
pd.testing.assert_frame_equal(
expected, df8.execute().fetch())
# fancy index with shuffled order
expected = df1.iloc[[2, 1], [1, 2, 0]]
df9 = df2.iloc[[2, 1], [1, 2, 0]]
pd.testing.assert_frame_equal(
expected, df9.execute().fetch())
# one fancy index
expected = df1.iloc[[2, 1]]
df10 = df2.iloc[[2, 1]]
pd.testing.assert_frame_equal(
expected, df10.execute().fetch())
# plain index
expected = df1.iloc[1, 2]
df11 = df2.iloc[1, 2]
assert expected == df11.execute().fetch()
# bool index array
expected = df1.iloc[[True, False, True], [2, 1]]
df12 = df2.iloc[[True, False, True], [2, 1]]
pd.testing.assert_frame_equal(
expected, df12.execute().fetch())
# bool index array on axis 1
expected = df1.iloc[[2, 1], [True, False, True]]
df14 = df2.iloc[[2, 1], [True, False, True]]
pd.testing.assert_frame_equal(
expected, df14.execute().fetch())
# bool index
expected = df1.iloc[[True, False, True], [2, 1]]
df13 = df2.iloc[md.Series([True, False, True], chunk_size=1), [2, 1]]
pd.testing.assert_frame_equal(
expected, df13.execute().fetch())
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3).iloc[:3]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[:3])
series = md.Series(data, chunk_size=3).iloc[4]
assert series.execute().fetch() == data.iloc[4]
series = md.Series(data, chunk_size=3).iloc[[2, 3, 4, 9]]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[[2, 3, 4, 9]])
series = md.Series(data, chunk_size=3).iloc[[4, 3, 9, 2]]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[[4, 3, 9, 2]])
series = md.Series(data).iloc[5:]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[5:])
# bool index array
selection = np.random.RandomState(0).randint(2, size=10, dtype=bool)
series = md.Series(data).iloc[selection]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[selection])
# bool index
series = md.Series(data).iloc[md.Series(selection, chunk_size=4)]
pd.testing.assert_series_equal(
series.execute().fetch(), data.iloc[selection])
# test index
data = pd.Index(np.arange(10))
index = md.Index(data, chunk_size=3)[:3]
pd.testing.assert_index_equal(
index.execute().fetch(), data[:3])
index = md.Index(data, chunk_size=3)[4]
assert index.execute().fetch() == data[4]
index = md.Index(data, chunk_size=3)[[2, 3, 4, 9]]
pd.testing.assert_index_equal(
index.execute().fetch(), data[[2, 3, 4, 9]])
index = md.Index(data, chunk_size=3)[[4, 3, 9, 2]]
pd.testing.assert_index_equal(
index.execute().fetch(), data[[4, 3, 9, 2]])
index = md.Index(data)[5:]
pd.testing.assert_index_equal(
index.execute().fetch(), data[5:])
# bool index array
selection = np.random.RandomState(0).randint(2, size=10, dtype=bool)
index = md.Index(data)[selection]
pd.testing.assert_index_equal(
index.execute().fetch(), data[selection])
index = md.Index(data)[mt.tensor(selection, chunk_size=4)]
pd.testing.assert_index_equal(
index.execute().fetch(), data[selection])
def test_iloc_setitem(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
expected = df1
expected.iloc[1] = 100
df2.iloc[1] = 100
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# slice index
expected.iloc[:, 2:4] = 1111
df2.iloc[:, 2:4] = 1111
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# plain fancy index
expected.iloc[[0], [0, 1, 2]] = 2222
df2.iloc[[0], [0, 1, 2]] = 2222
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# fancy index
expected.iloc[[1, 2], [0, 1, 2]] = 3333
df2.iloc[[1, 2], [0, 1, 2]] = 3333
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# plain index
expected.iloc[1, 2] = 4444
df2.iloc[1, 2] = 4444
pd.testing.assert_frame_equal(
expected, df2.execute().fetch())
# test Series
data = pd.Series(np.arange(10))
series = md.Series(data, chunk_size=3)
series.iloc[:3] = 1
data.iloc[:3] = 1
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[4] = 2
data.iloc[4] = 2
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[[2, 3, 4, 9]] = 3
data.iloc[[2, 3, 4, 9]] = 3
pd.testing.assert_series_equal(
series.execute().fetch(), data)
series.iloc[5:] = 4
data.iloc[5:] = 4
pd.testing.assert_series_equal(
series.execute().fetch(), data)
# test Index
data = pd.Index(np.arange(10))
index = md.Index(data, chunk_size=3)
with pytest.raises(TypeError):
index[5:] = 4
def test_loc_getitem(setup):
rs = np.random.RandomState(0)
# index and columns are labels
raw1 = pd.DataFrame(rs.randint(10, size=(5, 4)),
index=['a1', 'a2', 'a3', 'a4', 'a5'],
columns=['a', 'b', 'c', 'd'])
# columns are labels
raw2 = raw1.copy()
raw2.reset_index(inplace=True, drop=True)
# columns are non unique and monotonic
raw3 = raw1.copy()
raw3.columns = ['a', 'b', 'b', 'd']
# columns are non unique and non monotonic
raw4 = raw1.copy()
raw4.columns = ['b', 'a', 'b', 'd']
# index that is timestamp
raw5 = raw1.copy()
raw5.index = pd.date_range('2020-1-1', periods=5)
raw6 = raw1[:0]
df1 = md.DataFrame(raw1, chunk_size=2)
df2 = md.DataFrame(raw2, chunk_size=2)
df3 = md.DataFrame(raw3, chunk_size=2)
df4 = md.DataFrame(raw4, chunk_size=2)
df5 = md.DataFrame(raw5, chunk_size=2)
df6 = md.DataFrame(raw6)
df = df2.loc[3, 'b']
result = df.execute().fetch()
expected = raw2.loc[3, 'b']
assert result == expected
df = df1.loc['a3', 'b']
result = df.execute(extra_config={'check_shape': False}).fetch()
expected = raw1.loc['a3', 'b']
assert result == expected
# test empty list
df = df1.loc[[]]
result = df.execute().fetch()
expected = raw1.loc[[]]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[[]]
result = df.execute().fetch()
expected = raw2.loc[[]]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[1:4, 'b':'d']
result = df.execute().fetch()
expected = raw2.loc[1:4, 'b': 'd']
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:4, 'b':]
result = df.execute().fetch()
expected = raw2.loc[:4, 'b':]
pd.testing.assert_frame_equal(result, expected)
# slice on axis index whose index_value does not have value
df = df1.loc['a2':'a4', 'b':]
result = df.execute().fetch()
expected = raw1.loc['a2':'a4', 'b':]
pd.testing.assert_frame_equal(result, expected)
df = df2.loc[:, 'b']
result = df.execute().fetch()
expected = raw2.loc[:, 'b']
pd.testing.assert_series_equal(result, expected)
# 'b' is non-unique
df = df3.loc[:, 'b']
result = df.execute().fetch()
expected = raw3.loc[:, 'b']
pd.testing.assert_frame_equal(result, expected)
# 'b' is non-unique, and non-monotonic
df = df4.loc[:, 'b']
result = df.execute().fetch()
expected = raw4.loc[:, 'b']
pd.testing.assert_frame_equal(result, expected)
# label on axis 0
df = df1.loc['a2', :]
result = df.execute().fetch()
expected = raw1.loc['a2', :]
pd.testing.assert_series_equal(result, expected)
# label-based fancy index
df = df2.loc[[3, 0, 1], ['c', 'a', 'd']]
result = df.execute().fetch()
expected = raw2.loc[[3, 0, 1], ['c', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index, asc sorted
df = df2.loc[[0, 1, 3], ['a', 'c', 'd']]
result = df.execute().fetch()
expected = raw2.loc[[0, 1, 3], ['a', 'c', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index in which non-unique exists
selection = rs.randint(2, size=(5,), dtype=bool)
df = df3.loc[selection, ['b', 'a', 'd']]
result = df.execute().fetch()
expected = raw3.loc[selection, ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
df = df3.loc[md.Series(selection), ['b', 'a', 'd']]
result = df.execute().fetch()
expected = raw3.loc[selection, ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# label-based fancy index on index
# whose index_value does not have value
df = df1.loc[['a3', 'a1'], ['b', 'a', 'd']]
result = df.execute(extra_config={'check_nsplits': False}).fetch()
expected = raw1.loc[['a3', 'a1'], ['b', 'a', 'd']]
pd.testing.assert_frame_equal(result, expected)
# get timestamp by str
df = df5.loc['20200101']
result = df.execute(extra_config={'check_series_name': False}).fetch(
extra_config={'check_series_name': False})
expected = raw5.loc['20200101']
pd.testing.assert_series_equal(result, expected)
# get timestamp by str, return scalar
df = df5.loc['2020-1-1', 'c']
result = df.execute().fetch()
expected = raw5.loc['2020-1-1', 'c']
assert result == expected
# test empty df
df = df6.loc[[]]
result = df.execute().fetch()
expected = raw6.loc[[]]
pd.testing.assert_frame_equal(result, expected)
def test_dataframe_getitem(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
data2 = data.copy()
data2.index = pd.date_range('2020-1-1', periods=10)
mdf = md.DataFrame(data2, chunk_size=3)
series1 = df['c2']
pd.testing.assert_series_equal(
series1.execute().fetch(), data['c2'])
series2 = df['c5']
pd.testing.assert_series_equal(
series2.execute().fetch(), data['c5'])
df1 = df[['c1', 'c2', 'c3']]
pd.testing.assert_frame_equal(
df1.execute().fetch(), data[['c1', 'c2', 'c3']])
df2 = df[['c3', 'c2', 'c1']]
pd.testing.assert_frame_equal(
df2.execute().fetch(), data[['c3', 'c2', 'c1']])
df3 = df[['c1']]
pd.testing.assert_frame_equal(
df3.execute().fetch(), data[['c1']])
df4 = df[['c3', 'c1', 'c2', 'c1']]
pd.testing.assert_frame_equal(
df4.execute().fetch(), data[['c3', 'c1', 'c2', 'c1']])
df5 = df[np.array(['c1', 'c2', 'c3'])]
pd.testing.assert_frame_equal(
df5.execute().fetch(), data[['c1', 'c2', 'c3']])
df6 = df[['c3', 'c2', 'c1']]
pd.testing.assert_frame_equal(
df6.execute().fetch(), data[['c3', 'c2', 'c1']])
df7 = df[1:7:2]
pd.testing.assert_frame_equal(
df7.execute().fetch(), data[1:7:2])
series3 = df['c1'][0]
assert series3.execute().fetch() == data['c1'][0]
df8 = mdf[3:7]
pd.testing.assert_frame_equal(
df8.execute().fetch(), data2[3:7])
df9 = mdf['2020-1-2': '2020-1-5']
pd.testing.assert_frame_equal(
df9.execute().fetch(), data2['2020-1-2': '2020-1-5'])
def test_dataframe_getitem_bool(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
mask_data = data.c1 > 0.5
mask = md.Series(mask_data, chunk_size=2)
# getitem by mars series
assert df[mask].execute().fetch().shape == data[mask_data].shape
pd.testing.assert_frame_equal(
df[mask].execute().fetch(), data[mask_data])
# getitem by pandas series
pd.testing.assert_frame_equal(
df[mask_data].execute().fetch(), data[mask_data])
# getitem by mars series with alignment but no shuffle
mask_data = pd.Series([True, True, True, False, False, True, True, False, False, True],
index=range(9, -1, -1))
mask = md.Series(mask_data, chunk_size=2)
pd.testing.assert_frame_equal(
df[mask].execute().fetch(), data[mask_data])
# getitem by mars series with shuffle alignment
mask_data = pd.Series([True, True, True, False, False, True, True, False, False, True],
index=[0, 3, 6, 2, 9, 8, 5, 7, 1, 4])
mask = md.Series(mask_data, chunk_size=2)
pd.testing.assert_frame_equal(
df[mask].execute().fetch().sort_index(), data[mask_data])
# getitem by mars series with shuffle alignment and extra element
mask_data = pd.Series([True, True, True, False, False, True, True, False, False, True, False],
index=[0, 3, 6, 2, 9, 8, 5, 7, 1, 4, 10])
mask = md.Series(mask_data, chunk_size=2)
pd.testing.assert_frame_equal(
df[mask].execute().fetch().sort_index(), data[mask_data])
# getitem by DataFrame with all bool columns
r = df[df > 0.5]
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, data[data > 0.5])
# getitem by tensor mask
r = df[(df['c1'] > 0.5).to_tensor()]
result = r.execute().fetch()
pd.testing.assert_frame_equal(result, data[data['c1'] > 0.5])
def test_dataframe_getitem_using_attr(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'key', 'dtypes', 'size'])
df = md.DataFrame(data, chunk_size=2)
series1 = df.c2
pd.testing.assert_series_equal(
series1.execute().fetch(), data.c2)
# accessing column using attribute shouldn't overwrite existing attributes
assert df.key == getattr(getattr(df, '_data'), '_key')
assert df.size == data.size
pd.testing.assert_series_equal(df.dtypes, data.dtypes)
# accessing non-existing attributes should trigger exception
with pytest.raises(AttributeError):
_ = df.zzz # noqa: F841
def test_series_getitem(setup):
data = pd.Series(np.random.rand(10))
series = md.Series(data)
assert series[1].execute().fetch() == data[1]
data = pd.Series(np.random.rand(10), name='a')
series = md.Series(data, chunk_size=4)
for i in range(10):
series1 = series[i]
assert series1.execute().fetch() == data[i]
series2 = series[[0, 1, 2, 3, 4]]
pd.testing.assert_series_equal(
series2.execute().fetch(), data[[0, 1, 2, 3, 4]])
series3 = series[[4, 3, 2, 1, 0]]
pd.testing.assert_series_equal(
series3.execute().fetch(), data[[4, 3, 2, 1, 0]])
series4 = series[[1, 2, 3, 2, 1, 0]]
pd.testing.assert_series_equal(
series4.execute().fetch(), data[[1, 2, 3, 2, 1, 0]])
#
index = ['i' + str(i) for i in range(20)]
data = pd.Series(np.random.rand(20), index=index, name='a')
series = md.Series(data, chunk_size=3)
for idx in index:
series1 = series[idx]
assert series1.execute().fetch() == data[idx]
selected = ['i1', 'i2', 'i3', 'i4', 'i5']
series2 = series[selected]
pd.testing.assert_series_equal(
series2.execute().fetch(), data[selected])
selected = ['i4', 'i7', 'i0', 'i1', 'i5']
series3 = series[selected]
pd.testing.assert_series_equal(
series3.execute().fetch(), data[selected])
selected = ['i0', 'i1', 'i5', 'i4', 'i0', 'i1']
series4 = series[selected]
pd.testing.assert_series_equal(
series4.execute().fetch(), data[selected])
selected = ['i0']
series5 = series[selected]
pd.testing.assert_series_equal(
series5.execute().fetch(), data[selected])
data = pd.Series(np.random.rand(10,))
series = md.Series(data, chunk_size=3)
selected = series[:2]
pd.testing.assert_series_equal(
selected.execute().fetch(), data[:2])
selected = series[2:8:2]
pd.testing.assert_series_equal(
selected.execute().fetch(), data[2:8:2])
data = pd.Series(np.random.rand(9), index=['c' + str(i) for i in range(9)])
series = md.Series(data, chunk_size=3)
selected = series[:'c2']
pd.testing.assert_series_equal(
selected.execute().fetch(), data[:'c2'])
selected = series['c2':'c9']
pd.testing.assert_series_equal(
selected.execute().fetch(), data['c2':'c9'])
def test_head(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
pd.testing.assert_frame_equal(
df.head().execute().fetch(), data.head())
pd.testing.assert_frame_equal(
df.head(3).execute().fetch(), data.head(3))
pd.testing.assert_frame_equal(
df.head(-3).execute().fetch(), data.head(-3))
pd.testing.assert_frame_equal(
df.head(8).execute().fetch(), data.head(8))
pd.testing.assert_frame_equal(
df.head(-8).execute().fetch(), data.head(-8))
pd.testing.assert_frame_equal(
df.head(13).execute().fetch(), data.head(13))
pd.testing.assert_frame_equal(
df.head(-13).execute().fetch(), data.head(-13))
def test_tail(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
pd.testing.assert_frame_equal(
df.tail().execute().fetch(), data.tail())
pd.testing.assert_frame_equal(
df.tail(3).execute().fetch(), data.tail(3))
pd.testing.assert_frame_equal(
df.tail(-3).execute().fetch(), data.tail(-3))
pd.testing.assert_frame_equal(
df.tail(8).execute().fetch(), data.tail(8))
pd.testing.assert_frame_equal(
df.tail(-8).execute().fetch(), data.tail(-8))
pd.testing.assert_frame_equal(
df.tail(13).execute().fetch(), data.tail(13))
pd.testing.assert_frame_equal(
df.tail(-13).execute().fetch(), data.tail(-13))
def test_at(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c' + str(i) for i in range(5)],
index=['i' + str(i) for i in range(10)])
df = md.DataFrame(data, chunk_size=3)
data2 = data.copy()
data2.index = np.arange(10)
df2 = md.DataFrame(data2, chunk_size=3)
with pytest.raises(ValueError):
_ = df.at[['i3, i4'], 'c1']
result = df.at['i3', 'c1'].execute().fetch()
assert result == data.at['i3', 'c1']
result = df['c1'].at['i2'].execute().fetch()
assert result == data['c1'].at['i2']
result = df2.at[3, 'c2'].execute().fetch()
assert result == data2.at[3, 'c2']
result = df2.loc[3].at['c2'].execute().fetch()
assert result == data2.loc[3].at['c2']
def test_iat(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c' + str(i) for i in range(5)],
index=['i' + str(i) for i in range(10)])
df = md.DataFrame(data, chunk_size=3)
with pytest.raises(ValueError):
_ = df.iat[[1, 2], 3]
result = df.iat[3, 4].execute().fetch()
assert result == data.iat[3, 4]
result = df.iloc[:, 2].iat[3].execute().fetch()
assert result == data.iloc[:, 2].iat[3]
def test_setitem(setup):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c' + str(i) for i in range(5)],
index=['i' + str(i) for i in range(10)])
data2 = np.random.rand(10)
data3 = np.random.rand(10, 2)
df = md.DataFrame(data, chunk_size=3)
df['c3'] = df['c3'] + 1
df['c10'] = 10
df[4] = mt.tensor(data2, chunk_size=4)
df['d1'] = df['c4'].mean()
df['e1'] = data2 * 2
result = df.execute().fetch()
expected = data.copy()
expected['c3'] = expected['c3'] + 1
expected['c10'] = 10
expected[4] = data2
expected['d1'] = data['c4'].mean()
expected['e1'] = data2 * 2
pd.testing.assert_frame_equal(result, expected)
# test set multiple cols with scalar
df = md.DataFrame(data, chunk_size=3)
df[['c0', 'c2']] = 1
df[['c1', 'c10']] = df['c4'].mean()
df[['c11', 'c12']] = mt.tensor(data3, chunk_size=4)
result = df.execute().fetch()
expected = data.copy()
expected[['c0', 'c2']] = 1
expected[['c1', 'c10']] = expected['c4'].mean()
expected[['c11', 'c12']] = data3
pd.testing.assert_frame_equal(result, expected)
# test set multiple rows
df = md.DataFrame(data, chunk_size=3)
df[['c1', 'c4', 'c10']] = df[['c2', 'c3', 'c4']] * 2
result = df.execute().fetch()
expected = data.copy()
expected[['c1', 'c4', 'c10']] = expected[['c2', 'c3', 'c4']] * 2
pd.testing.assert_frame_equal(result, expected)
# test setitem into empty DataFrame
df = md.DataFrame()
df['a'] = md.Series(np.arange(1, 11), chunk_size=3)
pd.testing.assert_index_equal(df.index_value.to_pandas(),
pd.RangeIndex(10))
result = df.execute().fetch()
expected = pd.DataFrame()
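# Illustrative sketch, not part of the original test suite: the tests above
# exercise Mars' deferred execution model, in which indexing a md.DataFrame
# only builds a computation graph and .execute().fetch() materialises the
# result as a pandas object. Assumes the module-level imports used by these
# tests (np, pd, md) and a Mars session provided by the `setup` fixture.
def _sketch_lazy_getitem(setup):
    raw = pd.DataFrame(np.random.rand(10, 3), columns=['a', 'b', 'c'])
    df = md.DataFrame(raw, chunk_size=4)      # split into row chunks of 4
    lazy = df[df['a'] > 0.5][['a', 'b']]      # lazy: nothing is computed yet
    result = lazy.execute().fetch()           # computation is triggered here
    pd.testing.assert_frame_equal(result, raw[raw['a'] > 0.5][['a', 'b']])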
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import phik
import scipy.stats as sstats
import warnings
class CalidadDatos:
def __init__(self, _base, castNumero=False, diccionarioCast=None,
errores="ignore", formato_fecha=None):
""" Constructor por defecto de la clase CalidadDatos. Esta clase se \
encarga de manejar todas las funciones asociadas a la medición de la \
calidad de los datos en una base de datos
:param base: (dataframe) Base de datos de tipo pandas.DataFrame que será
analizada por la clase CalidadDatos.
:param castNumero: (bool) {True, False}. Valor por defecto: False \
Indica si se desea convertir las columnas de tipos object y \
bool a float, de ser posible
:param diccionarioCast: (dict) { {nombre_columna : tipo_columna} } \
Diccionario donde se especifican los tipos a que se desean \
convertir las columnas (string, numerico, booleano, fecha, \
categorico)
:param errores: (string) {'ignore', 'coerce', 'raise'}\
Valor por defecto: 'ignore'. Indica qué hacer con las columnas \
cuyo tipo no se puede cambiar al solicitado en 'diccionarioCast'
:param formato_fecha: (string). Formato a ser utilizado al hacer cast a variables de fecha.
:return: Objeto del tipo de la clase CalidadDatos
"""
_base = _base.copy()
# Pasar los 'objects' a float, si posible
if castNumero == True:
tipos_columnas = _base.dtypes
tipos_object = tipos_columnas[(tipos_columnas == "object") | (
tipos_columnas == "bool")].index.to_list()
# Pasar las columnas que se puedan a integer
_base[tipos_object] = _base[tipos_object].apply(
lambda x: x.astype("int64", errors="ignore"), axis=0)
# Pasar las columnas que se puedan a float
tipos_columnas = _base.dtypes
tipos_object = tipos_columnas[(tipos_columnas == "object") | (
tipos_columnas == "bool")].index.to_list()
_base[tipos_object] = _base[tipos_object].apply(
lambda x: x.astype(float, errors="ignore"), axis=0)
elif castNumero == False:
pass
else:
raise ValueError('"castNumero" tiene que ser True o False')
# Cambiar los tipos de las variables según el diccionario
if isinstance(diccionarioCast, dict):
for s in diccionarioCast:
if diccionarioCast[s] == "string":
_base[s] = _base[s].apply(lambda x: str(x))
elif diccionarioCast[s] == "numerico":
_base[s] = pd.to_numeric(_base[s], errors=errores)
elif diccionarioCast[s] == "booleano":
_base[s] = _base[s].astype("bool")
elif diccionarioCast[s] == "fecha":
_base[s] = pd.to_datetime(
_base[s], format=formato_fecha, errors=errores)
elif diccionarioCast[s] == "categorico":
_base[s] = pd.Categorical(_base[s])
else:
raise ValueError(
'Las llaves de "diccionarioCast" tienen que ser "string", "numerico", "booleano", "fecha" o "categorico" ')
elif diccionarioCast is None:
pass
else:
raise ValueError('"diccionario" tiene que ser tipo "dict"')
self.base = _base
# Tipos de las columnas
def TipoColumnas(self, tipoGeneral=True,
tipoGeneralPython=True, tipoEspecifico=True):
""" Retorna el tipo de dato de cada columna del dataframe. :ref:`Ver ejemplo <calidad_datos.TipoColumnas>`
:param tipoGeneral: (bool) {True, False}, valor por defecto: True. \
Incluye el tipo general de cada columna. Los tipos son: numérico,\
texto, booleano, otro
:param tipoGeneralPython: (bool) {True, False}, valor por defecto: \
True. Incluye el tipo general de cada columna dado por el método\
'dtypes' de Python
:param tipoEspecifico: (bool) {True, False}, valor por defecto: True.\
Incluye el porcentaje de los tres tipos más frecuentes de cada\
columna. Se aplica la función 'type' de Python para cada \
observación.
:return: Dataframe de pandas con los tipos de dato de cada columna.
"""
# base = self.base.copy()
## Funciones generales
# Tipos de columnas según función dtypes
tipos_dtypes = self.base.dtypes.apply(str)
# Lista de los nombres de las columnas
lista_nombres = list(self.base.columns)
#
numero_columnas_base = self.base.shape[0]
##
lista_total = []
# lista_nombres.insert(0, "")
lista_total.append([""] + lista_nombres)
if tipoGeneral == True:
lista_general = []
for s in lista_nombres:
# Si solo hay missing values, poner como 'Otro'
if self.base[s].isnull().sum() == numero_columnas_base:
lista_general.append("Otro")
else:
tipo_para_object = str(
type(
self.base[s].mode(
dropna=True)[0]))
tipo_para_resto = tipos_dtypes[s]
if "int" in tipo_para_resto or "float" in tipo_para_resto:
lista_general.append("Numérico")
elif "str" in tipo_para_object:
lista_general.append("Texto")
elif "bool" in tipo_para_resto:
lista_general.append("Booleano")
elif "date" in tipo_para_resto:
lista_general.append("Fecha")
else:
lista_general.append("Otro")
lista_general.insert(0, "tipo_general")
lista_total.append(lista_general)
elif tipoGeneral == False:
pass
else:
raise ValueError('"tipoGeneral" tiene que ser True o False')
# Tipo general de Python
if tipoGeneralPython == True:
lista_python = list(tipos_dtypes.copy())
lista_python.insert(0, "tipo_general_python")
lista_total.append(lista_python)
elif tipoGeneralPython == False:
pass
else:
raise ValueError('"tipoGeneralPython" tiene que ser True o False')
# Tipo específico Python
if tipoEspecifico == True:
lista_especifico_1 = []
lista_especifico_2 = []
lista_especifico_3 = []
lista_especifico_4 = []
lista_especifico_5 = []
for s in lista_nombres:
tip = self.base[s].fillna("nan").apply(
lambda x: x if x == "nan" else type(x)).value_counts(
normalize=True, dropna=False)
tip_1 = "{1}: {0}%".format(round(float(tip.iloc[0] * 100), 2),
str(tip.index[0]).replace("<class ", "").replace(">", ""))
lista_especifico_1.append(tip_1)
try:
tip_2 = "{1}: {0}%".format(round(float(tip.iloc[1] * 100), 2),
str(tip.index[1]).replace("<class ", "").replace(">", ""))
lista_especifico_2.append(tip_2)
except BaseException:
lista_especifico_2.append("")
try:
tip_3 = "{1}: {0}%".format(round(float(tip.iloc[2] * 100), 2),
str(tip.index[2]).replace("<class ", "").replace(">", ""))
lista_especifico_3.append(tip_3)
except BaseException:
lista_especifico_3.append("")
try:
tip_4 = "{1}: {0}%".format(round(float(tip.iloc[3] * 100), 2),
str(tip.index[3]).replace("<class ", "").replace(">", ""))
lista_especifico_4.append(tip_4)
except BaseException:
lista_especifico_4.append("")
try:
tip_5 = "{1}: {0}%".format(round(float(tip.iloc[4] * 100), 2),
str(tip.index[4]).replace("<class ", "").replace(">", ""))
lista_especifico_5.append(tip_5)
except BaseException:
lista_especifico_5.append("")
lista_especifico_1.insert(0, "tipo_especifico_1")
lista_total.append(lista_especifico_1)
if all(q == "" for q in lista_especifico_2):
pass
else:
lista_especifico_2.insert(0, "tipo_especifico_2")
lista_total.append(lista_especifico_2)
if all(q == "" for q in lista_especifico_3):
pass
else:
lista_especifico_3.insert(0, "tipo_especifico_3")
lista_total.append(lista_especifico_3)
if all(q == "" for q in lista_especifico_4):
pass
else:
lista_especifico_4.insert(0, "tipo_especifico_4")
lista_total.append(lista_especifico_4)
if all(q == "" for q in lista_especifico_5):
pass
else:
lista_especifico_5.insert(0, "tipo_especifico_5")
lista_total.append(lista_especifico_5)
del tip
elif tipoEspecifico == False:
pass
else:
raise ValueError('"tipoEspecifico" tiene que ser True o False')
tips = pd.DataFrame(lista_total).T.set_index(keys=0, drop=True)
columnas = list(tips.iloc[0])
tips.columns = columnas
tips = tips.drop(tips.index[0])
return (tips)
# valores únicos en cada columna
# sin missing values
def ValoresUnicos(self, faltantes=False):
""" Calcula la cantidad de valores únicos de cada columna del dataframe. \
:ref:`Ver ejemplo <calidad_datos.ValoresUnicos>`
:param faltantes: (bool) {True, False}, valor por defecto: False. \
Indica si desea tener en cuenta los valores faltantes en el \
conteo de valores únicos.
:return: serie de pandas con la cantidad de valores únicos de cada columna.
"""
if faltantes == False:
unicos_columnas = self.base.apply(
lambda x: len(x.value_counts()), axis=0)
elif faltantes == True:
unicos_columnas = self.base.apply(
lambda x: len(x.value_counts(dropna=False)), axis=0)
else:
raise ValueError('"faltantes" tiene que ser True o False')
return (unicos_columnas)
# Missing values
def ValoresFaltantes(self, numero=False):
""" Calcula el porcentaje/número de valores faltantes de cada columna \
del dataframe. :ref:`Ver ejemplo <calidad_datos.ValoresFaltantes>`
:param numero: (bool) {True, False}, valor por defecto: False. Si el \
valor es False el resultado se expresa como un cociente, si el \
valor es True el valor se expresa como una cantidad de \
registros (número entero).
:return: serie de pandas con la cantidad/cociente de valores \
faltantes de cada columna.
"""
base = self.base.copy()
if numero == False:
missing_columnas = pd.isnull(base).sum() / len(base)
elif numero == True:
missing_columnas = pd.isnull(base).sum()
else:
raise ValueError('"cociente" tiene que ser True o False')
return (missing_columnas)
# Porcentaje y número de filas y columnas no únicas
def CantidadDuplicados(self, eje=0, numero=False):
""" Retorna el porcentaje/número de \
filas o columnas duplicadas (repetidas) en el dataframe. \
:ref:`Ver ejemplo <calidad_datos.CantidadDuplicados>`
:param eje: (int) {1, 0}, valor por defecto: 0. Si el valor \
es 1 la validación se realiza por columnas, si el valor es \
0 la validación se realiza por filas.
:param numero: (bool) {True, False}, valor por defecto: False. Si el \
valor es False el resultado se expresa como un cociente, si el \
valor es True el valor se expresa como una cantidad de \
registros (número entero).
:return: (int o float) resultado de unicidad.
"""
base = self.base.copy()
# Revisar si hay columnas con tipos diccionario o lista para
# convertirlas a string
for s in base.columns:
tip = str(type(self.base[s].value_counts(dropna=False).index[0])).replace("<class ", "").replace(">",
"").replace(
"'", "")
if tip == "dict" or tip == "list":
base[s] = base[s].apply(str)
else:
pass
# Proporcion (decimal) de columnas repetidas
if eje == 1 and numero == False:
no_unic_columnas = base.T.duplicated(keep="first")
cols = no_unic_columnas[no_unic_columnas].shape[0] / base.shape[1]
# Número de columnas repetidas
elif eje == 1 and numero == True:
no_unic_columnas = base.T.duplicated(keep="first")
cols = no_unic_columnas[no_unic_columnas].shape[0]
# Proporción de filas repetidas
elif eje == 0 and numero == False:
no_unic_filas = base.duplicated(keep="first")
cols = no_unic_filas[no_unic_filas].shape[0] / base.shape[0]
# Número de filas repetidas
elif eje == 0 and numero == True:
no_unic_filas = base.duplicated(keep="first")
cols = no_unic_filas[no_unic_filas].shape[0]
else:
raise ValueError(
'"eje" tiene que ser 1 o 0 y "numero" tiene que ser True o False')
return (cols)
# Matching de columnas y filas no únicas
def EmparejamientoDuplicados(self, col=False):
""" Retorna las columnas o filas que presenten valores duplicados del \
dataframe. :ref:`Ver ejemplo <calidad_datos.EmparejamientoDuplicados>`
:param col: (bool) {True, False}, valor por defecto: False. Si el valor \
es True la validación se realiza por columnas, si el valor es \
False la validación se realiza por filas.
:return: matriz (dataframe) que relaciona las indices de filas/nombre \
de columnas que presentan valores duplicados.
"""
base = self.base.copy()
# Revisar si hay columnas con tipos diccionario o lista para
# convertirlas a string
for s in base.columns:
tip = str(type(self.base[s].value_counts(dropna=False).index[0])).replace("<class ", "").replace(">",
"").replace(
"'", "")
if tip == "dict" or tip == "list":
base[s] = base[s].apply(str)
else:
pass
# Obtener todos los duplicados, sin hacer todavía el emparejamiento
if col == True:
dupli = base.T.duplicated(keep=False)
elif col == False:
dupli = base.duplicated(keep=False)
else:
raise ValueError('"col" tiene que ser True o False')
# Revisar si hay duplicados o no. Parar si no hay
dupli = dupli[dupli]
if dupli.sum() == 0:
if col == True:
print("No hay columnas duplicadas")
return
elif col == False:
print("No hay filas duplicadas")
return
else:
raise ValueError('"col" tiene que ser True o False')
else:
pass
lista_duplicados = []
for s in dupli.index:
for ss in dupli.index:
if col == True:
if base[s].equals(base[ss]) and s != ss:
lista_duplicados.append([s, ss])
elif col == False:
if base.iloc[s].equals(base.iloc[ss]) and s != ss:
lista_duplicados.append([s, ss])
else:
pass
if col == False:
lista_duplicados = sorted(lista_duplicados)
else:
pass
dic = {}
for s in dupli.index:
dic[s] = []
for s in dupli.index:
for i in range(len(lista_duplicados)):
if s in lista_duplicados[i]:
dic[s].append(lista_duplicados[i])
for s in dic:
lista = [q for l in dic[s] for q in l]
dic[s] = list(set(lista))
if col == True:
lista_listas = [q for q in dic.values()]
else:
lista_listas = [sorted(q) for q in dic.values()]
for i in range(len(lista_listas)):
for ii in range(len(lista_listas[i])):
lista_listas[i][ii] = str(lista_listas[i][ii])
df = pd.DataFrame(
lista_listas).drop_duplicates().reset_index(drop=True)
df = df.T
if col == True:
lista_columnas_df = [
"Columnas iguales {0}".format(q) for q in range(
1, df.shape[1] + 1)]
df.columns = lista_columnas_df
else:
lista_columnas_df = [
"Filas iguales {0}".format(q) for q in range(
1, df.shape[1] + 1)]
df.columns = lista_columnas_df
# Quitar los 'nan' del DataFrame resultante
df = df.apply(lambda x: x.replace(np.nan, ""))
return (df)
# CONSISTENCIA. Porcentaje de outliers
def ValoresExtremos(self, extremos="ambos", numero=False):
""" Calcula el porcentaje o cantidad de outliers de cada columna numérica \
(las columnas con números en formato string se intentarán transformar \
a columnas numéricas). :ref:`Ver ejemplo <calidad_datos.ValoresExtremos>`
:param extremos: (str) {'superior', 'inferior', 'ambos'}, valor por \
defecto: 'ambos'. Si el valor es '**inferior**' se tienen en cuenta los \
registros con valor menor al límite inferior calculado por la \
metodología de valor atípico por rango intercuartílico. Si el valor es \
'**superior**' se tienen en cuenta los registros con valor mayor al\
límite superior calculado por la metodología de valor atípico por rango \
intercuartílico. Si el valor es '**ambos**' se tienen en cuenta los \
registros con valor menor al límite inferior calculado por la \
metodología de valor atípico por rango intercuartílico, y también \
aquellos con valor mayor al límite superior calculado por la \
metodología de valor atípico por rango intercuartílico.
:param numero: (bool) {True, False}, valor por defecto: False. Si el valor es \
False el resultado se expresa como una proporcion (en decimales), si el valor es True el \
valor se expresa como una cantidad de registros (número entero).
:return: serie de pandas con la cantidad/proporcion de valores outliers \
de cada columna.
"""
col_tipos = self.TipoColumnas(
tipoGeneral=True, tipoGeneralPython=False, tipoEspecifico=False).iloc[:, 0]
col_num = col_tipos[col_tipos == "Numérico"].index
base_num = self.base[col_num]
if base_num.shape[1] == 0:
print("La base de datos no tiene columnas numéricas")
return
else:
pass
percentiles_25 = base_num.apply(
lambda x: np.nanpercentile(x, 25), axis=0)
percentiles_75 = base_num.apply(
lambda x: np.nanpercentile(x, 75), axis=0)
iqr = percentiles_75 - percentiles_25
iqr_upper = percentiles_75 + iqr * 1.5
iqr_lower = percentiles_25 - iqr * 1.5
dic_outliers = {}
if extremos == "ambos":
for i in range(0, len(iqr)):
dic_outliers[base_num.columns[i]] = (base_num.iloc[:, i] > iqr_upper[i]) | (
base_num.iloc[:, i] < iqr_lower[i])
elif extremos == "superior":
for i in range(0, len(iqr)):
dic_outliers[base_num.columns[i]] = (
base_num.iloc[:, i] > iqr_upper[i])
elif extremos == "inferior":
for i in range(0, len(iqr)):
dic_outliers[base_num.columns[i]] = (
base_num.iloc[:, i] < iqr_lower[i])
else:
raise ValueError(
'"extremos" tiene que ser "ambos", "superior" o "inferior"')
base_outliers = pd.DataFrame(dic_outliers)
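# Boceto ilustrativo / illustrative sketch, not part of the CalidadDatos class:
# a minimal usage example, assuming the class above is importable and that
# pandas (pd) and numpy (np) are available. Column names and values are made
# up for demonstration only.
def _sketch_calidad_datos():
    datos = pd.DataFrame({
        'edad': [25, 32, np.nan, 47, 25],
        'ciudad': ['Bogota', 'Cali', 'Cali', None, 'Bogota'],
    })
    calidad = CalidadDatos(datos, castNumero=False)
    print(calidad.TipoColumnas())        # general / Python / specific column types
    print(calidad.ValoresUnicos())       # unique values per column
    print(calidad.ValoresFaltantes())    # fraction of missing values per column
    print(calidad.CantidadDuplicados())  # fraction of duplicated rows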
"""This provides a class for discretizing data in a convienant way that makes
sense for our spatially referenced data/models.
"""
__all__ = [
'Grid',
]
__displayname__ = 'Mesh Tools'
import numpy as np
import pandas as pd
import properties
import discretize
from .plots import display
from .fileio import GridFileIO
def get_data_range(data):
"""Get the data range for a given ndarray"""
dmin = np.nanmin(data)
dmax = np.nanmax(data)
return (dmin, dmax)
class Grid(discretize.TensorMesh, GridFileIO):
"""
A data structure to store a model space discretization and different
attributes of that model space.
Example:
>>> import wtools
>>> import numpy as np
>>> models = {
'rand': np.random.random(1000).reshape((10,10,10)),
'spatial': np.arange(1000).reshape((10,10,10)),
}
>>> grid = wtools.Grid(models=models)
>>> grid.validate() # Make sure the data object was created successfully
True
Note:
See Jupyter notebooks under the ``examples`` directory
"""
def __init__(self, h=None, x0=(0.,0.,0.), models=None, **kwargs):
if models is not None:
self.models = models
if h is None:
h = []
shp = list(models.values())[0].shape
# Now create tensors if not present
if len(shp) > 0:
h.append(np.ones(shp[0]))
if len(shp) > 1:
h.append(np.ones(shp[1]))
if len(shp) > 2:
h.append(np.ones(shp[2]))
discretize.TensorMesh.__init__(self, h=h, x0=x0, **kwargs)
models = properties.Dictionary(
'The volumetric data as a 3D NumPy arrays in <X,Y,Z> or <i,j,k> ' +
'coordinates. Each key value pair represents a different model for ' +
'the gridded model space. Keys will be treated as the string name of ' +
'the model.',
key_prop=properties.String('Model name'),
value_prop=properties.Array(
'The volumetric data as a 3D NumPy array in <X,Y,Z> or <i,j,k> coordinates.',
shape=('*','*','*')),
required=False
)
@properties.validator
def _validate_models(self):
# Check the models
if self.models is not None:
shp = list(self.models.values())[0].shape
for k, d in self.models.items():
if d.shape != shp:
raise RuntimeError('Validation Failed: dimension mismatch between models.')
return True
@property
def keys(self):
"""List of the string names for each of the models"""
return list(self.models.keys())
@property
def shape(self):
"""3D shape of the grid (number of cells in all three directions)"""
return ( self.nCx, self.nCy, self.nCz)
@property
def bounds(self):
"""The bounds of the grid"""
grid = self.gridN
try:
x0, x1 = np.min(grid[:,0]), np.max(grid[:,0])
except:
x0, x1 = 0., 0.
try:
y0, y1 = np.min(grid[:,1]), np.max(grid[:,1])
except:
y0, y1 = 0., 0.
try:
z0, z1 = np.min(grid[:,2]), np.max(grid[:,2])
except:
z0, z1 = 0., 0.
return (x0,x1, y0,y1, z0,z1)
def get_data_range(self, key):
"""Get the data range for a given model"""
data = self.models[key]
return get_data_range(data)
def equal(self, other):
"""Compare this Grid to another Grid"""
return properties.equal(self, other)
def __str__(self):
"""Print this onject as a human readable string"""
self.validate()
fmt = ["<%s instance>" % (self.__class__.__name__)]
fmt.append(" Shape: {}".format(self.shape))
fmt.append(" Origin: {}".format(tuple(self.x0)))
bds = self.bounds
fmt.append(" X Bounds: {}".format((bds[0], bds[1])))
fmt.append(" Y Bounds: {}".format((bds[2], bds[3])))
fmt.append(" Z Bounds: {}".format((bds[4], bds[5])))
if self.models is not None:
fmt.append(" Models: ({})".format(len(self.models.keys())))
for key, val in self.models.items():
dl, dh = self.get_data_range(key)
fmt.append(" '{}' ({}): ({:.3e}, {:.3e})".format(key, val.dtype, dl, dh))
return '\n'.join(fmt)
def __repr__(self):
return self.__str__()
def _repr_html_(self):
self.validate()
fmt = ""
if self.models is not None:
fmt += "<table>"
fmt += "<tr><th>Grid Attributes</th><th>Models</th></tr>"
fmt += "<tr><td>"
fmt += "\n"
fmt += "<table>\n"
fmt += "<tr><th>Attribute</th><th>Values</th></tr>\n"
row = "<tr><td>{}</td><td>{}</td></tr>\n"
fmt += row.format("Shape", self.shape)
fmt += row.format('Origin', tuple(self.x0))
bds = self.bounds
fmt += row.format("X Bounds", (bds[0], bds[1]))
fmt += row.format("Y Bounds", (bds[2], bds[3]))
fmt += row.format("Z Bounds", (bds[4], bds[5]))
num = 0
if self.models is not None:
num = len(self.models.keys())
fmt += row.format("Models", num)
fmt += "</table>\n"
fmt += "\n"
if self.models is not None:
fmt += "</td><td>"
fmt += "\n"
fmt += "<table>\n"
row = "<tr><th>{}</th><th>{}</th><th>{}</th><th>{}</th></tr>\n"
fmt += row.format("Name", "Type", "Min", "Max")
row = "<tr><td>{}</td><td>{}</td><td>{:.3e}</td><td>{:.3e}</td></tr>\n"
for key, val in self.models.items():
dl, dh = self.get_data_range(key)
fmt += row.format(key, val.dtype, dl, dh)
fmt += "</table>\n"
fmt += "\n"
fmt += "</td></tr> </table>"
return fmt
def __getitem__(self, key):
"""Get a model of this grid by its string name"""
return self.models[key]
def to_data_frame(self, order='C'):
"""Returns the models in this Grid to a Pandas DataFrame with all arrays
flattened in the specified order. A header attribute is added to the
DataFrame to specified the grid extents. Much metadata is lost in this
conversion.
"""
self.validate()
tits = self.models.keys()
data = {k: v.flatten(order=order) for k, v in self.models.items()}
df = pd.DataFrame.from_dict(data)
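# Illustrative sketch, not part of the Grid class: intended usage of Grid with
# a couple of models and to_data_frame(). Assumes the imports at the top of
# this module (numpy as np, pandas as pd) and that Grid is importable.
def _sketch_grid_to_data_frame():
    models = {
        'rand': np.random.random(27).reshape((3, 3, 3)),
        'spatial': np.arange(27).reshape((3, 3, 3)),
    }
    grid = Grid(models=models)            # unit-width tensors are generated
    print(grid.shape)                     # -> (3, 3, 3)
    print(grid.get_data_range('rand'))    # (min, max) of the 'rand' model
    df = grid.to_data_frame(order='C')    # one flattened column per model
    print(df.head())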
#!/usr/bin/env python
# coding: utf-8
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Created on Jul 2021.
@author: <NAME>
This module processes options 2 and 3 of the menuInicial
"""
import datetime
import pandas as pd
from getMJ import getMJ
class opcao():
def opcao2(placa, dataInicial, dataFinal, tokenMJ, cpf):
dataInicio = datetime.datetime.strptime(dataInicial, '%d/%m/%Y')
dataFim = datetime.datetime.strptime(dataFinal, '%d/%m/%Y')
deltaTime = abs((dataInicio - dataFim).days)
df1 = pd.DataFrame()
df2 = pd.DataFrame()
if deltaTime > 30:
dataFimParcial = dataInicio
dataInicioParcial = dataInicio
controle = 0
while dataFimParcial < dataFim and controle!=1:
dataFimParcial = dataInicioParcial + datetime.timedelta(days=30)
if dataFimParcial > dataFim:
dataFimParcial = dataFim
controle = 1
msg, dados = getMJ.getMovimentoPeriodo(placa, dataInicioParcial.strftime("%Y-%m-%dT%H:%M:%S"), dataFimParcial.strftime("%Y-%m-%dT%H:%M:%S"), tokenMJ, cpf)
dataInicioParcial = dataFimParcial
if msg != '':
print('Erro: Erro para busca entre as datas %s e %s' % (dataInicioParcial, dataFimParcial))
else:
df1 = pd.json_normalize(dados)
df2 = df2.append(df1, ignore_index=True)
else:
msg, dados = getMJ.getMovimentoPeriodo(placa, dataInicio.strftime("%Y-%m-%dT%H:%M:%S"), dataFim.strftime("%Y-%m-%dT%H:%M:%S"), tokenMJ, cpf)
if msg != '':
print('Erro: Erro para busca entre as datas %s e %s' % (dataInicio, dataFim))
else:
df1 = pd.json_normalize(dados)
df2 = df2.append(df1, ignore_index=True)
return(df2)
def opcao3(dadosEntrada, dataInicial, dataFinal, tokenMJ, cpf):
dataInicio = datetime.datetime.strptime(dataInicial, '%d/%m/%Y %H:%M')
dataFim = datetime.datetime.strptime(dataFinal, '%d/%m/%Y %H:%M')
deltaTime = abs(((dataInicio - dataFim).days))*24
latitude = dadosEntrada.split(',')[0]
longitude = dadosEntrada.split(',')[1]
raio = dadosEntrada.split(',')[2]
df1 = pd.DataFrame()
df2 = pd.DataFrame()
msg, listLocal = getMJ.getListaLocal(latitude, longitude, raio, tokenMJ, cpf)
localTable = pd.json_normalize(listLocal)
if msg != '':
print('Erro: ', msg )
df2 = pd.DataFrame()
return(df2)
if localTable.empty:
print('Erro: Lista de local vazia')
df2 = pd.DataFrame()
return(df2)
listaIdLocal = list(localTable['idLocal'])
for local in listaIdLocal:
if deltaTime > 1:
dataFimParcial = dataInicio
dataInicioParcial = dataInicio
controle = 0
while dataFimParcial < dataFim and controle!=1:
dataFimParcial = dataInicioParcial + datetime.timedelta(days=(1/24))
if dataFimParcial > dataFim:
dataFimParcial = dataFim
controle = 1
msg, dados = getMJ.reqIdLocalPeriodo(local, dataInicioParcial.strftime("%Y-%m-%dT%H:%M:%S"), dataFimParcial.strftime("%Y-%m-%dT%H:%M:%S"), tokenMJ, cpf)
dataInicioParcial = dataFimParcial
if msg != '':
print('Erro: Erro para busca entre as datas %s e %s' % (dataInicioParcial, dataFimParcial))
else:
df1 = pd.json_normalize(dados)
df2 = df2.append(df1, ignore_index=True)
else:
msg, dados = getMJ.reqIdLocalPeriodo(local, dataInicio.strftime("%Y-%m-%dT%H:%M:%S"), dataFim.strftime("%Y-%m-%dT%H:%M:%S"), tokenMJ, cpf)
if msg != '':
print('Erro: Erro para busca entre as datas %s e %s' % (dataInicio, dataFim))
else:
df1 = pd.json_normalize(dados)
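# Illustrative sketch, not part of the opcao class: how opcao2 might be called
# from a small driver script. The plate, token and CPF below are placeholders,
# not real values, and the getMJ service must be reachable for this to work.
def _sketch_opcao2():
    placa = 'ABC1234'          # hypothetical plate
    token_mj = '<tokenMJ>'     # hypothetical API token
    cpf = '00000000000'        # hypothetical CPF
    # opcao2 splits ranges longer than 30 days into 30-day windows and
    # appends every partial result into a single DataFrame.
    movimentos = opcao.opcao2(placa, '01/01/2021', '31/03/2021', token_mj, cpf)
    print(movimentos.shape)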
import os,sys
import numpy as np
import pandas as pd
import re
from intervaltree import Interval, IntervalTree
from functools import reduce
from typing import (
List,
Set,
Iterable,
)
from collections import OrderedDict
import viola
from viola.core.indexing import Indexer
from viola.core.bed import Bed
from viola.core.fasta import Fasta
from viola.core.bedpe import Bedpe
from viola.utils.microhomology import get_microhomology_from_positions
from viola.utils.utils import get_inslen_and_insseq_from_alt
from viola._typing import (
IntOrStr,
StrOrIterableStr,
)
from viola._exceptions import (
TableNotFoundError,
InfoNotFoundError,
ContigNotFoundError,
IllegalArgumentError,
)
from sklearn.cluster import AgglomerativeClustering
class Vcf(Bedpe):
"""
Relational database-like object containing SV position dataframes,
FILTER dataframe, INFO dataframes, FORMAT dataframe, and HEADER dataframes.
The instances of this class have information equal to the VCF files.
Attributes
---------------
sv_count: int
Number of SV records
table_list
List of names of all tables included in the object
ids
List of all SV id.
Parameters
----------
df_svpos: DataFrame
DataFrame containing information such as position, strand, svtype, etc.
Columns should be following:
['id', 'chrom1', 'pos1', 'chrom2', 'pos2', 'strand1', 'strand2', 'ref', 'alt', 'qual', 'svtype']
Main key is 'id'. The 'chrom1' and 'chrom2' are the foreign key from contigs_meta table.
df_filter: DataFrame
DataFrame containing FILTER information which locate on the 7th column of the vcf file.
Columns of the input DataFrame should be following:
['id', 'filter']
Main Key is the combination of ('id', 'filter'). Each column is the foreign key from
df_svpos, and filters_meta table, respectively.
odict_df_info: dict[str, DataFrame]
OrderedDict of DataFrames which contain additional information on SV record (equivalent to INFO field of vcf).
Each item of the dictionary contains single INFO.
The dictionary key is the name of each INFO and should be in lowercase.
Columns of the DataFrame should be following:
['id', 'value_idx', 'infoname']
The 'value_idx' column contains 0-origin indice of INFO values.
This is important when one SV record has multiple values of an INFO (eg. cipos).
Main key is the combination of ('id', 'value_idx'), and 'id' is the foreign key coming from df_svpos table.
df_formats: DataFrame
DataFrame containing FORMAT information of the vcf file.
Columns of the DataFrame should be following:
['id', 'sample', 'format', 'value_idx', 'value']
Main key is the combination of ('id', 'sample', 'format').
The ('id', 'sample', 'format') are the foreign key coming from
(df_svpos, samples_meta, format_meta) table, respectively.
"""
_internal_attrs = [
"_df_svpos",
"_df_filters",
"_odict_df_info",
"_df_formats",
"_ls_infokeys",
"_odict_df_headers",
"_metadata",
"_odict_alltables",
"_repr_config",
"_sig_criteria"
]
_internal_attrs_set = set(_internal_attrs)
_repr_column_names = [
"id",
"be1",
"be2",
"strand",
"qual",
"svtype",
]
_repr_column_names_set = set(_repr_column_names)
def __init__(self, df_svpos, df_filters, odict_df_info, df_formats, odict_df_headers = {}, metadata = None):
if not isinstance(odict_df_info, OrderedDict):
raise TypeError('the type of the argument "odict_df_info" should be collections.OrderedDict')
if not isinstance(odict_df_headers, OrderedDict):
raise TypeError('the type of the argument "odict_df_headers" should be collections.OrderedDict')
df_svpos['alt'] = df_svpos['alt'].astype(str)
self._df_svpos = df_svpos
self._df_filters = df_filters
self._odict_df_info = odict_df_info
self._df_formats = df_formats
self._odict_df_headers = odict_df_headers
self._metadata = metadata
self._ls_infokeys = [ x.lower() for x in odict_df_headers['infos_meta']['id'].tolist()]
ls_keys = ['positions', 'filters'] + self._ls_infokeys + ['formats'] + \
list(odict_df_headers.keys())
ls_values = [df_svpos, df_filters] + list(odict_df_info.values()) + [df_formats] + list(odict_df_headers.values())
# self._odict_alltables is a {tablename: table} dictionary
self._odict_alltables = OrderedDict([(k, v) for k, v in zip(ls_keys, ls_values)])
self._repr_config = {
'info': None,
}
@property
def contigs(self) -> List[str]:
"""
Return a list of contigs (chromosomes) listed in the header of the VCF file.
"""
df_contigs_meta = self.get_table('contigs_meta')
arr_contigs = df_contigs_meta['id'].unique()
return list(arr_contigs)
def __repr__(self):
return super().__repr__()
def __str__(self):
return super().__repr__()
def view(self, custom_infonames=None, return_as_dataframe=False):
"""
view(custom_infonames, return_as_dataframe)
Quick view function of the Vcf object.
Parameters
-----------
custom_infonames: list_like or None, default None
The names of the INFO to show additionally.
return_as_dataframe: bool, default False
If true, return as pandas DataFrame.
"""
df_svpos = self.get_table('positions')
ser_id = df_svpos['id']
ser_be1 = df_svpos['chrom1'].astype(str) + ':' + df_svpos['pos1'].astype(str)
ser_be2 = df_svpos['chrom2'].astype(str) + ':' + df_svpos['pos2'].astype(str)
ser_strand = df_svpos['strand1'] + df_svpos['strand2']
ser_qual = df_svpos['qual']
ser_svtype = df_svpos['svtype']
ls_ser = [ser_id, ser_be1, ser_be2, ser_strand, ser_qual, ser_svtype]
ls_key = ['id', 'be1', 'be2', 'strand', 'qual', 'svtype']
dict_ = {k: v for k, v in zip(ls_key, ls_ser)}
df_out = pd.DataFrame(dict_)
if custom_infonames is not None:
df_out = self.append_infos(df_out, ls_tablenames=custom_infonames)
str_df_out = str(df_out)
str_infokeys = ','.join(list(self._ls_infokeys))
desc_info = 'INFO='
desc_doc = 'Documentation of Vcf object ==> '
doc_link = 'https://dermasugita.github.io/ViolaDocs/docs/html/reference/vcf.html'
out = desc_info + str_infokeys + '\n' + desc_doc + doc_link + '\n' + str_df_out
if return_as_dataframe:
return df_out
return str(out)
def replace_svid(self, to_replace, value):
"""
replace_svid(to_replace, value)
Rename the specified SV IDs.
Parameters
----------
to_replace: int or str or List[int or str]
SV IDs to be replaced.
value: int or str or List[int or str]
New SV ID values.
"""
if not isinstance(to_replace, list):
to_replace = [to_replace]
if not isinstance(value, list):
value = [value]
if len(to_replace) != len(value):
raise ValueError('Two arguments should be the same length. {} vs {}'.format(len(to_replace), len(value)))
set_table_list = set(self.table_list)
set_table_list_header = set(self._odict_df_headers.keys())
set_table_list_without_header = set_table_list - set_table_list_header
for rep, val in zip(to_replace, value):
for table_name in set_table_list_without_header:
df_target = self._odict_alltables[table_name]
df_target.loc[df_target['id'] == rep, 'id'] = val
self._odict_alltables[table_name] = df_target
if table_name in self._ls_infokeys:
self._odict_df_info[table_name.upper()] = df_target
def add_info_table(self, table_name, table, number, type_, description, source=None, version=None):
if table_name in self._ls_infokeys:
self.remove_info_table(table_name)
self._ls_infokeys += [table_name]
self._odict_df_info[table_name.upper()] = table
self._odict_alltables[table_name] = table
df_meta = self.get_table('infos_meta')
df_replace = df_meta.append({'id': table_name.upper(), 'number': number, 'type': type_, 'description': description, 'source': source, 'version': version},
ignore_index=True)
self._odict_df_headers['infos_meta'] = df_replace
self._odict_alltables['infos_meta'] = df_replace # not beautiful code...
def remove_info_table(self, table_name):
del self._odict_df_info[table_name.upper()]
del self._odict_alltables[table_name]
df_replace = self.get_table('infos_meta')
df_replace = df_replace.loc[df_replace['id'] != table_name.upper()]
self._odict_df_headers['infos_meta'] = df_replace
self._odict_alltables['infos_meta'] = df_replace
self._ls_infokeys.remove(table_name)
def drop_by_id(self, svid):
"""
drop_by_id(svid)
Remove SV records specified in "svid" argument.
Parameters
-----------
svid: int or str or List[int or str]
ID of SV record to be removed.
Returns
--------
Vcf
A Vcf instance with the specified SV records removed.
"""
if not isinstance(svid, list):
svid = [svid]
set_svid = set(svid)
set_svid_all = set(self.ids)
set_svid_preserved = set_svid_all - set_svid
vcf_removed = self.filter_by_id(set_svid_preserved)
return vcf_removed
def copy(self):
"""
copy()
Return copy of the instance.
"""
df_svpos = self.get_table('positions')
df_filters = self.get_table('filters')
odict_df_infos = OrderedDict([(k, self.get_table(k.lower())) for k, v in self._odict_df_info.items()])
df_formats = self.get_table('formats')
odict_df_headers = OrderedDict([(k, self.get_table(k)) for k,v in self._odict_df_headers.items()])
metadata = self._metadata
return Vcf(df_svpos, df_filters, odict_df_infos, df_formats, odict_df_headers, metadata)
def to_vcf_like(self) -> pd.DataFrame:
"""
to_vcf_like()
Return a vcf-formatted DataFrame. Header information will not be reflected.
"""
df_base_before_position_modification = self.get_table('positions')[['chrom1', 'pos1', 'id', 'ref', 'alt', 'qual', 'svtype', 'strand1']]
def _modify_positions(x):
svtype = x.name
if svtype == 'DUP':
x['pos1'] = x['pos1'] - 1
return x
elif svtype == 'INV':
# if strand1 == '-', subtract 1 from pos1, otherwise subtract 0.
if self._metadata.get('variantcaller', None) != 'delly':
arr_strand1 = x['strand1'].values
arr_num_of_subtraction = np.where(arr_strand1 == '+', 0, 1)
x['pos1'] = x['pos1'] - arr_num_of_subtraction
return x
else:
return x
df_base = df_base_before_position_modification.groupby('svtype').apply(_modify_positions)
df_base = df_base[['chrom1', 'pos1', 'id', 'ref', 'alt', 'qual']]
df_base['qual'] = df_base['qual'].fillna('.')
ser_id = df_base['id']
ser_filter = pd.Series(["" for i in range(len(ser_id))], index=ser_id)
df_filter = self.get_table('filters')
def _create_filter_field(x):
out = ';'.join(x['filter'])
return out
ser_filter = df_filter.groupby('id').apply(_create_filter_field)
df_filter = ser_filter.reset_index(name='filter')
ser_vcfinfo = pd.Series(["" for i in range(len(ser_id))], index=ser_id)
def _create_info_field(x, info):
if x.iloc[1] > 0:
return ',' + str(x.iloc[2])
if type(x.iloc[2]) == bool:
return ';' + info.upper()
return ';' + info.upper() + '=' + str(x.iloc[2])
for info in self._ls_infokeys:
df_info = self.get_table(info)
ser_be_appended = df_info.apply(_create_info_field, axis=1, **{'info':info})
if ser_be_appended.empty:
continue
df_info_appended = df_info.copy()
df_info_appended['info'] = ser_be_appended
df_vcfinfo = df_info_appended.pivot(index='id', columns='value_idx', values='info')
df_vcfinfo = df_vcfinfo.fillna('')
ser_vcfinfo_to_append = df_vcfinfo.apply(''.join, axis=1)
ser_vcfinfo.loc[ser_vcfinfo_to_append.index] = ser_vcfinfo.loc[ser_vcfinfo_to_append.index] + ser_vcfinfo_to_append
ser_vcfinfo.replace("^;", "", regex=True, inplace=True)
df_infofield = ser_vcfinfo.reset_index(name='info')
df_format = self.get_table('formats')
ls_samples = self.get_table('samples_meta')['id']
def _create_format_field(x):
arr_format_, ind_format_ = np.unique(x['format'], return_index=True)
arr_format_ = arr_format_[np.argsort(ind_format_)]
format_ = ':'.join(arr_format_)
ls_sample_format_ = []
for sample in ls_samples:
ls_sample_values_ = []
for a_format_ in arr_format_:
mask = (x['sample'] == sample) & (x['format'] == a_format_)
ls_sample_values_.append(','.join(x.loc[mask]['value'].astype(str)))
ls_sample_format_.append(':'.join(ls_sample_values_))
out_idx = ['format'] + list(ls_samples)
return pd.Series([format_]+ls_sample_format_, index=out_idx)
df_formatfield = df_format.groupby('id').apply(_create_format_field).reset_index()
df_out = pd.merge(df_base, df_filter)
df_out = pd.merge(df_out, df_infofield)
df_out = pd.merge(df_out, df_formatfield)
return df_out
def to_vcf(self, path_or_buf = None, onlyinfo=False) -> str:
"""
to_vcf(path_or_buf)
Return a VCF-formatted string. Header information will not be reflected.
Parameters
----------
path_or_buf: str, optional
File path to save the VCF file.
onlyinfo: bool
if you only want "info", set this option to True
Returns
-------
str
return vcf file as a string.
"""
def get_metadata():
metadata = self._metadata
out = ''
if metadata is None:
return out
for key, value in metadata.items():
if not isinstance(value, list):
value = [value]
value = [str(s) for s in value]
out += '##' + str(key) + '=' + ','.join(value) + '\n'
return out
def get_contig():
df_contig = self.get_table('contigs_meta')
if df_contig.empty:
return ''
ser_contig = '##contig=<ID=' + df_contig['id'].astype(str) + ',length=' + df_contig['length'].astype(str) + '>'
out = '\n'.join(ser_contig)
out += '\n'
return out
def get_info():
str_info = ""
for row in self.get_table("infos_meta").itertuples():
if (row.number == None):
str_num = "."
elif (row.number == -1):
str_num = "A"
else:
str_num = str(row.number)
str_info += "##INFO=<ID={},Number={},Type={},Description=\"{}\">".format(row.id, str_num, row.type,row.description)
str_info += "\n"
return str_info
def get_format():
df_format = self.get_table('formats_meta')
if df_format.empty:
return ''
df_format['number'] = df_format['number'].fillna('.')
ser_out = '##FORMAT=<ID=' + df_format['id'].astype(str) + ',Number=' + df_format['number'].astype(str) + \
',Type=' + df_format['type'].astype(str) + ',Description="' + df_format['description'].astype(str) + '">'
out = '\n'.join(ser_out)
out += '\n'
return out
def get_filter():
df_filter = self.get_table('filters_meta')
if df_filter.empty:
return ''
ser_out = '##FILTER=<ID=' + df_filter['id'].astype(str) + ',Description="' + df_filter['description'].astype(str) + '">'
out = '\n'.join(ser_out)
out += '\n'
return out
def get_alt():
df_alt = self.get_table('alts_meta')
if df_alt.empty:
return ''
ser_out = '##ALT=<ID=' + df_alt['id'].astype(str) + ',Description="' + df_alt['description'].astype(str) + '">'
out = '\n'.join(ser_out)
out += '\n'
return out
str_metadata = get_metadata()
str_contig = get_contig()
str_info = get_info()
str_format = get_format()
str_filter = get_filter()
str_alt = get_alt()
df_vcflike = self.to_vcf_like()
str_table = df_vcflike.to_csv(sep='\t', header=False, index=False)
ls_header = df_vcflike.columns.tolist()
ls_header[0:9] = ['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT']
str_header = "\t".join(ls_header)
str_header += "\n"
ls_vcf_data = [str_metadata, str_contig, str_info, str_format, str_filter, str_alt, str_header, str_table]
#print(os.getcwd())
if (onlyinfo):
ret = str_info
else:
ret = "".join(ls_vcf_data)
if (path_or_buf is not None):
f = open(path_or_buf, 'w')
f.write(ret)
f.close()
#return ret
def to_bedpe_like(
self,
custom_infonames: Iterable[str] = [],
add_filters: bool = False,
add_formats: bool = False,
confidence_intervals: bool = False,
) -> pd.DataFrame:
"""
to_bedpe_like(custom_infonames=[], add_filters, add_formats, confidence_intervals: bool=False)
Return a DataFrame in bedpe-like format.
When specified, you can add INFOs, FILTERs, and FORMATs as additional columns.
Parameters
---------------
custom_infonames: list-like[str]
The table names of INFOs to append.
add_filters: bool, default False
If True, append FILTER information as additional columns.
add_formats: bool, default False
If True, append FORMAT information as additional columns.
confidence_intervals: bool, default False
Whether or not to consider confidence intervals of the breakpoints.
If True, confidence intervals for each breakpoint are represented by [start1, end1) and [start2, end2), respectively.
Otherwise, breakpoints are represented by a single-nucleotide resolution.
Returns
---------------
DataFrame
A Dataframe in bedpe-like format.
The columns include at least the following:
['chrom1', 'start1', 'end1', 'chrom2', 'start2', 'end2',
'name', 'score', 'strand1', 'strand2']
"""
df_out = super().to_bedpe_like(confidence_intervals=confidence_intervals)
if len(custom_infonames) != 0:
df_out = self.append_infos(df_out, custom_infonames, left_on='name')
if add_filters:
df_out = self.append_filters(df_out, left_on='name')
if add_formats:
df_out = self.append_formats(df_out, left_on='name')
return df_out
def to_bedpe(
self,
file_or_buf: str,
custom_infonames: Iterable[str] = [],
add_filters: bool = False,
add_formats: bool = False,
confidence_intervals: bool = False,
):
"""
to_bedpe(file_or_buf, custom_infonames=[], add_filters, add_formats, confidence_intervals: bool=False)
Write a BEDPE file.
Parameters
---------------
file_or_buf: str
File path to save the VCF file.
custom_infonames: list-like[str]
The table names of INFOs to append.
add_filters: bool, default False
If True, append FILTER information as additional columns.
add_formats: bool, default False
If True, append FORMAT information as additional columns.
confidence_intervals: bool, default False
Whether or not to consider confidence intervals of the breakpoints.
If True, confidence intervals for each breakpoint are represented by [start1, end1) and [start2, end2), respectively.
Otherwise, breakpoints are represented by a single-nucleotide resolution.
"""
bedpe = self.to_bedpe_like(custom_infonames=custom_infonames, add_filters=add_filters, add_formats=add_formats, confidence_intervals=confidence_intervals)
bedpe.to_csv(file_or_buf, index=None, sep='\t')
def append_infos(self, base_df,
ls_tablenames,
left_on: str = 'id',
auto_fillna: bool = True) -> pd.DataFrame:
"""
append_infos(base_df, ls_tablenames, left_on='id', auto_fillna=True)
Append INFO tables to the right of the base_df, based on the SV id columns.
If the name of the SV id column in base_df is not 'id', specify column name into left_on argument.
Parameters
---------------
base_df: DataFrame
The DataFrame to which the INFO tables are appended.
ls_tablenames: list-like
The list of INFO table names to be appended.
left_on: str, default 'id'
The name of SV id column of base_df
auto_fillna: bool, default True
If True, use the header information to handle missing values
after merging DataFrames.
Returns
---------------
DataFrame
A DataFrame to which the INFO tables have been added.
"""
df = base_df.copy()
df_infometa = self.get_table('infos_meta')
for tablename in ls_tablenames:
df_to_append_pre = self.get_table(tablename)
df_to_append_pre['new_column_names'] = tablename + '_' + df_to_append_pre['value_idx'].astype(str)
df_to_append = df_to_append_pre.pivot(index='id', columns='new_column_names', values=tablename)
df = pd.merge(df, df_to_append, how='left', left_on=left_on, right_index=True)
info_dtype = df_infometa.loc[df_infometa['id']==tablename.upper(), 'type'].iloc[0]
len_info = df_to_append.shape[1]
ls_ind_fancy = [tablename + '_' + str(i) for i in range(len_info)]
if info_dtype == 'Integer':
df[ls_ind_fancy] = df[ls_ind_fancy].fillna(0).astype(int)
elif info_dtype == 'Flag':
df[ls_ind_fancy] = df[ls_ind_fancy].fillna(False)
return df
def append_formats(self, base_df, left_on='id'):
"""
append_formats(base_df, left_on='id')
Append formats to the right of the base_df, based on the SV id columns.
If the name of the SV id column in base_df is not 'id', specify column name into left_on argument.
Parameters
---------------
base_df: DataFrame
The DataFrame to which the FORMAT information is appended.
left_on: str, default 'id'
The name of SV id column of base_df
Returns
---------------
DataFrame
A DataFrame to which the FORMAT information has been added.
"""
df_format = self.get_table('formats')
df_format['format_id'] = df_format['sample'] + '_' + df_format['format'] + '_' + df_format['value_idx'].astype(str)
df_format.drop(['sample', 'format', 'value_idx'], axis=1, inplace=True)
df_format = df_format.pivot(index='id', columns='format_id', values='value')
df_out = pd.merge(base_df, df_format, how='left', left_on=left_on, right_index=True)
return df_out
def append_filters(self, base_df, left_on='id'):
"""
append_filters(base_df, left_on='id')
Append filters to the right of the base_df, based on the SV id columns.
If the name of the SV id column in base_df is not 'id', specify column name into left_on argument.
Parameters
---------------
base_df: DataFrame
The DataFrame to which the FILTER information is appended.
left_on: str, default 'id'
The name of SV id column of base_df
Returns
---------------
DataFrame
A DataFrame to which the FILTER information has been added.
"""
df_filters = self.get_table('filters')
df_filters_expand = df_filters['filter'].str.get_dummies()
df_be_appended = pd.concat([ df_filters['id'], df_filters_expand ], axis=1)
df_be_appended = df_be_appended.groupby('id').sum().replace(to_replace={1: True, 0: False})
df_out = pd.merge(base_df, df_be_appended, how='left', left_on=left_on, right_on='id')
return df_out
def _parse_filter_query(self, q):
sq = q.split(' ')
# for flag informations and filters
if sq[0].startswith('!'):
sq0 = sq[0][1:]
else:
sq0 = sq[0]
if sq0.lower() in self._ls_infokeys:
df_infometa = self.get_table('infos_meta')
row_mask = df_infometa['id'].str.contains(sq0.upper())
sq_dtype = df_infometa.loc[row_mask, 'type'].iloc[0]
if sq_dtype == 'Integer':
sq[-1] = int(sq[-1])
elif sq_dtype == 'String':
sq[-1] = str(sq[-1])
elif sq_dtype =='Flag':
if len(sq) == 1:
if sq[0].startswith('!'):
flag = False
else:
flag = True
else:
flag = True if sq[-1] == 'True' else False
exclude = not flag
set_out = self._filter_infos_flag(sq0, exclude=exclude) # defined in Bedpe
return set_out
if len(sq) == 3:
set_out = self._filter_infos(sq[0], 0, sq[1], sq[2]) # defined in Bedpe
else:
set_out = self._filter_infos(*sq) # defined in Bedpe
#print(set_out)
return set_out
# is_filter?
arr_filters = self.get_table('filters_meta')['id'].values
ls_filters = list(arr_filters) + ['PASS']
if sq0 in ls_filters:
if len(sq) == 1:
if sq[0].startswith('!'):
flag = False
else:
flag = True
else:
flag = True if sq[-1] == 'True' else False
exclude = not flag
set_out = self._filter_filters(sq0, exclude=exclude)
return set_out
# is_format?
if sq0 in self.get_table('samples_meta').values:
df_formatmeta = self.get_table('formats_meta')
row_mask = df_formatmeta['id'].str.contains(sq[1])
sq_dtype = df_formatmeta.loc[row_mask, 'type'].iloc[0]
if sq_dtype == 'Integer':
sq[-1] = int(sq[-1])
elif sq_dtype == 'String':
sq[-1] = str(sq[-1])
if len(sq) == 4:
set_out = self._filter_formats(sq[0], sq[1], 0, sq[2], sq[3])
else:
sq[2] = int(sq[2])
set_out = self._filter_formats(*sq)
return set_out
# is_locus?
if sq0 in ['be1', 'be2', 'pos1', 'pos2']:
split_locus = sq[1].split(':')
chrom = split_locus[0]
if chrom.startswith('!'):
exclude_flag = True
chrom = chrom[1:]
else:
exclude_flag = False
if chrom not in self.contigs:
raise ContigNotFoundError(chrom)
if len(split_locus) == 1:
st = None
en = None
elif len(split_locus) == 2:
split_locus_coord = split_locus[1].split('-')
if len(split_locus_coord) == 1:
st = int(split_locus_coord[0])
en = int(split_locus_coord[0]) + 1
elif len(split_locus_coord) == 2:
st = split_locus_coord[0]
en = split_locus_coord[1]
if st == '':
st = None
else:
st = int(st)
if en == '':
en = None
else:
en = int(en)
if sq0 in ['be1', 'pos1']:
pos_num = 1
elif sq0 in ['be2', 'pos2']:
pos_num = 2
args = [pos_num, chrom, st, en]
if exclude_flag:
return self._filter_by_positions_exclude(*args)
return self._filter_by_positions(*args)
def filter(self, ls_query, query_logic='and'):
"""
filter(ls_query, query_logic)
Filter Vcf object by the list of queries.
Return object is also an instance of the Vcf object
"""
### != operation is dangerous
if isinstance(ls_query, str):
ls_query = [ls_query]
if query_logic == 'and':
set_result = self.get_ids()
for query in ls_query:
set_query = self._parse_filter_query(query)
set_result = set_result & set_query
elif query_logic == 'or':
set_result = set()
for query in ls_query:
set_query = self._parse_filter_query(query)
set_result = set_result | set_query
else:
ls_set_query = [self._parse_filter_query(q) for q in ls_query]
pattern = re.compile('([^0-9]*)([0-9]+)([^0-9]*)')
target = r"\1ls_set_query[\2]\3"
expr = pattern.sub(target, query_logic)
set_result = eval(expr)
out = self.filter_by_id(set_result)
return out
def _filter_by_id(self, tablename, arrlike_id):
df = self.get_table(tablename)
return df.loc[df['id'].isin(arrlike_id)].reset_index(drop=True)
def filter_by_id(self, arrlike_id):
"""
filter_by_id(arrlike_id)
Filter Vcf object according to the list of SV ids.
Return object is also an instance of the Vcf object
Parameters
---------------
arrlike_id: list-like
SV ids which you would like to keep.
Returns
---------------
Vcf
A Vcf object with the SV id specified in the arrlike_id argument.
All records associated with SV ids that are not in the arrlike_id will be discarded.
"""
out_svpos = self._filter_by_id('positions', arrlike_id)
out_filters = self._filter_by_id('filters', arrlike_id)
out_odict_df_info = OrderedDict([(k.upper(), self._filter_by_id(k, arrlike_id)) for k in self._ls_infokeys])
out_formats = self._filter_by_id('formats', arrlike_id)
out_odict_df_headers = self._odict_df_headers.copy()
out_metadata = self._metadata
return Vcf(out_svpos, out_filters, out_odict_df_info, out_formats, out_odict_df_headers, out_metadata)
def _filter_pos_table(self, item, operator, threshold):
df = self.get_table('positions')
e = "df.loc[df[item] {0} threshold]['id']".format(operator)
return set(eval(e))
def _filter_filters(self, _filter, exclude=False):
df = self.get_table('filters')
set_out = set(df.loc[df['filter'] == _filter]['id'])
if exclude:
set_out = self.get_ids() - set_out
return set_out
def _filter_formats(self, sample, item, item_idx=0, operator=None, threshold=None):
df = self.get_table('formats')
target_q = (df['sample'] == sample) & (df['format'] == item) & (df['value_idx'] == item_idx)
df_target = df.loc[target_q]
e = "df_target.loc[df_target['value'] {0} threshold]['id']".format(operator)
return set(eval(e))
def _filter_header(self, tablename):
pass
def annotate_bed(self, bed: Bed, annotation: str, suffix=['left', 'right'], description=None):
df_svpos = self.get_table('positions')
ls_left = []
ls_right = []
for idx, row in df_svpos.iterrows():
svid = row['id']
chrom1 = row['chrom1']
pos1 = row['pos1']
chrom2 = row['chrom2']
pos2 = row['pos2']
df_bp1 = bed.query(chrom1, pos1)
df_bp2 = bed.query(chrom2, pos2)
if not df_bp1.empty:
ls_left.append([svid, 0, True])
if not df_bp2.empty:
ls_right.append([svid, 0, True])
left_name = annotation + suffix[0]
right_name = annotation + suffix[1]
df_left = pd.DataFrame(ls_left, columns=('id', 'value_idx', left_name))
df_right = pd.DataFrame(ls_right, columns=('id', 'value_idx', right_name))
self.add_info_table(left_name, df_left, 0, type_="Flag", description=description)
self.add_info_table(right_name, df_right, 0, type_="Flag", description=description)
def breakend2breakpoint(self):
"""
breakend2breakpoint()
Converts a Vcf object into a breakpoint-based Vcf object by integrating the paired breakends (BND) and inferring their SVTYPE.
Returns
--------
Vcf
SV records with svtype being BND were integrated into breakpoints, and svtype INFO will be overwritten.
"""
out = self.copy()
if out._metadata['variantcaller'] == 'lumpy':
ls_secondary = self.get_table('secondary')['id'].tolist()
out = out.drop_by_id(ls_secondary)
out.remove_info_table('secondary')
df_svpos = out.get_table('positions')
df_svtype = out.get_table('svtype')
if out._metadata['variantcaller'] == 'delly':
df_svpos.loc[df_svpos['svtype'] == 'BND', 'svtype'] = 'TRA'
df_svtype.loc[df_svtype['svtype'] == 'BND', 'svtype'] = 'TRA'
out._odict_alltables['positions'] = df_svpos
out._odict_alltables['svtype'] = df_svtype
out._odict_df_info['SVTYPE'] = df_svtype
return out
df_mateid = out.get_table('mateid')
df_bnd = df_svpos[df_svpos['svtype'] == 'BND']
ls_info_breakend_id = []
breakpoint_id_num = 0
if df_bnd.empty:
return self
arr_skip = np.array([])
for idx, row in df_bnd.iterrows():
svid = row['id']
ser_mateid = df_mateid.loc[df_mateid['id'] == svid, 'mateid']
if ser_mateid.empty:
mateid = None
else:
mateid = ser_mateid.item()
if np.isin(svid, arr_skip):
continue
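# SVTYPE inference for a BND pair (applied below):
#   no mate record                                   -> keep as BND
#   mates on different chromosomes                   -> TRA
#   both breakends on the same strand                -> INV
#   inserted sequence > 0.5 * |pos1 - pos2|          -> INS
#   -/+ with pos1 < pos2, or +/- with pos1 > pos2    -> DUP
#   anything else                                    -> DEL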
if mateid is None:
svtype = 'BND'
elif row['chrom1'] != row['chrom2']:
svtype = 'TRA'
elif row['strand1'] == row['strand2']:
svtype = 'INV'
elif get_inslen_and_insseq_from_alt(row['alt'])[0] > abs(row['pos1'] - row['pos2']) * 0.5:
svtype = 'INS'
elif (row['pos1'] < row['pos2']) & (row['strand1'] == '-') & (row['strand2'] == '+'):
svtype = 'DUP'
elif (row['pos1'] > row['pos2']) & (row['strand1'] == '+') & (row['strand2'] == '-'):
svtype = 'DUP'
else:
svtype = 'DEL'
arr_skip = np.append(arr_skip, mateid)
breakpoint_id = 'viola_breakpoint:' + str(breakpoint_id_num)
if mateid is not None:
ls_info_breakend_id += [[breakpoint_id, 0, svid], [breakpoint_id, 1, mateid]]
else:
ls_info_breakend_id += [[breakpoint_id, 0, svid]]
df_svpos.loc[df_svpos['id'] == svid, ['id', 'svtype']] = [breakpoint_id, svtype]
df_svtype.loc[df_svtype['id'] == svid, ['id', 'svtype']] = [breakpoint_id, svtype]
out.replace_svid(svid, breakpoint_id)
breakpoint_id_num += 1
out._odict_alltables['positions'] = df_svpos
out._odict_alltables['svtype'] = df_svtype
out._odict_df_info['SVTYPE'] = df_svtype
out.remove_info_table('mateid')
df_info_breakend_id = pd.DataFrame(ls_info_breakend_id, columns=('id', 'value_idx', 'orgbeid'))
out.add_info_table('orgbeid', df_info_breakend_id, type_='String', number=2, description='Breakend ID which were in original VCF file.', source='Python package, Viola-SV.')
if out._metadata['variantcaller'] != 'lumpy':
out = out.drop_by_id(list(arr_skip))
return out
def _get_unique_events_ids(self) -> Set[IntOrStr]:
"""
Note: this method currently raises errors; the fix has been postponed.
"""
if 'mateid' not in self.table_list:
print("Can't find mateid table")
return
df = self.get_table('mateid')
df2 = df.reset_index().set_index('mateid_0').loc[df['id']]
arr_mask = df2['index'].values > np.arange(df2.shape[0])
set_to_subtract = set(df2.loc[arr_mask]['id'])
set_all_ids = self.get_ids()
set_result_ids = set_all_ids - set_to_subtract
return set_result_ids
def classify_manual_svtype(self, definitions=None, ls_conditions=None, ls_names=None, ls_order=None, return_series=True):
"""
classify_manual_svtype(definitions, ls_conditions, ls_names, ls_order=None)
Classify SV records by user-defined criteria. A new INFO table named
'manual_sv_type' will be created.
Parameters
------------
definitions: path_or_buf, default None
Path to the file which specifies the definitions of custom SV classification. This argument is disabled when "ls_conditions" is not None.
If "default" is specified, the simple length-based SV classification will be employed.
If "article" is specified, the same definition file which was used in the Viola publication will be reflected.
Below are the links to each definition file you can specify with this method.
"default" -> https://github.com/dermasugita/Viola-SV/blob/master/examples/demo_sig/resources/definitions/sv_class_default.txt
"article" -> https://github.com/dermasugita/Viola-SV/blob/master/examples/demo_sig/resources/definitions/sv_class_article.txt
ls_conditions: List[callable] or List[str], default None
List of definitions of custom SV classification. The data type of the elements in the list can be callable or SV ID (str).
callable --> Functions that take the Vcf object and return a list of SV IDs that satisfy the conditions of the SV class to be defined.
SV ID --> Lists of SV IDs that satisfy the conditions of the SV class to be defined.
This argument is disabled when "definitions" is not None.
ls_names: List[str], default None
List of the names of the custom SV class corresponding to the "ls_conditions". This argument is disabled when "definitions" is not None.
return_series: bool, default True
Return counts of each custom SV class as a pd.Series.
Returns
---------
pd.Series or None
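Examples
---------
A hedged sketch (the callable and its query string are illustrative assumptions):
>>> small_del = lambda v: v.filter('svlen > -1000').ids
>>> vcf.classify_manual_svtype(ls_conditions=[small_del], ls_names=['small_del'])
Records not captured by any condition are labelled 'others'.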
"""
set_ids_current = set(self.ids)
obj = self
ls_ids = []
ls_result_names = []
if definitions is not None:
if isinstance(definitions, str):
if definitions == "default":
d = os.path.dirname(sys.modules["viola"].__file__)
definitions = os.path.join(d, "data/sv_class_default.txt")
ls_conditions, ls_names = self._parse_signature_definition_file(open(definitions, 'r'))
elif definitions == "article":
d = os.path.dirname(sys.modules["viola"].__file__)
definitions = os.path.join(d, "data/sv_class_article.txt")
ls_conditions, ls_names = self._parse_signature_definition_file(open(definitions, 'r'))
else:
ls_conditions, ls_names = self._parse_signature_definition_file(open(definitions, 'r'))
else:
ls_conditions, ls_names = self._parse_signature_definition_file(definitions)
for cond, name in zip(ls_conditions, ls_names):
obj = obj.filter_by_id(set_ids_current)
if callable(cond):
ids = cond(obj)
else:
ids = cond
set_ids = set(ids)
set_ids_intersection = set_ids_current & set_ids
ls_ids += list(set_ids_intersection)
ls_result_names += [name for i in range(len(set_ids_intersection))]
set_ids_current = set_ids_current - set_ids_intersection
ls_ids += list(set_ids_current)
ls_result_names += ['others' for i in range(len(set_ids_current))]
ls_zeros = [0 for i in range(len(self.ids))]
df_result = pd.DataFrame({'id': ls_ids, 'value_idx': ls_zeros, 'manual_sv_type': ls_result_names})
self.add_info_table('manual_sv_type', df_result, number=1, type_='String', description='Custom SV class defined by user')
if return_series:
if ls_order is None:
pd_ind_reindex = pd.Index(ls_names + ['others'])
else:
pd_ind_reindex = | pd.Index(ls_order) | pandas.Index |
# IMPORTATION STANDARD
import os
# IMPORTATION THIRDPARTY
import pandas as pd
import pytest
# IMPORTATION INTERNAL
from gamestonk_terminal.stocks.due_diligence import dd_controller
# pylint: disable=E1101
# pylint: disable=W0603
first_call = True
@pytest.mark.block_network
@pytest.mark.record_stdout
def test_menu_quick_exit(mocker):
mocker.patch("builtins.input", return_value="quit")
mocker.patch("gamestonk_terminal.stocks.due_diligence.dd_controller.session")
mocker.patch(
"gamestonk_terminal.stocks.due_diligence.dd_controller.session.prompt",
return_value="quit",
)
stock = pd.DataFrame()
dd_controller.menu(
ticker="TSLA", start="10/25/2021", interval="1440min", stock=stock
)
@pytest.mark.block_network
@pytest.mark.record_stdout
def test_menu_system_exit(mocker):
global first_call
first_call = True
def side_effect(arg):
global first_call
if first_call:
first_call = False
raise SystemExit()
return arg
m = mocker.Mock(return_value="quit", side_effect=side_effect)
mocker.patch("builtins.input", return_value="quit")
mocker.patch("gamestonk_terminal.stocks.due_diligence.dd_controller.session")
mocker.patch(
"gamestonk_terminal.stocks.due_diligence.dd_controller.session.prompt",
return_value="quit",
)
mocker.patch(
"gamestonk_terminal.stocks.due_diligence.dd_controller.DueDiligenceController.switch",
new=m,
)
stock = pd.DataFrame()
dd_controller.menu(
ticker="TSLA", start="10/25/2021", interval="1440min", stock=stock
)
@pytest.mark.block_network
@pytest.mark.record_stdout
def test_print_help():
dd = dd_controller.DueDiligenceController(
ticker="", start="", interval="", stock=pd.DataFrame()
)
dd.print_help()
@pytest.mark.block_network
def test_switch_empty():
dd = dd_controller.DueDiligenceController(
ticker="", start="", interval="", stock= | pd.DataFrame() | pandas.DataFrame |
"""
Download, transform and simulate various binary datasets.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: MIT
from re import sub
from collections import Counter
from itertools import product
from urllib.parse import urljoin
from string import ascii_lowercase
from zipfile import ZipFile
from io import BytesIO, StringIO
import requests
import numpy as np
import pandas as pd
from sklearn.utils import check_X_y
from imblearn.datasets import make_imbalance
from .base import Datasets, FETCH_URLS, RANDOM_STATE
class ImbalancedBinaryDatasets(Datasets):
"""Class to download, transform and save binary class imbalanced
datasets."""
MULTIPLICATION_FACTORS = [2, 3]
@staticmethod
def _calculate_ratio(multiplication_factor, y):
"""Calculate ratio based on IRs multiplication factor."""
ratio = Counter(y).copy()
ratio[1] = int(ratio[1] / multiplication_factor)
return ratio
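# Worked example: if y yields Counter({0: 100, 1: 40}) and the multiplication factor is 2,
# the returned sampling strategy is {0: 100, 1: 20}, i.e. the minority class count is halved.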
def _make_imbalance(self, data, multiplication_factor):
"""Undersample the minority class."""
X_columns = [col for col in data.columns if col != "target"]
X, y = check_X_y(data.loc[:, X_columns], data.target)
if multiplication_factor > 1.0:
sampling_strategy = self._calculate_ratio(multiplication_factor, y)
X, y = make_imbalance(
X, y, sampling_strategy=sampling_strategy, random_state=RANDOM_STATE
)
data = pd.DataFrame(np.column_stack((X, y)))
data.iloc[:, -1] = data.iloc[:, -1].astype(int)
return data
def download(self):
"""Download the datasets and append undersampled versions of them."""
super(ImbalancedBinaryDatasets, self).download()
undersampled_datasets = []
for (name, data), factor in list(
product(self.content_, self.MULTIPLICATION_FACTORS)
):
ratio = self._calculate_ratio(factor, data.target)
if ratio[1] >= 15:
data = self._make_imbalance(data, factor)
undersampled_datasets.append((f"{name} ({factor})", data))
self.content_ += undersampled_datasets
return self
def fetch_breast_tissue(self):
"""Download and transform the Breast Tissue Data Set.
The minority class is identified as the `car` and `fad`
labels and the majority class as the rest of the labels.
http://archive.ics.uci.edu/ml/datasets/breast+tissue
"""
data = pd.read_excel(FETCH_URLS["breast_tissue"], sheet_name="Data")
data = data.drop(columns="Case #").rename(columns={"Class": "target"})
data["target"] = data["target"].isin(["car", "fad"]).astype(int)
return data
def fetch_ecoli(self):
"""Download and transform the Ecoli Data Set.
The minority class is identified as the `pp` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/ecoli
"""
data = pd.read_csv(FETCH_URLS["ecoli"], header=None, delim_whitespace=True)
data = data.drop(columns=0).rename(columns={8: "target"})
data["target"] = data["target"].isin(["pp"]).astype(int)
return data
def fetch_eucalyptus(self):
"""Download and transform the Eucalyptus Data Set.
The minority class is identified as the `best` label
and the majority class as the rest of the labels.
https://www.openml.org/d/188
"""
data = pd.read_csv(FETCH_URLS["eucalyptus"])
data = data.iloc[:, -9:].rename(columns={"Utility": "target"})
data = data[data != "?"].dropna()
data["target"] = data["target"].isin(["best"]).astype(int)
return data
def fetch_glass(self):
"""Download and transform the Glass Identification Data Set.
The minority class is identified as the `1` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/glass+identification
"""
data = pd.read_csv(FETCH_URLS["glass"], header=None)
data = data.drop(columns=0).rename(columns={10: "target"})
data["target"] = data["target"].isin([1]).astype(int)
return data
def fetch_haberman(self):
"""Download and transform the Haberman's Survival Data Set.
The minority class is identified as the `1` label
and the majority class as the `0` label.
https://archive.ics.uci.edu/ml/datasets/Haberman's+Survival
"""
data = pd.read_csv(FETCH_URLS["haberman"], header=None)
data.rename(columns={3: "target"}, inplace=True)
data["target"] = data["target"].isin([2]).astype(int)
return data
def fetch_heart(self):
"""Download and transform the Heart Data Set.
The minority class is identified as the `2` label
and the majority class as the `1` label.
http://archive.ics.uci.edu/ml/datasets/statlog+(heart)
"""
data = pd.read_csv(FETCH_URLS["heart"], header=None, delim_whitespace=True)
data.rename(columns={13: "target"}, inplace=True)
data["target"] = data["target"].isin([2]).astype(int)
return data
def fetch_iris(self):
"""Download and transform the Iris Data Set.
The minority class is identified as the `1` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/iris
"""
data = pd.read_csv(FETCH_URLS["iris"], header=None)
data.rename(columns={4: "target"}, inplace=True)
data["target"] = data["target"].isin(["Iris-setosa"]).astype(int)
return data
def fetch_libras(self):
"""Download and transform the Libras Movement Data Set.
The minority class is identified as the `1` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/Libras+Movement
"""
data = pd.read_csv(FETCH_URLS["libras"], header=None)
data.rename(columns={90: "target"}, inplace=True)
data["target"] = data["target"].isin([1]).astype(int)
return data
def fetch_liver(self):
"""Download and transform the Liver Disorders Data Set.
The minority class is identified as the `1` label
and the majority class as the '2' label.
https://archive.ics.uci.edu/ml/datasets/liver+disorders
"""
data = pd.read_csv(FETCH_URLS["liver"], header=None)
data.rename(columns={6: "target"}, inplace=True)
data["target"] = data["target"].isin([1]).astype(int)
return data
def fetch_pima(self):
"""Download and transform the Pima Indians Diabetes Data Set.
The minority class is identified as the `1` label
and the majority class as the '0' label.
https://www.kaggle.com/uciml/pima-indians-diabetes-database
"""
data = pd.read_csv(FETCH_URLS["pima"], header=None, skiprows=9)
data.rename(columns={8: "target"}, inplace=True)
return data
def fetch_vehicle(self):
"""Download and transform the Vehicle Silhouettes Data Set.
The minority class is identified as the `1` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/Statlog+(Vehicle+Silhouettes)
"""
data = pd.DataFrame()
for letter in ascii_lowercase[0:9]:
partial_data = pd.read_csv(
urljoin(FETCH_URLS["vehicle"], "xa%s.dat" % letter),
header=None,
delim_whitespace=True,
)
partial_data = partial_data.rename(columns={18: "target"})
partial_data["target"] = partial_data["target"].isin(["van"]).astype(int)
data = data.append(partial_data)
return data
def fetch_wine(self):
"""Download and transform the Wine Data Set.
The minority class is identified as the `2` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/wine
"""
data = pd.read_csv(FETCH_URLS["wine"], header=None)
data.rename(columns={0: "target"}, inplace=True)
data["target"] = data["target"].isin([2]).astype(int)
return data
def fetch_new_thyroid_1(self):
"""Download and transform the Thyroid Disease Data Set.
The minority class is identified as the `positive`
label and the majority class as the `negative` label.
.. note:: The positive class was originally label 2.
https://archive.ics.uci.edu/ml/datasets/Thyroid+Disease
"""
data = pd.read_csv(
FETCH_URLS["new_thyroid"],
header=None,
)
data.rename(columns={0: "target"}, inplace=True)
data["target"] = (data["target"] == 2).astype(int)
return data
def fetch_new_thyroid_2(self):
"""Download and transform the Thyroid Disease Data Set.
The minority class is identified as the `positive`
label and the majority class as the `negative` label.
.. note:: The positive class was originally label 3.
https://archive.ics.uci.edu/ml/datasets/Thyroid+Disease
"""
data = pd.read_csv(
FETCH_URLS["new_thyroid"],
header=None,
)
data.rename(columns={0: "target"}, inplace=True)
data["target"] = (data["target"] == 3).astype(int)
return data
def fetch_cleveland(self):
"""Download and transform the Heart Disease Cleveland Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
https://archive.ics.uci.edu/ml/datasets/heart+disease
"""
data = pd.read_csv(FETCH_URLS["cleveland"], header=None)
data.rename(columns={13: "target"}, inplace=True)
data["target"] = (data["target"] == 1).astype(int)
return data
def fetch_dermatology(self):
"""Download and transform the Dermatology Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
https://archive.ics.uci.edu/ml/datasets/Dermatology
"""
data = pd.read_csv(FETCH_URLS["dermatology"], header=None)
data.rename(columns={34: "target"}, inplace=True)
data.drop(columns=33, inplace=True)
data["target"] = (data.target == 6).astype(int)
return data
def fetch_led(self):
"""Download and transform the LED Display Domain Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
https://www.openml.org/d/40496
"""
data = pd.read_csv(FETCH_URLS["led"])
data.rename(columns={"Class": "target"}, inplace=True)
data["target"] = (data.target == 1).astype(int)
return data
def fetch_page_blocks(self):
"""Download and transform the Page Blocks Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
https://www.openml.org/d/1021
"""
data = pd.read_csv(FETCH_URLS["page_blocks"])
data.rename(columns={"class": "target"}, inplace=True)
data["target"] = (data.target != 1).astype(int)
return data
def fetch_vowel(self):
"""Download and transform the Vowel Recognition Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
https://www.openml.org/d/375
"""
data = pd.read_csv(FETCH_URLS["vowels"])
data.rename(columns={"speaker": "target"}, inplace=True)
data.drop(columns=["utterance", "frame"], inplace=True)
data["target"] = (data["target"] == 1).astype(int)
return data
def fetch_yeast(self):
"""Download and transform the Yeast Data Set.
The minority class is identified as the `positive` label and
the majority class as the `negative` label.
https://archive.ics.uci.edu/ml/datasets/Yeast
"""
data = pd.read_csv(FETCH_URLS["yeast"], header=None)
data = pd.DataFrame(
[
[val for val in row.split(" ") if len(val) != 0]
for row in data[0].tolist()
]
)
data.drop(columns=0, inplace=True)
data.rename(columns={9: "target"}, inplace=True)
data["target"] = (data["target"] == "MIT").astype(int)
return data
class BinaryDatasets(Datasets):
"""Class to download, transform and save binary class datasets."""
def fetch_banknote_authentication(self):
"""Download and transform the Banknote Authentication Data Set.
https://archive.ics.uci.edu/ml/datasets/banknote+authentication
"""
data = pd.read_csv(FETCH_URLS["banknote_authentication"], header=None)
data.rename(columns={4: "target"}, inplace=True)
return data
def fetch_arcene(self):
"""Download and transform the Arcene Data Set.
https://archive.ics.uci.edu/ml/datasets/Arcene
"""
url = FETCH_URLS["arcene"]
data, labels = [], []
for data_type in ("train", "valid"):
data.append(
pd.read_csv(
urljoin(url, f"ARCENE/arcene_{data_type}.data"),
header=None,
sep=" ",
).drop(columns=list(range(1998, 10001)))
)
labels.append(
pd.read_csv(
urljoin(
url,
("ARCENE/" if data_type == "train" else "")
+ f"arcene_{data_type}.labels",
),
header=None,
).rename(columns={0: "target"})
)
data = pd.concat(data, ignore_index=True)
labels = pd.concat(labels, ignore_index=True)
data = pd.concat([data, labels], axis=1)
data["target"] = data["target"].isin([1]).astype(int)
return data
def fetch_audit(self):
"""Download and transform the Audit Data Set.
https://archive.ics.uci.edu/ml/datasets/Audit+Data
"""
zipped_data = requests.get(FETCH_URLS["audit"]).content
unzipped_data = (
ZipFile(BytesIO(zipped_data))
.read("audit_data/audit_risk.csv")
.decode("utf-8")
)
data = pd.read_csv(StringIO(sub(r"@.+\n+", "", unzipped_data)), engine="python")
data = (
data.drop(columns=["LOCATION_ID"])
.rename(columns={"Risk": "target"})
.dropna()
)
return data
def fetch_spambase(self):
"""Download and transform the Spambase Data Set.
https://archive.ics.uci.edu/ml/datasets/Spambase
"""
data = | pd.read_csv(FETCH_URLS["spambase"], header=None) | pandas.read_csv |
from __future__ import print_function
import collections
import os
import sys
import numpy as np
import pandas as pd
try:
from sklearn.impute import SimpleImputer as Imputer
except ImportError:
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler, MinMaxScaler, MaxAbsScaler
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path)
import candle
global_cache = {}
SEED = 2017
P1B3_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P1B3/'
DATA_URL = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/combo/'
def get_file(url):
return candle.fetch_file(url, 'Pilot1')
def impute_and_scale(df, scaling='std', keepcols=None):
"""Impute missing values with mean and scale data included in pandas dataframe.
Parameters
----------
df : pandas dataframe
dataframe to impute and scale
scaling : 'maxabs' [-1,1], 'minmax' [0,1], 'std', or None, optional (default 'std')
type of scaling to apply
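Example
-------
A hedged sketch (column names are illustrative)::
    impute_and_scale(pd.DataFrame({'a': [1.0, None, 3.0], 'b': [2.0, 4.0, None]}), scaling='maxabs')
NaNs are imputed with the column mean, then each column is scaled to [-1, 1].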
"""
if keepcols is None:
df = df.dropna(axis=1, how='all')
else:
df = df[keepcols].copy()
all_na_cols = df.columns[df.isna().all()]
df[all_na_cols] = 0
imputer = Imputer(strategy='mean')
mat = imputer.fit_transform(df)
if scaling is None or scaling.lower() == 'none':
return pd.DataFrame(mat, columns=df.columns)
if scaling == 'maxabs':
scaler = MaxAbsScaler()
elif scaling == 'minmax':
scaler = MinMaxScaler()
else:
scaler = StandardScaler()
mat = scaler.fit_transform(mat)
df = | pd.DataFrame(mat, columns=df.columns) | pandas.DataFrame |
import pandas as pd
import sys
from datetime import datetime
from pytz import timezone, utc
def str_list(s_cd):
cds = []
if type(s_cd) == str:
cds = []
cds.append(s_cd)
else:
cds = s_cd
return cds
def today_yymmdd():
d = pd.Timestamp.today().date().strftime('%y%m%d')
return d
def present_date():
d = pd.Timestamp.today().strftime('%Y-%m-%d')
return d
def a_week_ago():
tmp = | pd.Timestamp.today() | pandas.Timestamp.today |
from datetime import datetime
import pandas as pd
import os
import re
from .transformers_map import transformers_map
def build_data_frame(backtest: dict, csv_path: str):
"""Creates a Pandas DataFame with the provided backtest. Used when providing a CSV as the datafile
Parameters
----------
backtest: dict, provides instructions on how to build the dataframe
csv_path: string, absolute path of where to find the data file
Returns
-------
object, A Pandas DataFrame indexed by date
"""
df = load_basic_df_from_csv(csv_path)
if df.empty:
raise Exception("Dataframe is empty. Check the start and end dates")
df = prepare_df(df, backtest)
return df
def load_basic_df_from_csv(csv_path: str):
"""Loads a dataframe from a csv
Parameters
----------
csv_path: string, path to the csv so it can be read
Returns
df, A basic dataframe with the data from the csv
"""
if not os.path.isfile(csv_path):
raise Exception(f"File not found: {csv_path}")
df = pd.read_csv(csv_path, header=0)
df = standardize_df(df)
return df
def prepare_df(df: pd.DataFrame, backtest: dict):
"""Prepares the provided dataframe for a backtest by applying the datapoints and splicing based on the given backtest.
Useful when loading an existing dataframe (ex. from a cache).
Parameters
----------
df: DataFrame, should have all the open, high, low, close, volume data set as headers and indexed by date
backtest: dict, provides instructions on how to build the dataframe
Returns
------
df: DataFrame, with all the datapoints as column headers and trimmed to the provided time frames
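A hedged example of a backtest dict (keys are the ones read below; values are illustrative):
    {"datapoints": [], "chart_period": "1H", "trailing_stop_loss": 0.05,
     "start": "2020-08-31 04:00", "stop": "2020-09-30 00:00"}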
"""
datapoints = backtest.get("datapoints", [])
df = apply_transformers_to_dataframe(df, datapoints)
trailing_stop_loss = backtest.get("trailing_stop_loss", 0)
if trailing_stop_loss:
df["trailing_stop_loss"] = df["close"].cummax() * (
1 - float(trailing_stop_loss)
)
chart_period = backtest.get("chart_period", "1Min")
start_time = backtest.get("start")
stop_time = backtest.get("stop")
df = apply_charting_to_df(df, chart_period, start_time, stop_time)
return df
def apply_charting_to_df(
df: pd.DataFrame, chart_period: str, start_time: str, stop_time: str
):
"""Modifies the dataframe based on the chart_period, start dates and end dates
Parameters
----------
df: dataframe with data loaded
chart_period: string, describes how often to sample data, default is '1Min' (1 minute)
see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
start_time: datestring in YYYY-MM-DD HH:MM (ex. 2020-08-31 04:00) of when to begin the backtest
stop_time: datestring of YYYY-MM-DD HH:MM when to stop the backtest
Returns
DataFrame, a sorted dataframe ready for consumption by run_backtest
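A hedged usage sketch (dates are illustrative):
    apply_charting_to_df(df, "1H", "2020-08-31 04:00", "2020-09-30 00:00")
resamples the frame to one row per hour (keeping the first observation) and trims it to the window.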
"""
if df.index.dtype != "datetime64[ns]":
headers = df.columns.values.tolist()
headers.extend([df.index.name])
if "date" not in headers:
raise Exception(
"Data does not have a date column. Headers must include date, open, high, low, close, volume."
)
time_unit = detect_time_unit(df.date[1])
df.date = pd.to_datetime(df.date, unit=time_unit)
df.set_index("date", inplace=True)
if start_time:
if isinstance(start_time, datetime) or type(start_time) is int:
time_unit = detect_time_unit(start_time)
start_time = pd.to_datetime(start_time, unit=time_unit)
start_time = start_time.strftime("%Y-%m-%d %H:%M:%S")
if stop_time:
if isinstance(stop_time, datetime) or type(stop_time) is int:
time_unit = detect_time_unit(stop_time)
stop_time = pd.to_datetime(stop_time, unit=time_unit)
stop_time = stop_time.strftime("%Y-%m-%d %H:%M:%S")
df = df.resample(chart_period).first()
if start_time and stop_time:
df = df[start_time:stop_time] # noqa
elif start_time and not stop_time:
df = df[start_time:] # noqa
elif not start_time and stop_time:
df = df[:stop_time]
return df
def apply_transformers_to_dataframe(df: pd.DataFrame, datapoints: list):
"""Applies indications from the backtest to the dataframe
Parameters
----------
df: dataframe loaded with data
datapoints: list of indictors as dictionary objects
transformer detail:
{
"transformer": "", string, actual function to be called MUST be in the datapoints
"name": "", string, name of the transformer, becomes a column on the dataframe
"args": [], list arguments to pass the the function
}
Returns
-------
df, a modified dataframe with all the datapoints calculated as columns
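A hedged example datapoint ("sma" is an assumed key in transformers_map, not defined here):
    [{"transformer": "sma", "name": "sma_20", "args": [20]}]
adds an "sma_20" column computed by the mapped transformer.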
"""
for ind in datapoints:
transformer = ind.get("transformer")
field_name = ind.get("name")
if len(ind.get("args", [])):
args = ind.get("args")
# df[field_name] = datapoints[transformer](df, *args)
trans_res = transformers_map[transformer](df, *args)
else:
trans_res = transformers_map[transformer](df)
if isinstance(trans_res, pd.DataFrame):
df = process_res_df(df, ind, trans_res)
if isinstance(trans_res, pd.Series):
df[field_name] = trans_res
return df
def process_res_df(df, ind, trans_res):
"""handle if a transformer returns multiple columns
To manage this, we just add the name of column in a clean
way, removing periods and lowercasing it.
Parameters
----------
df, dataframe, current dataframe
ind, indicator object
trans_res, result from the transformer function
Returns
-------
df, dataframe, updated dataframe with the new columns
"""
for key in trans_res.keys().values:
i_name = ind.get("name")
clean_key = key.lower()
clean_key = clean_key.replace(".", "")
clean_key = clean_key.replace(" ", "_")
df_key = f"{i_name}_{clean_key}"
df[df_key] = trans_res[key]
return df
def detect_time_unit(str_or_int: str or int):
"""Determines a if a timestamp is really a timestamp and if it
matches is in seconds or milliseconds
Parameters
----------
str_or_int: string or int of the timestamp to detect against
Returns
-------
string of "s" or "ms", or None if nothing detected
"""
str_or_int = str(str_or_int)
regex1 = r"^(\d{10})$"
regex2 = r"^(\d{13})$"
if re.match(regex1, str_or_int):
return "s"
if re.match(regex2, str_or_int):
return "ms"
def standardize_df(df: pd.DataFrame):
"""Standardizes a dataframe with the basic features used
throughout the project.
Parameters
----------
df: A pandas dataframe (probably one just created) with
at least the required columns of: date, open, close, high, low, volume.
Returns
-------
A new pandas dataframe of with all the data in the expected types.
"""
new_df = df.copy()
if "date" in new_df.columns:
new_df = new_df.set_index("date")
ts = str(new_df.index[0])
time_unit = detect_time_unit(ts)
new_df.index = pd.to_datetime(new_df.index, unit=time_unit)
new_df = new_df[~new_df.index.duplicated(keep="first")]
new_df = new_df.sort_index()
columns_to_drop = ["ignore", "date"]
new_df = new_df.drop(columns=columns_to_drop, errors="ignore")  # assign the result; otherwise the drop is a no-op
new_df.open = pd.to_numeric(new_df.open)
new_df.close = pd.to_numeric(new_df.close)
new_df.high = pd.to_numeric(new_df.high)
new_df.low = | pd.to_numeric(new_df.low) | pandas.to_numeric |
import warnings
warnings.filterwarnings("ignore")
import pickle
import json
import pandas as pd
import numpy as np
from pathlib import Path
from process_functions import adjust_names, aggregate_countries, moving_average, write_log
from pickle_functions import picklify, unpicklify
######################################
# Retrieve data
######################################
# Paths
path_UN = Path.cwd() / 'input' / 'world_population_2020.csv'
path_confirmed = Path.cwd() / 'input' / 'df_confirmed.csv'
path_deaths = Path.cwd() / 'input' / 'df_deaths.csv'
path_policy = Path.cwd() / 'input' / 'df_policy.csv'
#path_geo = Path.cwd() / 'input'/ 'countries.geojson'
# get data directly from github. The data source provided by Johns Hopkins University.
url_confirmed = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
url_deaths = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
url_policy = 'https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv'
#df.to_csv(r'C:/Users/John\Desktop/export_dataframe.csv', index = None)
pop = pd.read_csv(path_UN)
#load old data
df_confirmed_backup = pd.read_csv(path_confirmed)
old_df_confirmed = df_confirmed_backup[['Province/State','Country/Region']]
df_deaths_backup = pd.read_csv(path_deaths)
old_df_deaths = df_deaths_backup[['Province/State','Country/Region']]
df_policy_backup = | pd.read_csv(path_policy) | pandas.read_csv |
from baseq.utils.file_reader import read_file_by_lines
import pandas as pd
pd.set_option('precision', 3)
def fastq_basecontent_quality(sample, fastq_path, maxLines = 10000):
"""
Generate the basic quality stats of the fastq file
Return:
dataframe: A/T/C/G/quality;
base content figure in base64;
base quality figure in base64;
"""
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
inlines = read_file_by_lines(fastq_path, maxLines, 4)
seqs = []
quals = []
content = {}
for line in inlines:
seqs.append(line[1].strip())
quals.append(line[3].strip())
seqlen = len(seqs[0])
quality = [0] * seqlen
bases = ['A', 'T', 'C', 'G']
for base in bases:
content[base] = [0] * seqlen
#count bases
for seq in seqs:
for idx, base in enumerate(seq):
if base in ['A', 'T', 'C', 'G'] and idx < seqlen:
content[base][idx] += 1
#count quality
for qual in quals:
for idx, base in enumerate(qual):
if idx < seqlen:
if not (ord(base) - 33):
print(ord(base) - 33)
quality[idx] += ord(base) - 33
#high_bias_pos: number of positions where a single base exceeds 50% of all coverage;
high_bias_pos = 0
for base in ['A', 'T', 'C', 'G']:
content[base] = [float(x) / len(seqs) for x in content[base]]
high_bias_pos += sum([1 for x in content[base] if x>=0.5])
#quality and mean quality of all bases...
content['quality'] = [q / len(seqs) for q in quality]
mean_quality= round(sum(content['quality'])/len(content['quality']), 2)
#plot basecontent...
plt.figure(figsize=(4, 2))
plt.plot(range(1, seqlen+1), content['A'])
plt.ylim((0, 1))
plt.savefig("./{}_basecontent.png".format(sample))
#plot quality
plt.figure(figsize=(4, 2))
plt.plot(range(1, seqlen+1), content['quality'])
plt.savefig("./{}_basequality.png".format(sample))
| pd.DataFrame(content, columns=['A', 'T', 'C', 'G', 'quality']) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
## python 3.7.7, pandas 1.1.3, numpy 1.19.2
#
# In[1]:
import numpy as np
import pandas as pd
import os
import re
import sys
import argparse
import warnings
warnings.filterwarnings('ignore') ## avoid printing pandas merge warnings that can be safely ignored
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version='1.0 (initial release)')
parser.add_argument('--fargene', help='full path to fARGene output, if included')
parser.add_argument('--shortbred', help='full path to shortBRED output (tsv), if included')
parser.add_argument('--shortbred_map', help='full path to shortBRED mapping file, if included and not using default')
parser.add_argument('--abx_map', help='full path to Abx:drug class mapping file, if included')
parser.add_argument('--db_files', help='Path to ontology index files, exclude "/" on end of path ', default= "./db_files")
parser.add_argument('--AMR_key', help='full path to key with known AMR phenotypes, REQUIRED', required = True)
parser.add_argument('--name', help='an identifier for this analysis run, REQUIRED', required = True)
parser.add_argument('--ham_out', help='output file from hAMRonization (tsv), REQUIRED', required = True)
## pull args
args = parser.parse_args()
##Emily's Point To Files for Github
## read in card ontologies as new key
card_key = pd.read_csv(f"{args.db_files}/aro_categories_index.tsv", sep='\t')
## read in drug class key data
if args.abx_map:
abx_key_name = args.abx_map  ## use the user-supplied Abx:drug class mapping (args.AMR_key is the phenotype key, read below)
abx_key = pd.read_csv(abx_key_name)
else:
abx_key = pd.read_csv(f"{args.db_files}/cleaned_drug_class_key.csv")
# point to known data
if args.AMR_key:
mock_name = args.AMR_key
mock = pd.read_csv(mock_name)
# point to observed data, e.g. hAMRonization output
raw_name = args.ham_out
raw_ham = pd.read_csv(raw_name, error_bad_lines=False, sep = "\t")
## get ham sum and add fargene and shortbred
#resx = ["res_5x/", "res_50x/", "res_100x/"]
this_run_res = args.name
res_name = args.name
outdir = str(args.name) + "/"
dir_cmd = "mkdir " + outdir
os.system(dir_cmd)
## read in shortbred
if args.shortbred:
shortbred_path = args.shortbred
shortbred = pd.read_csv(shortbred_path, sep = "\t")
## hits only greater than zero
shortbred = shortbred[shortbred['Hits'] > 0]
shortbred['analysis_software_name'] = "shortbred"
shortbred['gene_symbol'] = shortbred['Family'] ## merge "family" with gene symbol as closest match
## give meaning to shortbred family
if args.shortbred_map:
shortmap_name = args.shortbred_map
shortmap = pd.read_csv(shortmap_name, sep = "\t")
else:
shortmap =pd.read_csv(f'{args.db_files}/ShortBRED_ABR_Metadata.tab', sep = "\t")
shortbred = shortbred.merge(shortmap, how = "left")
shortbred['drug_class'] = shortbred['Merged.ID']
## merge shortbred and rawham results
raw_ham = raw_ham.append(shortbred,ignore_index = True)
### integrate fargene results
# note there there is a discrepancy in that the results folder has mixed caps and lower case
# so run "find . -depth | xargs -n 1 rename 's/(.*)\/([^\/]*)/$1\/\L$2/' {} \;" in the command line to fix it
beta_lactam_models = ['class_a', 'class_b_1_2', 'class_b_3', 'class_c', 'class_d_1', 'class_d_2' ]
subdirs = ['class_a', 'class_b_1_2', 'class_b_3', 'class_c', 'class_d_1', 'class_d_2', 'qnr', 'tet_efflux', 'tet_rpg', 'tet_enzyme']
if args.fargene:
for model in subdirs:
#for res in resx:
fpath = str(args.fargene + model + "/hmmsearchresults/contigs-" + model + "-hmmsearched.out") #"/home/ewissel/amr-benchmarking/mock_2/" + res + "fargene_out_fa_2/" + model + "/hmmsearchresults/contigs-" + model + "-hmmsearched.out"
#print(path)
f = pd.read_csv(fpath, engine='python', sep = "\t", header = 1, skipfooter = 9)
## filter to just what this iteration is
#if res == this_run_res:
repi = len(f)
#print(res, "\t", model, "\t", len(f))
if model in beta_lactam_models:
resistance = "BETA-LACTAM"
elif "tet" in model:
resistance = "tetracycline"
elif model =="qnr":
resistance = "QUINOLONE"
#print("fARGene\t", resistance)
df = pd.DataFrame({'analysis_software_name': 'fARGene', 'drug_class':[resistance]})
newdf = pd.DataFrame(np.repeat(df.values,repi,axis=0))
newdf.columns = df.columns
# print(newdf)
raw_ham = raw_ham.append(newdf,ignore_index = True)
##############################
## now fargene and shortbred have been added to the raw_ham table
# This was initially a manual cleaning process, but after poring through the results at one resolution (20x), here is the closest code version of that manual process. Basically, we default to CARD drug classes for each accession number, then give all genes with the same gene name the CARD drug class (overriding the HAM drug class), and finally fill remaining NAs with the HAM drug class.
## read in above
card_key['AMR Gene Family'] = card_key['AMR Gene Family'].str.lower()
## have top fill in protein accession NAs with 0.00 so that join works
card_key['Protein Accession'] = card_key['Protein Accession'].fillna(101010)
card_key.head()
card = card_key.melt(id_vars=['AMR Gene Family', 'Drug Class', 'Resistance Mechanism'],
value_vars=['Protein Accession', 'DNA Accession'],
var_name = "accession_source",
value_name = "accession")
card['drug_class_card'] = card['Drug Class']
## merge raw ham and card on accession / reference_accession
salted_ham = raw_ham.merge(card, left_on = "reference_accession", right_on = "accession", how = "left")
## if gene_sumbol and drug class card NaN, filter out
smol_gene = salted_ham[['gene_symbol','drug_class_card']].drop_duplicates().dropna(thresh=2)
smol_dict = pd.Series(smol_gene.drug_class_card.values, index=smol_gene.gene_symbol).to_dict()
## make def to apply dict to fill in NAs
def curedThatHam(x):
var = ''
if pd.isnull(x['drug_class_card']): ## if there is no card match from accession
gene = x['gene_symbol']
try:
var = smol_dict[gene]
except KeyError:
var = x['drug_class_card']
return var
# apply
salted_ham['drug_class_card'] = salted_ham.apply(lambda x: curedThatHam(x),axis = 1) ## override OG col because check confirmed it was ok
salted_ham.analysis_software_name.unique()
salted_ham['drug_class_ham'] = salted_ham['drug_class']
## There are 9 tools in out salted ham. This is correct. We can now covert salted ham to cured ham
cured_ham = pd.DataFrame(salted_ham)
## import the Resfinder results
##### this code block is outdated because I did this with the hamronizer tool; leaving for future ref tho
## had to run on web bc the docker version was broke AF
#resfinder_results = pd.read_csv("../mock_2/resfinder/new_resfinder_webpage_results/resfinder_out.tsv", sep = "\t")
#resfinder_results['analysis_software_name'] = "resfinder4_online"
#cured_ham = cured_ham.append(resfinder_results, ignore_index= True)
### for each row
### if drug_class_card is not empty take value
## elif drug_class_card is empty and drug_class_ham is not empty, take drug_class_ham
## else: take the shortbred 'Drug Class' if present; otherwise mark the row's drug class as "unknown"
def simplify_drug_class(dat):
drug_class = ""
if not pd.isna(dat.drug_class_card):
drug_class = str(dat['drug_class_card'])
elif not pd.isna(dat.drug_class_ham):
drug_class = str( dat['drug_class_ham'])
elif not pd.isna(dat['Drug Class']):
drug_class = dat['Drug Class'] # from shortbred
else:
drug_class = str("unknown")
dat['drug_class'] = drug_class
return dat
import re
cured_ham = cured_ham.apply(lambda x: simplify_drug_class(x), axis = 1)
cured_ham['drug_class'] = cured_ham['drug_class'].str.lower()
cured_ham['drug_class'] = cured_ham['drug_class'].apply(lambda x: (re.split(r";|, |: | and ", x )))
####### has the generic abx names to drug class
abx_key['abx_class'] = (abx_key['drug_class']
.apply(lambda x: x if type(x)== str else "")
.apply(lambda x: ''.join(e for e in x if e.isalnum())))
abx_melted = abx_key.melt(value_vars=['Generic name', 'Brand names'], id_vars=['abx_class'], var_name = "abx_type", value_name = "abx")
## currently this is only adding generic names column
#abx_melted[abx_melted['abx_type']=="Brand names"]
# In[1392]:
abx_melted['abx'] = abx_melted['abx'].str.lower().str.strip()
#abx_melted[abx_melted['abx']=='tazobactam']
## next combine the drug clas to the antibiotic in the mock community
mock['Abx_split'] = mock['Abx'].apply(lambda x: x.split('-') if "-" in x else x)
mock['Abx'] = (mock['Abx']
.apply(lambda x: x if type(x)== str else "")
.apply(lambda x: ''.join(e for e in x if e.isalnum())))
mock = mock.explode('Abx_split')
mock['Abx_split'] = mock['Abx_split'].str.lower()
merged = mock.merge(abx_melted, left_on = 'Abx_split', right_on='abx', how='left')
merged['abx_class'] = merged['abx_class'].str.lower()
merged['Abx_split'][merged['abx_class'].isna()].unique() ## need to go make data tidy for this to work sucessfully
# Now we have clean data! Now we want to create true +/- and false +/- in `cured_ham`.
## first filter merged so it is only the resistant ones
resistant_mock = merged[merged['classification']=="resistant"]
len(resistant_mock['abx'].unique()) # number of resistant antibiotics
len(resistant_mock['abx_class'].unique()) # number of resistant drug classes
sus_mock = merged[merged['classification']=="susceptible"]
boolean_list = ~sus_mock.Abx.isin(resistant_mock['Abx'])
filtered_sus = sus_mock[boolean_list]
filtered_sus.abx.unique() ## only 6 antibiotics that are susceptible in the entire mock community
filtered_sus.abx_class.unique()
## filter filtered sus so that drug classes are unique to sus, not in resistant group
## prior had to filter by antibiotic tested bc only know at drug level, not drug class.
boolean2 = ~filtered_sus.abx_class.isin(resistant_mock['abx_class'])
smol_sus = filtered_sus[boolean2]
smol_sus.abx_class.unique() ## only 2 drug classes are KNOWN negatives in entire mock
cured_ham = pd.DataFrame( cured_ham.explode('drug_class'))
cured_ham['drug_class'] = (cured_ham['drug_class']
.apply(lambda x: x if type(x)== str else "")
.apply(lambda x: ''.join(e for e in x if e.isalnum())))
#cured_ham['drug_class'].unique()
#cured_ham.head()
#cured_ham.shape ## jumps to over 2000 long
# Now we have the `cured_ham` dataset, which contains all our observations and is cleaned so that every gene observation has a drug class assigned to it (unless the drug class is unknown).
# Now we want to assign those true/false -/+ values.
# In[1400]:
## give false + / - values
ref_abx = resistant_mock['abx_class'].str.lower()
ref_sus_abx = smol_sus['abx_class'].str.lower()
ref_abx_df = resistant_mock
def get_posneg(row):
if row['drug_class'] in ref_abx:
return "true_positive"
elif row['drug_class'] in resistant_mock['Abx_split']: # I would rather explicitly have it search that it is not here
return "true_positive"
elif row['drug_class'] in ref_sus_abx:
return "false_positive"
else:
return 'unknown'
return
#print(cured_ham.info())
#print(ref_sus_abx.describe())
cured_ham['True_Positive'] = (cured_ham
.apply(lambda x: x['drug_class'] in ref_abx, axis = 1))
#cured_ham['False_Positive'] = (cured_ham
# .apply(lambda x: x['drug_class'] in ref_sus_abx, axis=1))
#ref_abx_df['False_Negative'] = (ref_abx_df.apply(lambda x: x['abx_class'] not in cured_ham['drug_class'], axis=1))
#ref_abx_df['False_Negative'].unique()
cured_ham_dc = pd.DataFrame(cured_ham['drug_class'])
false_negatives = ref_abx_df[~(ref_abx_df['abx_class']
.isin(cured_ham_dc['drug_class']))]
abx_melted['abx_class'] = abx_melted['abx_class'].str.lower()
false_negatives = cured_ham_dc[~(cured_ham_dc['drug_class']
.isin(ref_abx_df['abx_class']))]
#false_negatives.shape ## precursor to smol_sus; using smol_sus moving forward
## sorry that the variables make less sense as I add things. this is directly related to my sleep quality
## and how much I would like to be done with this project.
# This is where <NAME> did some data cleaning in R. So I exported the above, and read in the results from her cleaning below. SHould probs rewrite what she did in python :,(
# The following code block is originally written by Brooke in R, I have rewritten it in python so that we can use one script to do everything.
# ## R to Python: Drug Class Cleaning
## based on brooke r code
#Summarizing values
## R code
#HAM$drug_class <- str_remove(HAM$drug_class, "antibiotic")
#HAM$drug_class <- str_remove(HAM$drug_class, "resistant")
#HAM$drug_class <- str_remove(HAM$drug_class, "resistance")
## python
cured_ham['drugclass_new'] = cured_ham.drug_class.str.replace('antibiotic' , '')
cured_ham['drugclass_new'] = cured_ham.drugclass_new.str.replace('resistant' , '')
cured_ham['drugclass_new'] = cured_ham.drugclass_new.str.replace('resistance' , '')
### R code
#HAM <- HAM %>% mutate(class_new = case_when(drug_class %in% c("amikacin", "kanamycin", "streptomycin","tobramycin", "kanamycin","spectinomycin", "gentamicin", "aminoglycoside") ~ "Aminoglycoside",
# drug_class %in% c("phenicol", "chloramphenicol") ~ "Phenicol",
# drug_class %in% c("quinolone", "fluoroquinolone", "ciprofloxacinir", "fluoroquinolones") ~ "Quinolones and Fluoroquinolones",
# drug_class %in% c("macrolide", "erythromycin", "mls", "azithromycin", "telythromycin") ~ "Macrolide",
# drug_class %in% c("tetracycline", "glycylcycline") ~ "Tetracycline",
# drug_class %in% c("ampicillin", "methicillin", "penicillin", "amoxicillinclavulanicacid") ~ "Penicillin",
# drug_class %in% c("colistin", "polymyxin", "bacitracin", "bicyclomycin") ~ "Polypeptides",
# drug_class %in% c("cephalosporin", "cefoxatin", "ceftriaxone") ~ "Cephalosporin",
# drug_class %in% c("carbapenem", "penem", "meropenem") ~ "Carbapenem",
# drug_class %in% c("unclassified", "efflux", "acid", "unknown", "multidrug", "multidrugputative", "mutationsonrrnagene16s") ~ "Unclassified",
# drug_class %in% c("linezolid", "oxazolidinone") ~ "Oxazolidinone",
# drug_class %in% c("betalactam", "penam", "betalactamase") ~ "Unspecified Betalactam",
# drug_class %in% c("acridinedye") ~ "Acridine dye",
# drug_class %in% c("antibacterialfreefattyacids") ~ "Free fatty acids",
# drug_class %in% c("benzalkoniumchloride", "quaternaryammonium") ~ "Benzalkonium chlorides",
# drug_class %in% c("peptide") ~ "Unspecified peptide",
# drug_class %in% c("nucleoside") ~ "Unspecified nucleoside",
# drug_class %in% c("fusidicacid") ~ "Fucidic acid",
# drug_class %in% c("sulfonamides", "sulfisoxazole", "sulfonamide") ~ "Sufonamides",
## drug_class %in% c("coppersilver") ~ "copper,silver",
# drug_class %in% c("phenicolquinolone") ~ "phenicol,quinolone", TRUE ~ drug_class)) %>%
# separate(class_new, into = c("d1", "d2"), sep = "([;,/])")
### python
## make new dict with drug class:drug coding that brooke implemented
cleaning_class = {"Aminoglycoside": ["amikacin", "kanamycin", "streptomycin","tobramycin", "kanamycin","spectinomycin", "gentamicin", "aminoglycoside",'aminocoumarin'],
"Phenicol" : ["phenicol", "chloramphenicol"],
"Quinolones and Fluoroquinolones" : ["quinolone", "fluoroquinolone", "ciprofloxacinir", "fluoroquinolones"],
"Macrolide" : ["macrolide", "erythromycin", "mls", "azithromycin", "telythromycin"],
"Tetracycline" : ["tetracycline", "glycylcycline"],
"Penicillin" : ["ampicillin", "methicillin", "penicillin", "amoxicillinclavulanicacid"],
"Polypeptides" : ["colistin", "polymyxin", "bacitracin", "bicyclomycin"],
"Cephalosporin" : ["cephalosporin", "cefoxatin", "ceftriaxone"],
"Carbapenem" : ["carbapenem", "penem", "meropenem"],
"Unclassified" : ["unclassified", "efflux", "acid", "unknown", "multidrug", "multidrugputative",
"mutationsonrrnagene16s", 'warning', 'geneismissingfromnotesfilepleaseinformcurator', ## clean up mess from split
'ant2ia', 'aph6id', 'monobactam', 'shv52a', 'rblatem1'], # again, mess from split
"Oxazolidinone" : ["linezolid", "oxazolidinone"],
"Betalactam": ["betalactam", "penam", "betalactamase"], ## no need to be "unspecificed" to removed that string
"Acridine dye" : ["acridinedye"],
"Free fatty acids" : ["antibacterialfreefattyacids"],
"Benzalkonium chlorides": ["benzalkoniumchloride", "quaternaryammonium"],
"Unspecified peptide" : ["peptide"],
"Unspecified nucleoside" : ["nucleoside"],
"Fucidic acid" : ["fusidicacid"],
"Sufonamides" : ["sulfonamides", "sulfisoxazole", "sulfonamide"],
"copper,silver" : ["coppersilver", 'copper, silver'],
"phenicol,quinolone" : ["phenicolquinolone"],
"penicillin" : ['amoxicillin/clavulanicacid']
}
## invert dictionary so the value is the new value, key is old value
new_d = {vi: k for k, v in cleaning_class.items() for vi in v}
## replace key value in column with value from dict (drug class new)
cooked_ham = cured_ham.replace({"drugclass_new": new_d})
## deep down, i am crying, because new_d worked partially but not completely
## i thought I took care of you, new_d, how could you do me like this
#### R code: this part did not need to be copied to python; the R version made some duplicate rows from the transofrmation but the python version doesn't do this
#Rearranging rows that needed to be transposed
#Dups1 <- HAM %>% filter(drug_class %in% c("coppersilver","phenicolquinolone")) %>%
# select(-d2) %>% rename(class_new = d1)
#Dups2 <- HAM %>% filter(drug_class %in% c("coppersilver","phenicolquinolone")) %>%
# select(-d1) %>% rename(class_new = d2)
#dupout <- HAM %>% filter(is.na(d2)) %>% select(-d2) %>% rename(class_new = d1)
# ## Back to the OG Python Script
#cooked_ham = cooked_ham.append(resfinder_results, ignore_index = True)
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].str.lower()
### remove those with Unclassified drug class
to_drop = ['unclassified', 'unknown', 'other', 'multidrug', '', 'target', 'efflux', # some of these are artifacts of cleaning/split
'mutationsonproteingene', 'mfsefflux', 'genemodulating', 'mateefflux', 'rndefflux','chloramphenicolacetyltransferasecat', 'abcefflux', 'otherarg', 'genemodulatingefflux', 'rrnamethyltransferase']
cooked_ham = cooked_ham[~cooked_ham['drugclass_new'].isin(to_drop)]
## remove unspecified string bewcause it will mess up matching
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].astype(str)
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('unspecified ', ""))
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('quinolones and fluoroquinolones', "quinolone"))
## some individual abx made it to drug class col, changing manually to drug class.
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('telithromycin', "macrolide"))
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('daptomycin', "lipopeptide"))
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('cefoxitin', "cephalosporin"))
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('classcbetalactamase', 'betalactam'))
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('classabetalactamase', 'betalactam'))
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('classbbetalactamase', 'betalactam'))
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('classdbetalactamase', 'betalactam'))
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('betalactamalternatename', 'betalactam'))
#aminoglycosideaminocoumarin
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('aminoglycosideaminocoumarin', 'aminoglycoside'))
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('aminoglycosidealternatename', 'aminoglycoside'))
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('aminoglycosidephosphotransferase', 'aminoglycoside')) ## phosphotransferase would be filtered out as other drug class, so ignore here
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('aminoglycosidenucleotidyltransferase', 'aminoglycoside')) ## same w nucleo...
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('aminoglycosideacetyltransferase', 'aminoglycoside')) ## same w acetyl..
#cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('aminoglycosideaminocoumarin', 'aminoglycoside')) ## same w acetyl..
cooked_ham['drugclass_new'] = cooked_ham['drugclass_new'].map(lambda x: x.replace('tetracyclineefflux', 'tetracycline')) ##
## those that fall into "other" drug class are left as the abx
# we can keep triclosan bc some abx map to triclosan
## inspect that it worked in interactive
#cooked_ham['drugclass_new'].unique()
#cooked_ham.shape ## shorter than above. bc drops those with unclassified / unknown
def assign_true_pos(x):
if x in(ref_abx.unique()):
return "true_positive"
elif x in(smol_sus['abx_class'].unique()):
return "false_positive"
else:
return "unknown"
cooked_ham['true_positive'] = cooked_ham['drugclass_new'].apply(lambda x: assign_true_pos(x)) # true is true pos, false is false neg
combo_counts = cooked_ham.true_positive.value_counts()
combo_name = outdir + "combo_counts_" + res_name + ".txt"
combo_counts.to_csv(combo_name)
print("\n")
# first need to do some grouping by tool in cooked_ham
grouped_ham = cooked_ham.groupby(['analysis_software_name'])
ham_name = outdir + "cooked_ham_w_true_pos_" + res_name + ".csv"
cooked_ham.to_csv(ham_name)
# Analysis below
grp_abx_results = grouped_ham['drugclass_new'].value_counts().to_frame()
name_grp_results = outdir + "grouped_by_tool_drug_class" + res_name + ".csv"
grp_abx_results.to_csv(name_grp_results)
#grp_abx_results
pos_count = grouped_ham['true_positive'].value_counts().to_frame().unstack(1, fill_value = 0)
#pos_count.columns = ['analysis_software_name', 'positive_classification', 'count']
### get false neg dataset
abx_melted['abx_class'] = abx_melted['abx_class'].str.lower() # copied above but def need here
## drop other from abx_meltered because we excluded unknowns
abx_melted = abx_melted[abx_melted.abx_class != 'other']
## get true abx not in our sample
not_in_mock = abx_melted[~abx_melted['abx_class'].isin(ref_abx)]
not_in_mock['abx_class'].unique()
mock_negatives = not_in_mock['abx_class'].unique() # list of what would be true negative values
## use smol_sus for only what is KNOWN as negative
def fetch_negatives(df, tool):
df = df[df.analysis_software_name == tool]
## filter for the tool
not_in_ham = abx_melted[~abx_melted['abx_class'].isin(df['drugclass_new'])]
return not_in_ham['abx_class'].unique()
tool_list = list(cooked_ham['analysis_software_name'].unique())
negs = {}
total_negatives = []
for i in tool_list:
intermediate = fetch_negatives(cooked_ham, i)
# print(i, "\n",len(intermediate))
# print(intermediate)
## these appear to be real true negatives - these Abx classes exist, and they are not in any of the hams
name = str(i )
# join whats not in ham and not in mock for true neg
negs[name] = intermediate
for n in intermediate:
if n not in total_negatives and n not in cooked_ham['drugclass_new']:
total_negatives.append(n)
all_pos_abx = resistant_mock['abx_class'].unique()
neg_abx = smol_sus.abx_class.unique()
neg_count = pd.DataFrame(columns=['tool', "false-neg", "true-neg"])
for tool in negs:
tool_falseneg_count = 0
tool_trueneg_count = 0
negatives = negs[tool]
for n in negatives:
#print(tool, n, "\n")
#print([n])
if n in neg_abx: #neg_abx for known negative, mock_negatives for negative results including unknown susceptibility
#print("true_negative: ", tool, n)
tool_trueneg_count += 1
elif n in all_pos_abx:
#print("false negative: ", tool, n)
tool_falseneg_count += 1
df2 = {'tool': tool, 'false-neg': tool_falseneg_count, 'true-neg': tool_trueneg_count}
neg_count = neg_count.append(df2, ignore_index = True)
print("False negatives from ", tool, ": ", tool_falseneg_count)
print("True negatives from ", tool, ": ", tool_trueneg_count)
print("\n\n")
counts = pd.merge(pos_count, neg_count, right_on = "tool", how = "outer",left_index=True, right_index=False)
#counts ## this spits out an error, but we can ignore it bc it's due to the double labels from pos counts
## what re counts if we merge all results
pos_count_total = cooked_ham['true_positive'].value_counts().to_frame().unstack(1, fill_value = 0)
#pos_count.columns = ['analysis_software_name', 'positive_classification', 'count']\
## negs
## drugs not detected but that exist in the world
not_in_ham = ref_abx[~ref_abx.isin(cooked_ham['drugclass_new'])]
#not_in_ham
negs_in_ham = cooked_ham['drugclass_new'][~cooked_ham['drugclass_new'].isin(ref_abx)]
#negs_in_ham.unique()
cooked_ham[cooked_ham['drugclass_new'].isin(neg_abx)] ## all false positives are nitro for oqxB/A gene
## get false/true negs
tot_trueneg_count = 0
tot_falseneg_count = 0
for n in total_negatives:
#print(tool, n, "\n")
# print([n])
if n in neg_abx: #neg_abx for known negative, mock_negatives for negative results including unknown susceptibility
#print("true_negative: ", tool, n)
        if n not in cooked_ham['drugclass_new'].values:
            tot_trueneg_count += 1
            #print("true neg: ", n)
    elif n in all_pos_abx:
        #print("false negative: ", tool, n)
        if n not in cooked_ham['drugclass_new'].values:
tot_falseneg_count += 1
#print('false_neg:', n)
#print("False negatives: ", tot_falseneg_count)
#print("True negatives: ", tot_trueneg_count)
## write in a check for the cols we expect
if ('true_positive', 'false_positive') not in counts.columns:
    counts[('true_positive', 'false_positive')] = 0
if ('true_positive', 'unknown') not in counts.columns:
    counts[('true_positive', 'unknown')] = 0
# # <NAME>
#
# The following table is what all this code is for.
## sensitivity / specificity analysis
## sensitivity = true_positives / (true_positives + false_negatives)
try:
counts['sensitivity'] = counts[('true_positive', 'true_positive')] / (counts[('true_positive','true_positive')] + counts['false-neg'])
except ZeroDivisionError:
print('can\'t calculate sensitivity because no values detected. Are you sure your data looks right?')
# precision = true positives / false_positives + true_positives
counts['precision'] = counts[('true_positive', 'true_positive')] / ( counts['true_positive', 'false_positive'] + counts[('true_positive', 'true_positive')] )
## specificity = true_negative / (true_negative + false_positive)
try:
counts['specificity'] = counts['true-neg'] / (counts['true-neg'] + counts[('true_positive', 'false_positive')])
except ZeroDivisionError:
print("Can't calculate specificity because there are no observed true negatives or false positives.")
counts['specificity'] = int(0)
## accuracy = (true_positive + true_negative) / (true_positive + false_positive + true_negative + false_negative)
counts['accuracy'] = (counts[('true_positive', 'true_positive')] + counts['true-neg']) / (counts[('true_positive', 'true_positive')] + counts['true_positive','false_positive'] + counts['true-neg'] + counts['false-neg'] )
## recall = true pos / (true pos + false neg)
#### since sensitivity and recall are functionally the same, removing recall
#counts['recall'] = counts[('true_positive', 'true_positive')] / (counts[('true_positive', 'true_positive')] + counts['false-neg'])
#counts['recall'] = pd.to_numeric(counts['recall'])
# F1 currently faulty (returning values outside expected range) so removing from the pipeline
## 2 * (precision * recall) / (precision + recall)
#counts['F1'] = 2 * ( (counts['precision'] * counts['recall']) / (counts['precision'] + counts['recall']) )
#except ZeroDivisionError:
# counts['F1'] = 0
counts['percent_unclassified'] = counts[('true_positive', 'unknown')] / (counts[('true_positive', 'true_positive')] + counts[('true_positive', 'false_positive')] + counts[('true_positive', 'unknown')])
#print("<NAME>, ", this_run_res, ": ")
name = outdir + "thanksgiving_ham_" + res_name + ".csv"
counts.to_csv(name)
#print(counts) ## print out if interactive; does nothing if command line
print("\nYou have new results! Check it out in ", name, "\n")
# # Canned Ham
# condensing the results
# note that these aren't particularly informative. it's here in case reviewers really want it.
cooked_ham = cooked_ham.assign(condensed_gene = cooked_ham.groupby('analysis_software_name')['input_gene_start'].shift(-1))
def condense_results(df):
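    # Flags whether the next reported hit (the shifted start coordinate stored in 'condensed_gene')
    # overlaps the current gene's stop coordinate, so overlapping AMR hits can be collapsed.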
start_col = df['input_gene_start']
stop_col = df['input_gene_stop']
next_row = df['condensed_gene']
keep_val = ""
condense_val = ""
unconclusive_val = ""
if next_row < stop_col: # if the start of the next gene overlaps with the end of THIS gene
condense_val = "condense"
elif stop_col < next_row:
keep_val = "keep"
else:
unconclusive_val = "unconclusive"
message = str( condense_val + keep_val + unconclusive_val)
return(message)
cooked_ham['condense_action'] = cooked_ham.apply(lambda x: condense_results(x), axis=1) ## does this need to be grouped by analysis software name ?
#cooked_ham['condense_action'].describe()
## looks like there are 1811 instances where AMR genes overlap. Lets remove these and see what happens.
canned_ham = cooked_ham[cooked_ham['condense_action'] != "condense"]
## i am very proud of myself for this name
## its the little things in life that bring me smiles
## positives
grouped_can = canned_ham.groupby(['analysis_software_name'])
pos_count2 = grouped_can['true_positive'].value_counts().to_frame().unstack(1, fill_value = 0)
## add negatives
negs2={}
for i in tool_list:
intermediate = fetch_negatives(canned_ham, i)
#print(i, "\n",len(intermediate))
## these appear to be real true negatives - these Abx classes exist, and they are not in any of the hams
    name = str(i)
# join whats not in ham and not in mock for true neg
negs2[name] = intermediate
## negative counts
neg_count2 = pd.DataFrame(columns=['tool', "false-neg", "true-neg"])
for tool in negs2:
tool_falseneg_count = 0
tool_trueneg_count = 0
negatives = negs2[tool]
for n in negatives:
# print(tool, n, "\n")
#print([n])
if n in neg_abx:
#print("true_negative: ", tool, n)
tool_trueneg_count += 1
        elif n in all_pos_abx: ## classes truly present in the mock community that this tool missed
#print("false negative: ", tool, n)
tool_falseneg_count += 1
df3 = {'tool': tool, 'false-neg': tool_falseneg_count, 'true-neg': tool_trueneg_count}
neg_count2 = neg_count2.append(df3, ignore_index = True)
#print("False negatives from ", tool, ": ", tool_falseneg_count)
    #print("True negatives from ", tool, ": ", tool_trueneg_count)
## merge this all together
counts2 = | pd.merge(pos_count2, neg_count2, right_on = "tool", how = "outer",left_index=True, right_index=False) | pandas.merge |
import numpy as np
import pandas as pd
import pickle
from pathlib import Path
import covid19
from COVID19.model import AgeGroupEnum, EVENT_TYPES, TransmissionTypeEnum
from COVID19.model import Model, Parameters, ModelParameterException
import COVID19.simulation as simulation
from analysis_utils import ranker_I, check_fn_I, ranker_IR, check_fn_IR, roc_curve, events_list
from abm_utils import status_to_state, listofhouses, dummy_logger, quarantine_households
#import sib
#import greedy_Rank
def loop_abm(params,
inference_algo,
logger = dummy_logger(),
input_parameter_file = "./abm_params/baseline_parameters.csv",
household_demographics_file = "./abm_params/baseline_household_demographics.csv",
parameter_line_number = 1,
seed=1,
initial_steps = 0,
num_test_random = 50,
num_test_algo = 50,
fraction_SM_obs = 0.2,
fraction_SS_obs = 1,
quarantine_HH = False,
test_HH = False,
name_file_res = "res",
output_dir = "./output/",
save_every_iter = 5,
stop_zero_I = True,
adoption_fraction = 1.0,
fp_rate = 0.0,
fn_rate = 0.0,
smartphone_users_abm = False, # if True use app users fraction from OpenABM model
callback = lambda x : None,
data = {}
):
    '''
    Simulate an intervention strategy on top of the OpenABM epidemic simulation.

    input
    -----
    params: Dict
            Dictionary of OpenABM parameters to set
    inference_algo: Class (rank_template)
            Class that ranks the nodes according to their estimated probability of being infected
    logger: logger for printing intermediate steps

    results:
        writes the true configurations and transmissions to file
    '''
params_model = Parameters(input_parameter_file,
parameter_line_number,
output_dir,
household_demographics_file)
### create output_dir if missing
fold_out = Path(output_dir)
if not fold_out.exists():
fold_out.mkdir(parents=True)
### initialize a separate random stream
rng = np.random.RandomState()
rng.seed(seed)
### initialize ABM model
for k, val in params.items():
params_model.set_param(k, val)
model = Model(params_model)
model = simulation.COVID19IBM(model=model)
T = params_model.get_param("end_time")
N = params_model.get_param("n_total")
sim = simulation.Simulation(env=model, end_time=T, verbose=False)
house = covid19.get_house(model.model.c_model)
housedict = listofhouses(house)
has_app = covid19.get_app_users(model.model.c_model) if smartphone_users_abm else np.ones(N,dtype = int)
has_app &= (rng.random(N) <= adoption_fraction)
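    # App-user mask: taken from the OpenABM model if smartphone_users_abm is set, otherwise everyone,
    # then thinned at random so that only a fraction `adoption_fraction` actually keeps the app.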
### init data and data_states
data_states = {}
data_states["true_conf"] = np.zeros((T,N))
data_states["statuses"] = np.zeros((T,N))
data_states["tested_algo"] = []
data_states["tested_random"] = []
data_states["tested_SS"] = []
data_states["tested_SM"] = []
for name in ["num_quarantined", "q_SS", "q_SM", "q_algo", "q_random", "q_all", "infected_free", "S", "I", "R", "IR", "aurI", "prec1%", "prec5%", "test_+", "test_-", "test_f+", "test_f-"]:
data[name] = np.full(T,np.nan)
data["logger"] = logger
### init inference algo
inference_algo.init(N, T)
### running variables
indices = np.arange(N, dtype=int)
excluded = np.zeros(N, dtype=bool)
daily_obs = []
all_obs = []
all_quarantined = []
freebirds = 0
num_quarantined = 0
fp_num = 0
fn_num = 0
p_num = 0
n_num = 0
noise_SM = rng.random(N)
nfree = params_model.get_param("n_seed_infection")
for t in range(T):
### advance one time step
sim.steps(1)
status = np.array(covid19.get_state(model.model.c_model))
state = status_to_state(status)
data_states["true_conf"][t] = state
nS, nI, nR = (state == 0).sum(), (state == 1).sum(), (state == 2).sum()
if nI == 0 and stop_zero_I:
logger.info("stopping simulation as there are no more infected individuals")
break
if t == initial_steps:
logger.info("\nobservation-based inference algorithm starts now\n")
logger.info(f'time:{t}')
### extract contacts
daily_contacts = covid19.get_contacts_daily(model.model.c_model, t)
logger.info(f"number of unique contacts: {len(daily_contacts)}")
### compute potential test results for all
if fp_rate or fn_rate:
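            # Noisy test outcomes: an infected individual tests negative with probability fn_rate,
            # a susceptible tests positive with probability fp_rate; recovered individuals always map to state 2.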
noise = rng.random(N)
f_state = (state==1)*(noise > fn_rate) + (state==0)*(noise < fp_rate) + 2*(state==2)
else:
f_state = state
to_quarantine = []
all_test = []
excluded_now = excluded.copy()
fp_num_today = 0
fn_num_today = 0
p_num_today = 0
n_num_today = 0
def test_and_quarantine(rank, num):
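            # Walk the ranked list and test up to `num` not-yet-excluded individuals: positives are
            # queued for quarantine (whole household if quarantine_HH), negatives are only excluded
            # for today; the daily test counters are updated through the nonlocal variables below.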
nonlocal to_quarantine, excluded_now, all_test, fp_num_today, fn_num_today, p_num_today, n_num_today
test_rank = []
for i in rank:
if len(test_rank) == num:
                    break
if excluded_now[i]:
continue
test_rank += [i]
if f_state[i] == 1:
p_num_today += 1
if state[i] != 1:
fp_num_today += 1
q = housedict[house[i]] if quarantine_HH else [i]
excluded_now[q] = True
to_quarantine += q
excluded[q] = True
if test_HH:
all_test += q
else:
all_test += [i]
else:
n_num_today += 1
if state[i] == 1:
fn_num_today += 1
excluded_now[i] = True
all_test += [i]
return test_rank
### compute rank from algorithm
num_test_algo_today = num_test_algo
if t < initial_steps:
daily_obs = []
num_test_algo_today = 0
weighted_contacts = [(c[0], c[1], c[2], 2.0 if c[3] == 0 else 1.0) for c in daily_contacts if (has_app[c[0]] and has_app[c[1]])]
if nfree == 0 and quarantine_HH:
print("faster end")
rank_algo = np.zeros((N,2))
rank_algo[:, 0]=np.arange(N)
rank_algo[:, 1]=np.random.rand(N)
else:
rank_algo = inference_algo.rank(t, weighted_contacts, daily_obs, data)
rank = np.array(sorted(rank_algo, key= lambda tup: tup[1], reverse=True))
rank = [int(tup[0]) for tup in rank]
### test num_test_algo_today individuals
test_algo = test_and_quarantine(rank, num_test_algo_today)
### compute roc now, only excluding past tests
eventsI = events_list(t, [(i,1,t) for (i,tf) in enumerate(excluded) if tf], data_states["true_conf"], check_fn = check_fn_I)
xI, yI, aurI, sortlI = roc_curve(dict(rank_algo), eventsI, lambda x: x)
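        # aurI: area under the curve of the ranking against true infection status (past test exclusions
        # are taken into account, per the comment above); used below as a proxy for ranking quality.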
### test all SS
SS = test_and_quarantine(indices[status == 4], N)
### test a fraction of SM
SM = indices[(status == 5) & (noise_SM < fraction_SM_obs)]
SM = test_and_quarantine(SM, len(SM))
### do num_test_random extra random tests
test_random = test_and_quarantine(rng.permutation(N), num_test_random)
### quarantine infected individuals
num_quarantined += len(to_quarantine)
covid19.intervention_quarantine_list(model.model.c_model, to_quarantine, T+1)
### update observations
daily_obs = [(int(i), int(f_state[i]), int(t)) for i in all_test]
all_obs += daily_obs
### exclude forever nodes that are observed recovered
rec = [i[0] for i in daily_obs if f_state[i[0]] == 2]
excluded[rec] = True
### update data
data_states["tested_algo"].append(test_algo)
data_states["tested_random"].append(test_random)
data_states["tested_SS"].append(SS)
data_states["tested_SM"].append(SM)
data_states["statuses"][t] = status
data["S"][t] = nS
data["I"][t] = nI
data["R"][t] = nR
data["IR"][t] = nR+nI
data["aurI"][t] = aurI
prec = lambda f: yI[int(f/100*len(yI))]/int(f/100*len(yI)) if len(yI) else np.nan
ninfq = sum(state[to_quarantine]>0)
nfree = int(nI - sum(excluded[state == 1]))
data["prec1%"][t] = prec(1)
data["prec5%"][t] = prec(5)
data["num_quarantined"][t] = num_quarantined
data["test_+"][t] = p_num
data["test_-"][t] = n_num
data["test_f+"][t] = fp_num
data["test_f-"][t] = fn_num
data["q_SS"][t] = len(SS)
data["q_SM"][t] = len(SM)
sus_test_algo = sum(state[test_algo]==0)
inf_test_algo = sum(state[test_algo]==1)
rec_test_algo = sum(state[test_algo]==2)
inf_test_random = sum(state[test_random]==1)
data["q_algo"][t] = inf_test_algo
data["q_random"][t] = sum(state[test_random]==1)
data["infected_free"][t] = nfree
asbirds = 'a bird' if nfree == 1 else 'birds'
fp_num += fp_num_today
fn_num += fn_num_today
n_num += n_num_today
p_num += p_num_today
### show output
logger.info(f"True : (S,I,R): ({nS:.1f}, {nI:.1f}, {nR:.1f})")
logger.info(f"AUR_I : {aurI:.3f}, prec(1% of {len(yI)}): {prec(1):.2f}, prec5%: {prec(5):.2f}")
logger.info(f"SS: {len(SS)}, SM: {len(SM)}, results test algo (S,I,R): ({sus_test_algo},{inf_test_algo},{rec_test_algo}), infected test random: {inf_test_random}/{num_test_random}")
logger.info(f"false+: {fp_num} (+{fp_num_today}), false-: {fn_num} (+{fn_num_today})")
logger.info(f"...quarantining {len(to_quarantine)} guys -> got {ninfq} infected, {nfree} free as {asbirds} ({nfree-freebirds:+d})")
freebirds = nfree
### callback
callback(data)
if t % save_every_iter == 0:
df_save = pd.DataFrame.from_records(data, exclude=["logger"])
df_save.to_csv(output_dir + name_file_res + "_res.gz")
# save files
df_save = pd.DataFrame.from_records(data, exclude=["logger"])
df_save.to_csv(output_dir + name_file_res + "_res.gz")
with open(output_dir + name_file_res + "_states.pkl", mode="wb") as f_states:
pickle.dump(data_states, f_states)
sim.env.model.write_individual_file()
df_indiv = pd.read_csv(output_dir+"individual_file_Run1.csv", skipinitialspace = True)
df_indiv.to_csv(output_dir+name_file_res+"_individuals.gz")
sim.env.model.write_transmissions()
df_trans = | pd.read_csv(output_dir+"transmission_Run1.csv") | pandas.read_csv |
# Credit card fraud transaction data
# Undersampling - logistic regression - bagging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.model_selection import cross_val_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
def data_out(o):
import csv
with open("output-RanFor.csv", "w") as f:
writer = csv.writer(f)
writer.writerows(o)
def play(N,n,d):
print("N=",N)
print("n=",n)
df = | pd.read_csv('./trainccard.csv') | pandas.read_csv |
"""
Authors: <NAME> and <NAME>
"""
from bloomberg import BBG
import pandas as pd
from sklearn import preprocessing
import numpy as np
import matplotlib.pyplot as plt
bbg = BBG()
# Brazil FGV Consumer Confidence Index SA Sep 2005=100
# Original Date: '30-sep-2005'
start_date = | pd.to_datetime('01-jan-2010') | pandas.to_datetime |
"""
This script cleans the data
"""
import json
import lightgbm as lgb
import numpy as np
import pandas as pd
from scipy.signal import savgol_filter as sg
from sklearn.feature_selection import RFECV
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV
import shap
from auxiliary import week_of_month, BlockingTimeSeriesSplit
from config import DATA_CONSUMPTION_PROCESSED_FILE, INTERVENTION_CALENDAR, \
DATA_WEATHER_PROCESSED_FILE, BEST_FEATURES_FILE, ALL_NUMERICAL_FEATURES,ALL_CATEGORICAL_FEATURES, \
DATA_VACATIONS_INTERVENTION_FILE, DATA_METADATA_PROCESSED_FILE, DATA_HOLIDAYS_PROCESSED_FILE, \
DATA_ISO_CONSUMPTION_PROCESSED_FILE, DATA_ENTHALPY_GRADIENTS_PROCESSED_FILE, DATA_VACATIONS_FILE, \
DATA_SOLAR_GAINS_PROCESSED_FILE, DYNAMIC_SPACE, EXPERIMENTS, CONTROL_GROUPS, BEST_PARAMETERS_FILE
from custom_scorer_module import scorer_quantile, scorer_rmse
def lgbm_regression_efecto_acumulado_con_linea_base_del_experimento(alpha,
data_mean,
get_best_parameters=False,
get_best_features=False,
use_best_features=False):
# INITIALIZE NEW FIELDS
numerical_features_list = ALL_NUMERICAL_FEATURES
categorical_features_list = ALL_CATEGORICAL_FEATURES
new_field = "GBM_consumption_kWh_" + alpha
data_mean[new_field] = 0.0
for experiment, control in zip(EXPERIMENTS, CONTROL_GROUPS):
# GET DATA ABOUT PERIODS OF INTERVENTION OF THE EXPERIMENTAL PERIOD
intervention_data = INTERVENTION_CALENDAR[experiment]
pre_period = intervention_data[1]
post_period = intervention_data[2]
range_pre_intervention_period = pd.date_range(start=pre_period[0], end=pre_period[1], freq='D')
range_post_intervention_period = pd.date_range(start=post_period[0], end=post_period[1], freq='D')
if use_best_features:
best_features = open_best_features()
features = best_features[alpha][str(experiment)]
categorical_features_list = [x for x in categorical_features_list if x in features]
numerical_features_list = [x for x in numerical_features_list if x in features]
# GET TRAINING AND VALIDATION SET
X, y = get_training_validation_set(CONTROL_GROUPS,
data_mean,
range_pre_intervention_period,
categorical_features_list,
numerical_features_list,
experiment,
"CONSUMPTION_kWh",
get_best_parameters,
get_best_features)
if get_best_parameters and get_best_features:
best_parameters = open_best_parameters()
best_features = open_best_features()
best_parameters[alpha][str(experiment)], best_features[alpha][
str(experiment)] = get_best_params_and_features(X, y, float(alpha))
save_best_parameters(best_parameters)
save_best_features(best_features)
elif get_best_parameters:
best_parameters = open_best_parameters()
best_parameters[alpha][str(experiment)] = get_best_params(X, y, float(alpha))
save_best_parameters(best_parameters)
elif get_best_features:
best_parameters = open_best_parameters()
params = best_parameters[alpha][str(experiment)]
params['alpha'] = float(alpha)
trained_model = lgb.train(params,
lgb.Dataset(X, y, categorical_feature=categorical_features_list)
)
explainer = shap.TreeExplainer(trained_model)
shap_values = explainer.shap_values(X)
shap_sum = np.abs(shap_values).mean(axis=0)
best_features = open_best_features()
best_features[alpha][str(experiment)] = [f for f, s in zip(X.columns.tolist(), shap_sum.tolist()) if s >= 0.005]
save_best_features(best_features)
importance_df = pd.DataFrame([X.columns.tolist(), shap_sum.tolist()]).T
importance_df.columns = ['column_name', 'shap_importance']
importance_df = importance_df.sort_values('shap_importance', ascending=False)
print(importance_df)
else:
print('training experiment {}, with alpha {}, with control {}'.format(experiment, alpha, control))
best_parameters = open_best_parameters()
params = best_parameters[alpha][str(experiment)]
params['alpha'] = float(alpha)
trained_model = lgb.train(params,
lgb.Dataset(X, y, categorical_feature=categorical_features_list)
)
# explainer = shap.TreeExplainer(trained_model)
# shap_values = explainer.shap_values(X)
# shap_sum = np.abs(shap_values).mean(axis=0)
# importance_df = pd.DataFrame([X.columns.tolist(), shap_sum.tolist()]).T
# importance_df.columns = ['column_name', 'shap_importance']
# importance_df = importance_df.sort_values('shap_importance', ascending=False)
# print(importance_df)
# fold_importance_df = pd.DataFrame()
# fold_importance_df["feature"] = X.columns
# fold_importance_df["importance"] = trained_model.feature_importance()
# fold_importance_df = fold_importance_df.sort_values(by="importance", ascending=False)
# print(fold_importance_df)
# visualize the first prediction's explanation (use matplotlib=True to avoid Javascript)
# shap.summary_plot(shap_values, X, plot_type="bar")
data_mean = predict(new_field,
data_mean,
trained_model,
experiment,
numerical_features_list,
categorical_features_list,
range_post_intervention_period,
range_pre_intervention_period)
print("SUCCESS!")
return data_mean
def open_best_parameters():
with open(BEST_PARAMETERS_FILE, 'r') as fp:
data = json.load(fp)
return data
def open_best_features():
with open(BEST_FEATURES_FILE, 'r') as fp:
data = json.load(fp)
return data
def save_best_parameters(best_parameters):
with open(BEST_PARAMETERS_FILE, 'w') as fp:
json.dump(best_parameters, fp)
def save_best_features(best_parameters):
with open(BEST_FEATURES_FILE, 'w') as fp:
json.dump(best_parameters, fp)
def predict(new_field,
data_mean,
final_model,
experiment,
numerical_features_list,
categorical_features_list,
range_post_intervention_period,
range_pre_intervention_period):
# GET ALL FEATURES
features = numerical_features_list + categorical_features_list
# ADOPT CATEGORICAL FEATURES SO THEY WORK AS EXPECTED
for c in categorical_features_list:
data_mean[c] = data_mean[c].astype('category')
# PREDICT FOR PREINTERVENTION
data_pre = data_mean[(data_mean.timestamp.isin(range_pre_intervention_period)) &
(data_mean.EXPERIMENT == experiment)]
data_mean.loc[data_mean['timestamp'].isin(range_pre_intervention_period) &
(data_mean.EXPERIMENT == experiment), new_field] = np.exp(final_model.predict(data_pre[features]))
# PREDICT FOR POSTINTERVENTION
data_post = data_mean[(data_mean.timestamp.isin(range_post_intervention_period)) &
(data_mean.EXPERIMENT == experiment)]
data_mean.loc[data_mean['timestamp'].isin(range_post_intervention_period) &
(data_mean.EXPERIMENT == experiment), new_field] = np.exp(
final_model.predict(data_post[features]))
return data_mean
def get_training_validation_set(control,
data_mean,
range_pre_intervention_period,
categorical_features_list,
numerical_features_list,
experiment,
target_feature_name,
get_best_parameters,
get_best_features):
df = data_mean.copy()
# GET TRAINING DATA
if get_best_parameters or get_best_features:
array = df[df.timestamp.isin(range_pre_intervention_period) |
(df.INTERVENTION.isin(control)) & (data_mean.EXPERIMENT == experiment)].values
columns = df.columns
data_train = pd.DataFrame(array, index=array[:, 0], columns=columns)
data_train = data_train.reset_index(drop=True)
else:
array = df[df.timestamp.isin(range_pre_intervention_period) |
(df.INTERVENTION.isin(control)) & (data_mean.EXPERIMENT == experiment)].values
# array = df[df.timestamp.isin(range_pre_intervention_period) & (df.EXPERIMENT == experiment)].values
columns = df.columns
data_train = pd.DataFrame(array, index=array[:, 0], columns=columns)
data_train = data_train.reset_index(drop=True)
    # TRANSFORM INTO APPROPRIATE VALUES
for c in categorical_features_list:
data_train[c] = data_train[c].astype('category')
for c in numerical_features_list + [target_feature_name]:
data_train[c] = data_train[c].astype('float32')
    # TRANSFORM THE TARGET TO LOG DOMAIN
    data_train[target_feature_name] = np.log(data_train[target_feature_name].astype('float'))
    # SPLIT INTO FEATURES AND TARGET
features_list = numerical_features_list + categorical_features_list
X = data_train[features_list]
y = data_train[target_feature_name]
return X, y
def get_best_params(X, y, alpha):
grid_params = DYNAMIC_SPACE
scoring = make_scorer(scorer_rmse, greater_is_better=False)#, quantile=alpha)
regressor = lgb.LGBMRegressor(n_jobs=1,
metric='quantile',
objective='quantile')
grid = GridSearchCV(estimator=regressor,
param_grid=grid_params,
verbose=1,
cv=BlockingTimeSeriesSplit(n_splits=5),
n_jobs=-1,
scoring=scoring)
grid.fit(X, y)
return grid.best_params_
def get_best_params_and_features(X, y, alpha):
scoring = make_scorer(scorer_rmse, greater_is_better=False)#, quantile=alpha)
regressor = lgb.LGBMRegressor(n_jobs=1,
metric='quantile',
objective='quantile')
    # select the best features and then pass them to the grid search
selector = RFECV(estimator=regressor,
step=1,
cv=BlockingTimeSeriesSplit(n_splits=5),
scoring=scoring)
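    # RFECV recursively drops the least useful features under the blocked time-series CV before the
    # hyper-parameter grid search below is run on the reduced feature set (via the estimator__ prefix).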
# pipeline = Pipeline([("selector", selector), ("regressor",regressor)])
grid_params = {}
for name, space in DYNAMIC_SPACE.items():
grid_params["estimator__" + name] = space
grid = GridSearchCV(estimator=selector,
param_grid=grid_params,
verbose=1,
cv=BlockingTimeSeriesSplit(n_splits=5),
n_jobs=-1,
scoring=scoring)
grid.fit(X, y)
features = [f for f, s in zip(X.columns, grid.best_estimator_.support_) if s]
final_grid_params = {}
for name, space in grid.best_params_.items():
final_grid_params[name.split("__")[-1]] = space
return final_grid_params, features
def data_preprocessing_interventions():
real_consumption_df = pd.read_csv(DATA_CONSUMPTION_PROCESSED_FILE)
real_consumption_df['timestamp'] = pd.to_datetime(real_consumption_df['timestamp'])
vacations_data_df = pd.read_csv(DATA_VACATIONS_INTERVENTION_FILE)
vacations_data_df['timestamp'] = pd.to_datetime(vacations_data_df['timestamp'])
vacations_data_smapee_df = pd.read_csv(DATA_VACATIONS_FILE)
vacations_data_smapee_df['timestamp'] = pd.to_datetime(vacations_data_smapee_df['timestamp'])
weather_data_df = pd.read_csv(DATA_WEATHER_PROCESSED_FILE)
weather_data_df['timestamp'] = pd.to_datetime(weather_data_df['timestamp'])
metadata_df = pd.read_excel(DATA_METADATA_PROCESSED_FILE, sheets='SENSORS')[['smapee',
'ID_CEA',
'INTERVENTION',
'EXPERIMENT',
'GFA_m2',
'INCOME',
'BEDROOMS']]
holidays_df = pd.read_csv(DATA_HOLIDAYS_PROCESSED_FILE)
holidays_df['timestamp'] = pd.to_datetime(holidays_df['timestamp'])
gradients_df = pd.read_csv(DATA_ENTHALPY_GRADIENTS_PROCESSED_FILE)
gradients_df['timestamp'] = pd.to_datetime(gradients_df['timestamp'])
solar_gains_df = pd.read_csv(DATA_SOLAR_GAINS_PROCESSED_FILE)
solar_gains_df['timestamp'] = pd.to_datetime(solar_gains_df['timestamp'])
iso_consumption_df = | pd.read_csv(DATA_ISO_CONSUMPTION_PROCESSED_FILE) | pandas.read_csv |
#### Filename: Connection.py
#### Version: v1.0
#### Author: <NAME>
#### Date: March 4, 2019
#### Description: Connect to database and get atalaia dataframe.
import psycopg2
import sys
import os
import pandas as pd
import logging
from configparser import ConfigParser
from resqdb.CheckData import CheckData
import numpy as np
import time
from multiprocessing import Process, Pool
from threading import Thread
import collections
import datetime
import csv
from dateutil.relativedelta import relativedelta
import json
class Connection():
""" The class connecting to the database and exporting the data for the Slovakia.
:param nprocess: number of processes
:type nprocess: int
:param data: the name of data (resq or atalaia)
:type data: str
"""
def __init__(self, nprocess=1, data='resq'):
start = time.time()
# Create log file in the working folder
debug = 'debug_' + datetime.datetime.now().strftime('%d-%m-%Y') + '.log'
log_file = os.path.join(os.getcwd(), debug)
logging.basicConfig(filename=log_file,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
logging.info('Connecting to datamix database!')
# Get absolute path
path = os.path.dirname(__file__)
self.database_ini = os.path.join(path, 'database.ini')
# Read temporary csv file with CZ report names and Angels Awards report names
path = os.path.join(os.path.dirname(__file__), 'tmp', 'czech_mapping.json')
with open(path, 'r', encoding='utf-8') as json_file:
cz_names_dict = json.load(json_file)
# Set section
datamix = 'datamix-backup'
# datamix = 'datamix'
# Check which data should be exported
if data == 'resq':
# Create empty dictionary
# self.sqls = ['SELECT * from resq_mix', 'SELECT * from ivttby_mix', 'SELECT * from thailand', 'SELECT * from resq_ivttby_mix']
self.sqls = ['SELECT * from resq_mix', 'SELECT * from ivttby_mix', 'SELECT * from thailand']
# List of dataframe names
self.names = ['resq', 'ivttby', 'thailand']
elif data == 'atalaia':
self.sqls = ['SELECT * from atalaia_mix']
self.names = []
elif data == 'qasc':
self.sqls = ['SELECT * FROM qasc_mix']
self.names = []
elif data == 'africa':
self.sqls = ['SELECT * FROM africa_mix']
self.names = []
# Dictionary initialization - db dataframes
self.dictdb_df = {}
# Dictioanry initialization - prepared dataframes
self.dict_df = {}
if nprocess == 1:
if data == 'resq':
for i in range(0, len(self.names)):
df_name = self.names[i]
self.connect(self.sqls[i], datamix, nprocess, df_name=df_name)
# self.connect(self.sqls[2], datamix, nprocess, df_name='resq_ivttby_mix')
# self.resq_ivttby_mix = self.dictdb_df['resq_ivttby_mix']
# self.dictdb_df['resq_ivttby_mix'].to_csv('resq_ivttby_mix.csv', sep=',', index=False)
# if 'resq_ivttby_mix' in self.dictdb_df.keys():
# del self.dictdb_df['resq_ivttby_mix']
for k, v in self.dictdb_df.items():
self.prepare_df(df=v, name=k)
self.df = pd.DataFrame()
for i in range(0, len(self.names)):
self.df = self.df.append(self.dict_df[self.names[i]], sort=False)
logging.info("Connection: {0} dataframe has been appended to the resulting dataframe!".format(self.names[i]))
# Get all country code in dataframe
self.countries = self._get_countries(df=self.df)
# Get preprocessed data
self.preprocessed_data = self.check_data(df=self.df, nprocess=1)
self.preprocessed_data['RES-Q reports name'] = self.preprocessed_data.apply(lambda x: cz_names_dict[x['Protocol ID']]['report_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
self.preprocessed_data['ESO Angels name'] = self.preprocessed_data.apply(lambda x: cz_names_dict[x['Protocol ID']]['angels_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
##############
# ONSET TIME #
##############
self.preprocessed_data['HOSPITAL_TIME'] = pd.to_datetime(self.preprocessed_data['HOSPITAL_TIME'], format='%H:%M:%S').dt.time
try:
self.preprocessed_data['HOSPITAL_TIMESTAMP'] = self.preprocessed_data.apply(lambda x: datetime.datetime.combine(x['HOSPITAL_DATE'], x['HOSPITAL_TIME']) if not pd.isnull(x['HOSPITAL_TIME']) and not pd.isnull(x['HOSPITAL_DATE']) else None, axis=1)
#self.preprocessed_data['HOSPITAL_TIMESTAMP'] = pd.to_datetime(self.preprocessed_data['HOSPITAL_DATE'] + ' ' + self.preprocessed_data['HOSPITAL_TIME'])
except ValueError as error:
logging.error("Error occured when converting hospital date and time into timestamp object - {}.".format(error))
self.preprocessed_data['VISIT_DATE'] = self.preprocessed_data.apply(lambda x: self.fix_date(x['VISIT_DATE'], x['HOSPITAL_DATE']), axis=1)
self.preprocessed_data['VISIT_TIME'] = pd.to_datetime(self.preprocessed_data['VISIT_TIME'], format='%H:%M:%S').dt.time
try:
self.preprocessed_data['VISIT_TIMESTAMP'] = self.preprocessed_data.apply(lambda x: datetime.datetime.combine(x['VISIT_DATE'], x['VISIT_TIME']) if not pd.isnull(x['VISIT_TIME']) and not pd.isnull(x['VISIT_DATE']) else None, axis=1)
#self.preprocessed_data['VISIT_TIMESTAMP'] = pd.to_datetime(self.preprocessed_data['VISIT_DATE'] + ' ' + self.preprocessed_data['VISIT_TIME'])
except ValueError as error:
logging.error("Error occured when converting visit date and time into timestamp object - {}.".format(error))
# Get difference in minutes between hospitalization and last visit
self.preprocessed_data['LAST_SEEN_NORMAL'] = self.preprocessed_data.apply(lambda x: self.time_diff(x['VISIT_TIMESTAMP'], x['HOSPITAL_TIMESTAMP']), axis=1)
self.preprocessed_data['LAST_SEEN_NORMAL'].fillna(0, inplace=True)
                # Create new columns flagging patients who had an in-hospital stroke and whose recanalization procedures were recorded with timestamps
self.preprocessed_data['HOSPITAL_STROKE_IVT_TIMESTAMPS'] = np.nan
self.preprocessed_data.loc[
(self.preprocessed_data['HOSPITAL_STROKE'] == 1) &
((self.preprocessed_data['IVT_ONLY'] == 2) |
(self.preprocessed_data['IVT_TBY'] == 2) |
(self.preprocessed_data['IVT_TBY_REFER'] == 2)),
'HOSPITAL_STROKE_IVT_TIMESTAMPS'] = 1
self.preprocessed_data['HOSPITAL_STROKE_TBY_TIMESTAMPS'] = np.nan
self.preprocessed_data.loc[
(self.preprocessed_data['HOSPITAL_STROKE'] == 1) &
((self.preprocessed_data['IVT_TBY'] == 2) |
(self.preprocessed_data['TBY_ONLY'] == 2) |
(self.preprocessed_data['TBY_REFER_LIM'] == 2) |
(self.preprocessed_data['TBY_REFER_ALL'] == 2)),
'HOSPITAL_STROKE_TBY_TIMESTAMPS'] = 1
elif data == 'atalaia':
self.connect(self.sqls[0], datamix, nprocess, df_name='atalaia_mix')
self.atalaiadb_df = self.dictdb_df['atalaia_mix']
#self.atalaia_preprocessed_data = self.prepare_atalaia_df(self.atalaiadb_df)
self.atalaia_preprocessed_data = self.atalaiadb_df.copy()
del self.dictdb_df['atalaia_mix']
elif data == 'qasc':
self.__get_qasc_df(datamix, nprocess)
elif data == 'africa':
self.__get_africa_df(datamix, nprocess)
else:
if data == 'resq':
threads = []
for i in range(0, len(self.names)):
df_name = self.names[i]
process = Thread(target=self.connect(self.sqls[i], datamix, i, df_name=df_name))
process.start()
threads.append(process)
# logging.info('The process with id {0} is running.'.format(process))
process = Thread(target=self.connect(self.sqls[2], datamix, 1, df_name='resq_ivttby_mix'))
process.start()
threads.append(process)
for process in threads:
process.join()
end = time.time()
tdelta = (end-start)/60
logging.info('The database data were exported in {0} minutes.'.format(tdelta))
# self.dictdb_df['resq_ivttby_mix'].to_csv('resq_ivttby_mix.csv', sep=',', index=False)
if 'resq_ivttby_mix' in self.dictdb_df.keys():
del self.dictdb_df['resq_ivttby_mix']
treads = []
for i in range(0, len(self.names)):
df_name = self.names[i]
process = Thread(target=self.prepare_df(df=self.dictdb_df[df_name], name=df_name))
process.start()
threads.append(process)
for process in threads:
process.join()
end = time.time()
tdelta = (end-start)/60
logging.info('The database data were prepared in {0} minutes.'.format(tdelta))
self.df = pd.DataFrame()
for i in range(0, len(self.names)):
self.df = self.df.append(self.dict_df[self.names[i]], sort=False)
                    logging.info("Connection: {0} dataframe has been appended to the resulting dataframe!".format(self.names[i]))
subject_ids = self.df['Subject ID'].tolist()
duplicates = [item for item, count in collections.Counter(subject_ids).items() if count > 1]
for i in duplicates:
duplicates_rows = self.df[(self.df['Subject ID'] == i) & (~pd.isnull(self.df['crf_parent_name']))]
set_tmp = set(duplicates_rows['Protocol ID'])
if len(set_tmp) == 1:
crfs = duplicates_rows['crf_parent_name'].tolist()
#print(duplicates_rows[['Subject ID', 'Protocol ID']])
for i in crfs:
if 'RESQV12' in i:
keep_crf = i
if 'RESQV20' in i:
keep_crf = i
if 'IVT_TBY' in i and 'DEVCZ10' not in i:
keep_crf = i
index = duplicates_rows.index[duplicates_rows['crf_parent_name'] != keep_crf].tolist()
self.df.drop(index, inplace=True)
#print(duplicates_rows['crf_parent_name'])
#print("Keep form: {0}, deleted row: {1}".format(keep_crf, index))
# Get all country code in dataframe
self.countries = self._get_countries(df=self.df)
# Cal check data function
self.preprocessed_data = self.check_data(self.df, nprocess=nprocess)
#self.preprocessed_data = self.check_data(self.df, nprocess=None)
self.preprocessed_data['RES-Q reports name'] = self.preprocessed_data.apply(lambda x: cz_names_dict[x['Protocol ID']]['report_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
self.preprocessed_data['ESO Angels name'] = self.preprocessed_data.apply(lambda x: cz_names_dict[x['Protocol ID']]['angels_name'] if 'Czech Republic' in x['Country'] and x['Protocol ID'] in cz_names_dict.keys() else x['Site Name'], axis=1)
##############
# ONSET TIME #
##############
self.preprocessed_data['HOSPITAL_TIME'] = pd.to_datetime(self.preprocessed_data['HOSPITAL_TIME'], format='%H:%M:%S').dt.time
try:
self.preprocessed_data['HOSPITAL_TIMESTAMP'] = self.preprocessed_data.apply(lambda x: datetime.datetime.combine(x['HOSPITAL_DATE'], x['HOSPITAL_TIME']) if not pd.isnull(x['HOSPITAL_TIME']) and not pd.isnull(x['HOSPITAL_DATE']) else None, axis=1)
#self.preprocessed_data['HOSPITAL_TIMESTAMP'] = pd.to_datetime(self.preprocessed_data['HOSPITAL_DATE'] + ' ' + self.preprocessed_data['HOSPITAL_TIME'])
except ValueError as error:
logging.error("Error occured when converting hospital date and time into timestamp object - {}.".format(error))
self.preprocessed_data['VISIT_DATE'] = self.preprocessed_data.apply(lambda x: self.fix_date(x['VISIT_DATE'], x['HOSPITAL_DATE']), axis=1)
self.preprocessed_data['VISIT_TIME'] = | pd.to_datetime(self.preprocessed_data['VISIT_TIME'], format='%H:%M:%S') | pandas.to_datetime |
from os import listdir
import os
from os.path import isfile, join
import csv
import matplotlib.pyplot as plt
from configparser import ConfigParser
import sweetviz
import pandas as pd
import numpy as np
from joblib import dump, load
from sklearn import metrics
from termcolor import colored
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
PROYECT_PATH=os.getcwd()
config = ConfigParser()
config.read('config.cfg')
DATA_PATH = config['DEFAULT']['data_path']
MODEL_PATH = config['DEFAULT']['model_path']
TARGET = config['DEFAULT']['target']
FORMAT = config['DEFAULT']['format']
TEST_SIZE=float(config['DEFAULT']['test_size'])
VALIDATION_SIZE=int(config['DEFAULT']['validation_size'])
COLUMNS_TO_DROP=config['DEFAULT']['columns_to_drop']
COLUMNS_TO_DROP_FUTURE=config['DEFAULT']['columns_to_drop_future']
DENDROMETER_AJUST_VALUE=int(config['DEFAULT']['dendrometer_ajust_value'])
TREE_MAX_DEPTH=int(config['DEFAULT']['tree_max_depth'])
ANOMALY_TCB_POSITIVE=int(config['DEFAULT']['anomaly_TCB_positive'])
ANOMALY_TCB_NEGATIVE=float(config['DEFAULT']['anomaly_TCB_negative'])
ANOMALY_HUMB=int(config['DEFAULT']['anomaly_HUMB'])
ANOMALY_TD=int(config['DEFAULT']['anomaly_TD'])
DEBUG_SENSORS=eval(config['DEFAULT']['debug_sensors'])
model_name='{}{}{}'.format(MODEL_PATH,TARGET,'.joblib')
def from_str_to_array(future):
if(future==0):
return COLUMNS_TO_DROP.split(',')
else:
return COLUMNS_TO_DROP_FUTURE.split(',')
def get_files_with_data():
return [f for f in listdir(DATA_PATH) if (FORMAT in f and not 'lock.' in f)]
def dendrometer_and_battery_cleaner(df,future):
df.drop(from_str_to_array(future), axis = 1, inplace = True)
df=df[df['TD'].notna()]
df['TD']=df['TD'].apply(dendrometer_ajust)
return df
def generate_decision_tree(df,df_columns):
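    # Despite the name, this fits a RandomForestClassifier (an ensemble of decision trees)
    # on every column except the configured TARGET.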
feature_cols = [a for a in df_columns if a not in [TARGET]]
X = df[feature_cols]
y = df[TARGET]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=1)
clf = RandomForestClassifier(max_depth=TREE_MAX_DEPTH,random_state=0)
return clf.fit(X_train,y_train.astype('int')),X_train,X_test,y_train.astype('int'),y_test.astype('int')
def generate_reports(X_train,X_test):
report = sweetviz.compare([X_train, "Train"], [X_test, "Test"], X_train.columns[0])
report.show_html("informe_datos.html",open_browser=False)
def generate_validation_data(df,df_columns):
df_validation=df.sample(n = VALIDATION_SIZE, replace = False)
feature_cols = [a for a in df_columns if a not in [TARGET]]
    df_validation_X = df_validation[feature_cols]
    df_validation_y = df_validation[TARGET].astype('int')
anomaly_detector(df_validation_X,'Training: ')
anomaly_detector(df_validation,'Prediction: ')
return df.drop(df_validation.index),df_validation_X,df_validation_y
def dendrometer_ajust(dendrometer_value):
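    # Truncate the raw reading to its first DENDROMETER_AJUST_VALUE characters and cast to int
    # (a coarse normalization of the dendrometer values).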
return int(str(dendrometer_value)[:DENDROMETER_AJUST_VALUE])
def prepare_dataset(data_block,future=0):
file_path='{}{}{}'.format(PROYECT_PATH,'/data/',data_block)
df=pd.read_excel(file_path, skiprows=1)
df=dendrometer_and_battery_cleaner(df,future)
df=categorization(df)
return ajust_columns(df,future)
def ajust_columns(df,future):
df_columns=df.columns
if(future==1):
df_columns=df_columns.drop(labels=['FECHA'])
df[df_columns] = df[df_columns].apply(pd.to_numeric)
return df,df_columns
def get_predictions(clf,X_test,df_validation_X,y_test,df_validation_y):
y_pred = clf.predict(X_test)
print(colored("Test_Accuracy:",'green'),metrics.accuracy_score(y_test, y_pred))
val_pred = clf.predict(df_validation_X)
print(colored("Validation_Accuracy:",'yellow'),metrics.accuracy_score(df_validation_y, val_pred))
dump(clf, model_name)
def get_predictions_from_saved_model(val):
target_name=model_name.replace('.joblib','').split('_')[1]
dataframe = pd.DataFrame(val, index=[0])
dataframe.drop(target_name, axis = 1, inplace = True)
clf = load(model_name)
y_pred = clf.predict(dataframe)
return y_pred
def anomaly_detector(df,phase):
errors_data=[]
sensor_error_default_text='Error detected on sensor '
arr_options=["TCB","HUMB", "TD"]
if(TARGET not in arr_options):
errors_dict= {
"TCB": 0,
"HUMB": 0,
"TD": 0
}
for index, row in df.iterrows():
if(row['TCB']>ANOMALY_TCB_POSITIVE or float(row['TCB'])<ANOMALY_TCB_NEGATIVE):
            sensor_error_TCB='{}{}{}{}{}{}{}{}{}{}'.format(phase,sensor_error_default_text,'TCB on index ',str(index),'. Range should be between ',ANOMALY_TCB_POSITIVE,' and ',ANOMALY_TCB_NEGATIVE,' but output equals to ',row['TCB'])
errors_dict['TCB']+=1
errors_data.append([index,'TCB',row['TCB']])
print(colored(sensor_error_TCB,'yellow')) if(DEBUG_SENSORS==True) else None
if(row['HUMB']>ANOMALY_HUMB):
sensor_error_HUMB='{}{}{}{}{}{}{}{}'.format(phase,sensor_error_default_text,'HUMB on index ',str(index),'.Range should be under ',ANOMALY_HUMB,' but is ',row['HUMB'])
errors_dict['HUMB']+=1
errors_data.append([index,'HUMB',row['HUMB']])
print(colored(sensor_error_HUMB,'yellow')) if(DEBUG_SENSORS==True) else None
if(ANOMALY_TD>row['TD'] or row['TD']==''):
sensor_error_TD='{}{}{}{}{}{}{}{}'.format(phase,sensor_error_default_text,'TD on index ',str(index),'.Range should be over ',ANOMALY_TD,' but is ',row['TD'])
errors_dict['TD']+=1
errors_data.append([index,'TD',row['TD']])
print(colored(sensor_error_TD,'yellow')) if(DEBUG_SENSORS==True) else None
print(colored('{}{}{}'.format(phase,'Total errors from the device equals to ',str(errors_dict)),'red'))
df_errors= | pd.DataFrame(errors_data,columns = ['Index', 'Error_Type','Value']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Naive_Bayes_Classifier.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1JZwLGwBxEjnbv_8UTqEmmbDgFy_7Te2r
<div class="alert alert-block alert-info" >
<h1>Naive Bayes Classifier </h1>
## Build a spam classifier using Naive Bayes
"""
#Headers
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
"""## Step 1:- Load your data
#### There are three datasets for training: TrainDataset1.csv, TrainDataset2.csv and TrainDataset3.txt. Each dataset contains short messages with the labels (ham or spam). Load the dataset using pandas.
"""
#Load your dataset in this cell
def loadData():
#your code
data1=pd.read_csv(r'TrainDataset1.csv')
data2=pd.read_csv(r'TrainDataset2.csv')
data3 = | pd.read_csv('TrainDataset3.txt', delimiter='\t') | pandas.read_csv |
# 1584927559
import task_submit
# import task_submit_optimus
import task_submit_raw
from task_submit_raw import VGGTask,RESTask,RETask,DENTask,XCETask
import random
import kubernetes
import influxdb
import kubernetes
import signal
from TimeoutException import TimeoutError,Myhandler
import yaml
import requests
from multiprocessing import Process
import multiprocessing
import urllib
import urllib3
import time
import operator
import numpy as np
# from utils import Timer
import joblib
from sklearn.ensemble import GradientBoostingRegressor,RandomForestRegressor
import time
'''
Modifications:
1. No scheduling policy needs to be provided
2. Initial resource allocation is not accurate
3. The initial numbers of PS and Worker replicas are set randomly
4. Resources are not adjusted, and nodes are not adjusted either
5. The buffer policy has been revised
'''
# from sklearn.preprocessing import MinMaxScaler
np.set_printoptions(suppress=True) # configure numpy print options (suppress scientific notation)
import os
import json
import math
import pandas as pd
import argparse
import random
import multiprocessing
import time
from pytz import UTC
from dateutil import parser
from datetime import datetime
import psutil
import socket
from max_heap import MaxHeap
import worker_queue
# from worker_queue import value_free_load,value_weight_load
from Global_client import Global_Influx
aToken = '<KEY>'
aTokenw = '<KEY>'
LOSSHOST = '192.168.128.5'
LOSSPORT = 12527
DNA_SIZE = 4
XBOUND = [0.8,2]
XBOUND2 = [0.5,0.95]
YBOUND2 = [0.65,0.95]
YBOUND = [1,3]
CROSSOVER_RATE = 0.8
POP_SIZE = 16
N_GENERATIONS = 8
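# The constants above (DNA_SIZE, XBOUND*, YBOUND*, CROSSOVER_RATE, POP_SIZE, N_GENERATIONS) look like
# genetic-algorithm hyperparameters, presumably consumed by a tuning/search routine elsewhere in this script.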
def load_config(config_file):
    # # a JSON document is just a string
    # f = open('product.json', encoding='utf-8')
    # res = f.read()
    # product_dic = json.loads(res) # parse the JSON string into Python data types (works on strings only)
    # print(product_dic)
    # print(product_dic['iphone'])
    # # t = json.load(f)
    # # print(t) # pass a file object and it reads the JSON file directly into Python data
# # print(t['iphone'])
# f.close()
f = open(config_file,encoding='utf-8')
res = f.read()
config_content = json.loads(res)
f.close()
return config_content
def save_config(config,filename):
config_content = {}
for key,value in config.items():
# if key != 'job' and key != 'ns':
config_content[key] = value
# task_content['task_id'] = tasks['task_id']
fw = open(filename, 'w', encoding='utf-8')
    # ensure_ascii: defaults to True; non-ASCII characters in the dict would be escaped as \uXXXX, so set it to False to keep them readable
dic_json = json.dumps(config_content, ensure_ascii=False, indent=4) # 字典转成json,字典转成字符串
fw.write(dic_json)
fw.close()
def deletehelp(delete_job_name,v1):
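    # Force-remove a namespace stuck in Terminating: try a normal delete first; if that fails, dump the
    # namespace JSON, clear spec.finalizers and PUT it back to the /finalize endpoint through a local
    # `kubectl proxy` on port 8081 (started on demand).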
try:
v1.delete_namespace(delete_job_name)
except Exception as eeeeee:
print(eeeeee)
command0 = "kubectl get namespace " + delete_job_name + " -o json > /tfdata/tfcnn/deletebuf/" + delete_job_name + ".json"
os.system(command0)
tmp = load_config("/tfdata/tfcnn/deletebuf/" + delete_job_name + ".json")
tmp["spec"]["finalizers"] = []
save_config(tmp, "/tfdata/tfcnn/deletebuf/" + delete_job_name + ".json")
try:
command1 = 'curl -k -H "Content-Type: application/json" -X PUT --data-binary @/tfdata/tfcnn/deletebuf/' + delete_job_name + '.json http://127.0.0.1:8081/api/v1/namespaces/'+delete_job_name+'/finalize'
os.system(command1)
except Exception as helpe:
print(helpe)
commandopen = 'kubectl proxy --port=8081'
os.system(commandopen)
os.system(command1)
def deletehelp2(delete_job_name,v1):
v1.delete_namespace(delete_job_name)
command0 = "kubectl get namespace " + delete_job_name + " -o json > /tfdata/tfcnn/deletebuf/" + delete_job_name + ".json"
os.system(command0)
tmp = load_config("/tfdata/tfcnn/deletebuf/" + delete_job_name + ".json")
tmp["spec"]["finalizers"] = []
save_config(tmp, "/tfdata/tfcnn/deletebuf/" + delete_job_name + ".json")
try:
command1 = 'curl -k -H "Content-Type: application/json" -X PUT --data-binary @/tfdata/tfcnn/deletebuf/' + delete_job_name + '.json http://127.0.0.1:8081/api/v1/namespaces/' + delete_job_name + '/finalize'
os.system(command1)
except Exception as helpe:
print(helpe)
commandopen = 'kubectl proxy --port=8081'
os.system(commandopen)
os.system(command1)
def parse():
parser = argparse.ArgumentParser(description="Node Monitor")
parser.add_argument('--save_path', default='/tfdata/nodedata', help='save path')
parser.add_argument('--database',default="NODEMESSAGE",help="save database")
parser.add_argument('--derivation',default=10,help='sampling rate')
parser.add_argument('--measurement',default="NODEMESSAGE",help="save measurement")
# parser.add_argument('--train_pg', action='store_true', help='whether train policy gradient')
# parser.add_argument('--train_dqn', action='store_true', help='whether train DQN')
# parser.add_argument('--test_pg', action='store_true', help='whether test policy gradient')
# parser.add_argument('--test_dqn', action='store_true', help='whether test DQN')
args = parser.parse_args()
return args
def update_token():
cacheData = os.popen(
"echo $(kubectl describe secret $(kubectl get secret -n kube-system | grep ^admin-user | awk '{print $1}') -n kube-system | grep -E '^token'| awk '{print $2}')").read()
cacheToken = cacheData[:-1]
newToken = str(cacheToken)
return newToken
def make_headers(Token):
text = 'Bearer ' + Token
headers = {'Authorization': text}
return headers
def catch_message(url):
global aToken
aToken = update_token()
headers = make_headers(aToken)
response = requests.get(url,headers=headers,verify=False)
res_json = response.json()
return res_json
def database_create(databasename):
database_list = Global_Influx.Client_all.get_list_database()
creating = True
for db in database_list:
dbl = list(db.values())
if databasename in dbl:
creating = False
break
if creating:
Global_Influx.Client_all.create_database(databasename)
# Global_Influx.Client_all.create_database(databasename)
def tongji_adjust_number(aim_list):
tongji_wenjian = load_config('modnum.json')
aim_key_lists = list(tongji_wenjian.keys())
for i in aim_list:
if i in aim_key_lists:
tongji_wenjian[i]+=1
else:
tongji_wenjian[i]=1
save_config(tongji_wenjian,'modnum.json')
def tongji_waiting_queue(submit_job_name,time_submit_now):
waiting_time = load_config('waiting_time.json')
waited = list(waiting_time.keys())
if submit_job_name not in waiting_time:
waiting_time[submit_job_name] = time_submit_now
save_config(waiting_time,'waiting_time.json')
def match_cpu(raw_data):
cache = raw_data[:-1]
matched_data = math.ceil(int(cache)/1e6)
return matched_data
def match_memory(raw_data):
cache = raw_data[:-2]
matched_data = math.ceil(int(cache)/1024)
return matched_data
def match_timestamp(raw_data):
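    # Convert the RFC3339 timestamp string returned by the metrics API into epoch seconds (UTC).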
EPOCH = UTC.localize(datetime.utcfromtimestamp(0))
timestamp = parser.parse(raw_data)
if not timestamp.tzinfo:
print("XXX")
timestamp = UTC.localize(timestamp)
s = (timestamp - EPOCH).total_seconds()
return int(s)
def generate_item(response,measurement):
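    # Build InfluxDB points from the metrics-API response: per-node CPU (millicores) and memory (MiB)
    # usage plus utilization ratios against the hard-coded allocatable capacities defined below.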
node_cpu = {}
node_cpu['k8s-master'] = 64000 - 8000
node_cpu['k8s-worker0'] = 24000 - 400
node_cpu['k8s-worker2'] = 24000 - 400
node_cpu['k8sworker1'] = 16000 - 520
node_cpu['k8s-worker3'] = 24000 - 150
node_cpu['k8s-worker4'] = 24000 - 150
node_cpu['k8s-worker5'] = 24000 - 150
node_cpu['k8s-worker6'] = 16000 - 150
node_cpu['k8s-worker7'] = 16000 - 150
node_cpu['k8s-worker8'] = 16000 - 150
node_cpu['k8s-worker9'] = 16000 - 150
node_cpu['k8s-worker10'] = 16000 - 150
node_cpu['k8s-worker11'] = 24000 - 300
node_cpu['k8s-worker12'] = 16000 - 150
node_cpu['k8s-worker13'] = 16000 - 150
node_cpu['k8s-worker14'] = 16000 - 150
node_cpu['k8s-worker15'] = 16000 - 150
node_cpu['k8s-worker16'] = 16000 - 150
node_cpu['k8s-worker17'] = 24000 - 150
node_cpu['k8s-worker18'] = 16000 - 150
node_cpu['k8s-worker19'] = 32000 - 150
node_cpu['k8s-worker20'] = 24000 - 150
node_memory = {}
node_memory['k8s-master'] = float(251 * 1024 - 32000)
node_memory['k8s-worker0'] = float(94 * 1024 - 4000)
node_memory['k8s-worker2'] = float(94 * 1024 - 3000)
node_memory['k8sworker1'] = float(125 * 1024 - 4500)
node_memory['k8s-worker3'] = float(94 * 1024 - 2200)
node_memory['k8s-worker4'] = float(188 * 1024 - 2200)
node_memory['k8s-worker5'] = float(94 * 1024 - 2200)
node_memory['k8s-worker6'] = float(62 * 1024 - 2000)
node_memory['k8s-worker7'] = float(62 * 1024 - 2000)
node_memory['k8s-worker8'] = float(62 * 1024 - 2000)
node_memory['k8s-worker9'] = float(62 * 1024 - 2000)
node_memory['k8s-worker10'] = float(62 * 1024 - 2000)
node_memory['k8s-worker11'] = float(94 * 1024 - 2200)
node_memory['k8s-worker12'] = float(62 * 1024 - 2000)
node_memory['k8s-worker13'] = float(62 * 1024 - 2000)
node_memory['k8s-worker14'] = float(62 * 1024 - 2000)
node_memory['k8s-worker15'] = float(62 * 1024 - 2000)
node_memory['k8s-worker16'] = float(62 * 1024 - 2000)
node_memory['k8s-worker17'] = float(94 * 1024 - 2000)
node_memory['k8s-worker18'] = float(62 * 1024 - 2000)
node_memory['k8s-worker19'] = float(125 * 1024 - 2000)
node_memory['k8s-worker20'] = float(94 * 1024 - 2000)
points = []
# content = {}
timestamp = response['items'][0]['metadata']['creationTimestamp']
for item in response['items']:
content = {
'measurement': measurement,
'tags':{
"nodes": item['metadata']['name']
},
'fields': {
'cpu': match_cpu(item['usage']['cpu']),
'memory': match_memory(item['usage']['memory']),
'cpu_percent': float(match_cpu(item['usage']['cpu'])/node_cpu[item['metadata']['name']]),
'memory_percent': float(match_memory(item['usage']['memory']) / node_memory[item['metadata']['name']])
},
'time': match_timestamp(timestamp)
}
points.append(content)
return points
def DeletefromDB(Client,DatabaseName):
databases = Client.get_list_database()
for Cn in databases:
if DatabaseName in Cn.values():
Client.drop_database(DatabaseName)
break
class Node_mess(multiprocessing.Process):
def __init__(self,url,args,tasks,v1):
multiprocessing.Process.__init__(self)
self.url = url
self.args = args
self.derivation = args.derivation
self.time_mess = {}
self.cpu_mess = {}
self.memory_mess = {}
self.cpu_per = {}
self.memory_per = {}
self.node_cpu = {}
self.node_cpu['k8s-master'] = 64000 - 8000
self.node_cpu['k8s-worker0'] = 24000 - 400
self.node_cpu['k8s-worker2'] = 24000 - 400
self.node_cpu['k8sworker1'] = 16000 - 520
self.node_cpu['k8s-worker3'] = 24000 - 150
self.node_cpu['k8s-worker4'] = 24000 - 150
self.node_cpu['k8s-worker5'] = 24000 - 150
self.node_cpu['k8s-worker6'] = 16000 - 150
self.node_cpu['k8s-worker7'] = 16000 - 150
self.node_cpu['k8s-worker8'] = 16000 - 150
self.node_cpu['k8s-worker9'] = 16000 - 150
self.node_cpu['k8s-worker10'] = 16000 - 150
self.node_cpu['k8s-worker11'] = 24000 - 300
self.node_cpu['k8s-worker12'] = 16000 - 150
self.node_cpu['k8s-worker13'] = 16000 - 150
self.node_cpu['k8s-worker14'] = 16000 - 150
self.node_cpu['k8s-worker15'] = 16000 - 150
self.node_cpu['k8s-worker16'] = 16000 - 150
self.node_cpu['k8s-worker17'] = 24000 - 150
self.node_cpu['k8s-worker18'] = 16000 - 150
self.node_cpu['k8s-worker19'] = 32000 - 150
self.node_cpu['k8s-worker20'] = 24000 - 150
self.node_memory = {}
self.node_memory['k8s-master'] = float(251 * 1024 - 32000)
self.node_memory['k8s-worker0'] = float(94 * 1024 - 4000)
self.node_memory['k8s-worker2'] = float(94 * 1024 - 3000)
self.node_memory['k8sworker1'] = float(125 * 1024 - 4500)
self.node_memory['k8s-worker3'] = float(94 * 1024 - 2200)
self.node_memory['k8s-worker4'] = float(188 * 1024 - 2200)
self.node_memory['k8s-worker5'] = float(94 * 1024 - 2200)
self.node_memory['k8s-worker6'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker7'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker8'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker9'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker10'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker11'] = float(94 * 1024 - 2200)
self.node_memory['k8s-worker12'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker13'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker14'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker15'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker16'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker17'] = float(94 * 1024 - 2000)
self.node_memory['k8s-worker18'] = float(62 * 1024 - 2000)
self.node_memory['k8s-worker19'] = float(125 * 1024 - 2000)
self.node_memory['k8s-worker20'] = float(94 * 1024 - 2000)
# self.derivation = derivation
self.arg = args
self.tasks = tasks
self.v1 = v1
self.database = args.database
self.measurement = args.measurement
self.save_path = args.save_path
if not os.path.exists(self.arg.save_path):
os.makedirs(self.arg.save_path)
database_create(self.database)
self.client = influxdb.InfluxDBClient('192.168.128.10',port=8086,username='admin',password='<PASSWORD>',database=self.database)
#derivation
# def node_measurement(self,node_list):
# # Global_Influx.Client_all.get_list_measurements()
def run(self):
print(multiprocessing.current_process().pid)
print(os.getpid())
response = catch_message(self.url)
self.time_mess['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.cpu_mess['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.memory_mess['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.cpu_per['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.memory_per['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
for item in response['items']:
self.time_mess[item['metadata']['name']] = [item['timestamp']]
self.cpu_mess[item['metadata']['name']] = [match_cpu(item['usage']['cpu'])]
self.memory_mess[item['metadata']['name']] = [match_memory(item['usage']['memory'])]
self.cpu_per[item['metadata']['name']] = [float(match_cpu(item['usage']['cpu'])/self.node_cpu[item['metadata']['name']])]
self.memory_per[item['metadata']['name']] = [float(match_memory(item['usage']['memory']) / self.node_memory[item['metadata']['name']])]
self.client.write_points(generate_item(response,self.measurement),'s',database=self.database)
time.sleep(self.derivation)
while True:
response = catch_message(self.url)
self.time_mess['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.cpu_mess['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.memory_mess['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.cpu_per['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.memory_per['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
for item in response['items']:
self.time_mess[item['metadata']['name']].append(item['timestamp'])
self.cpu_mess[item['metadata']['name']].append(match_cpu(item['usage']['cpu']))
self.memory_mess[item['metadata']['name']].append(match_memory(item['usage']['memory']))
self.cpu_per[item['metadata']['name']].append(float(match_cpu(item['usage']['cpu'])/self.node_cpu[item['metadata']['name']]))
self.memory_per[item['metadata']['name']].append(float(match_memory(item['usage']['memory']) / self.node_memory[item['metadata']['name']]))
self.client.write_points(generate_item(response, self.measurement), 's', database=self.database)
if len(self.time_mess['creation'])%30==0 and len(self.time_mess['creation']) > 0:
data_frame = pd.DataFrame(self.time_mess)
data_frame.to_csv(self.save_path + '/' + 'struct.csv', mode='a+', index=False, sep=',')
print(self.cpu_mess)
print(len(self.cpu_mess))
for keyss in self.cpu_mess:
print(keyss+": "+str(len(self.cpu_mess[keyss])))
data_frame2 = pd.DataFrame(self.cpu_mess)
data_frame2.to_csv(self.save_path + '/' + 'node_cpu.csv', mode='a+', index=False, sep=',')
data_frame3 = pd.DataFrame(self.memory_mess)
data_frame3.to_csv(self.save_path + '/' + 'node_memory.csv', mode='a+', index=False, sep=',')
data_frame4 = pd.DataFrame(self.cpu_per)
data_frame4.to_csv(self.save_path + '/' + 'node_cpu_per.csv', mode='a+', index=False, sep=',')
data_frame5 = pd.DataFrame(self.memory_per)
data_frame5.to_csv(self.save_path + '/' + 'node_memory_per.csv', mode='a+', index=False, sep=',')
f1 = open('/tfdata/nodedata/node.json', 'r', encoding='utf-8')
res = f1.read()
a = json.loads(res)
f1.close()
node_layout = {}
node_list = [i.metadata.name for i in self.v1.list_node().items]
for node in node_list:
node_layout[node] = []
                for ns in self.tasks['ns']:
                    tmp_layout = self.tasks['nslayout']
if tmp_layout[ns]:
pod_list = [i for i in self.v1.list_namespaced_pod(ns).items]
for pod in pod_list:
try:
node_layout[pod.spec.node_name].append(pod.metadata.name)
except Exception as e0:
print(e0)
a.append(node_layout)
f2 = open('/tfdata/nodedata/node.json', 'w', encoding='utf-8')
                node_json = json.dumps(a, ensure_ascii=False, indent=4)  # serialize the layout list to a JSON string
f2.write(node_json)
f2.close()
for key in self.time_mess:
self.time_mess[key] = []
self.cpu_mess[key] = []
self.memory_mess[key] = []
self.memory_per[key] = []
self.cpu_per[key] = []
time.sleep(self.derivation)
def get_ns(v1):
ns_list = []
for i in v1.list_namespace().items:
ns_list.append(i.metadata.name)
return ns_list
# def get_layout():
# def get_remain():
def Monitor_job(tasks,lock,v1,jobs):
time.sleep(10)
while True:
if tasks['start'] == False:
break
ns_list = get_ns(v1)
# print(ns_list)
if tasks['start'] == True and tasks['count'] == 0:
time.sleep(30)
pass
else:
for ns in tasks['ns']:
# print(ns+'If in list:'+str(ns in ns_list))
if ns not in ns_list and ns not in tasks['retry'] and not tasks['modulate']:
try_times = 5
while try_times > 0:
time.sleep(float(random.randint(3, 5)))
ns_list = get_ns(v1)
if ns in ns_list:
break
try_times=try_times-1
if try_times <=0 :
lock.acquire()
ns_tmp = tasks['ns']
if ns in ns_tmp:
ns_tmp.remove(ns)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
is_layout_keys = list(is_layout.keys())
if ns in is_layout_keys:
is_layout.pop(ns)
tasks['nslayout'] = is_layout
count_tmp = len(ns_tmp)
tasks['count'] = count_tmp
lock.release()
def make_time_query(time_base,mode=0):
if mode == 0:
time_query = (math.floor(time_base-1))
time_query_str = str(time_query)+'000000000'
else:
time_query = (math.ceil(time_base+1))
time_query_str = str(time_query)+'000000000'
return time_query_str
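# Usage sketch (illustrative, not taken from the original call sites): make_time_query() turns a
# Unix timestamp into the nanosecond-precision epoch string that InfluxDB time filters expect.
# mode=0 floors to one second earlier (a safe lower bound); any other mode ceils to one second
# later (a safe upper bound), so a [start, end] pair always encloses the sampled points, e.g.:
#   start_ns = make_time_query(1580976233.4, mode=0)   # '1580976232000000000'
#   end_ns = make_time_query(1580976233.4, mode=1)     # '1580976235000000000'
#   "select * from NODEMESSAGE where time >= %s and time <= %s" % (start_ns, end_ns)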
def catch_node_step_msg(jobs,job_name,tasks,lock,batch,flops,params,mode):
node_influx_client = influxdb.InfluxDBClient(host='192.168.128.10',username='admin',password='<PASSWORD>',database='NODEMESSAGE')
step_influx_client = influxdb.InfluxDBClient(host='192.168.128.10',username='admin',password='<PASSWORD>',database='PREDICT')
    jieshu = False  # ("jieshu" = finished) marks an abnormal/terminal exit, kept for bookkeeping
    kankan = False  # ("kankan" = take a look) set True once the job object is reloaded successfully
lock.acquire()
for jo in jobs:
if jo == job_name:
job = reload_jobs(job_name,-3)
kankan = True
print('reload job success!')
break
lock.release()
if kankan:
job_measure = job.measure
else:
return
print("job measure: %s" % job.measure)
pre_list = job_measure.split(' ')
measure_s = pre_list[0] + 'S' + pre_list[-1]
measure_load = pre_list[0] + 'L' + pre_list[-1]
measure_t = pre_list[0] + 'T' + pre_list[-1]
count = 0
count2 = 0
count111 = 0
while True:
pod_status = [i.status.phase for i in job.v1.list_namespaced_pod(job.name).items]
run_result = pd.value_counts(pod_status)
run_result_dict = dict(run_result)
print(run_result_dict)
if 'Running' in pod_status and run_result_dict['Running'] == (job.ps_replicas + job.worker_replicas):
time.sleep(10)
lock.acquire()
print("Select the loayout!")
tmp_layout = tasks['nslayout']
tmp_keys = list(tmp_layout.keys())
if job_name in tmp_keys and tmp_layout[job_name] == False:
tmp_layout_config = {}
for i in job.v1.list_namespaced_pod(job_name).items:
tmp_layout_config[i.metadata.name] = i.spec.node_name
fp = open('/tfdata/k8snfs/' + job_name + '/layout.json', 'w', encoding='utf-8')
                # ensure_ascii defaults to True, which escapes non-ASCII characters as \uXXXX;
                # set it to False so they are written out readably
                dicc_json = json.dumps(tmp_layout_config, ensure_ascii=False, indent=4)  # serialize the dict to a JSON string
fp.write(dicc_json)
fp.close()
tmp_layout[job_name] = True
tasks['nslayout'] = tmp_layout
lock.release()
break
# elif 'Running' in pod_status:
elif 'Succeeded' in pod_status or 'Failed' in pod_status:
jieshu = True
print("Exception exit! Pending Problem!")
lock.acquire()
tmp_reload_ns = tasks['retry']
lock.release()
print("Retrying jobs is:")
print(tmp_reload_ns)
print(not tasks['modulate'])
print(job.name not in tmp_reload_ns and not tasks['modulate'])
try:
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
res_job_config = load_config(save_res_path)
res_job_config_keys = list(res_job_config.keys())
if 'endtimeraw' not in res_job_config_keys:
res_job_config['endtimeraw'] = time.time() - 10
save_config(res_job_config, save_res_path)
print("save end time success!!")
except Exception as eee:
print("Delete Problem:")
print(eee)
time.sleep(3)
if job.name not in tmp_reload_ns and not tasks['modulate']:
time.sleep(5)
try:
exit_reason = [i.status.container_statuses[0].state.terminated.reason for i in
v1.list_namespaced_pod(job.name).items]
print(exit_reason)
exit_ict = {'reasons': exit_reason}
exit_path = '/tfdata/k8snfs/%s/exit_reason.json' % job.name
exit_json = json.dumps(exit_ict, ensure_ascii=False, indent=4)
fw_exit = open(exit_path, 'w', encoding='utf-8')
fw_exit.write(exit_json)
fw_exit.close()
except Exception as e:
print(e)
time.sleep(3)
# if 'Failed' in pod_status:
# break
lock.acquire()
command = 'kubectl delete -f /tfdata/tfcnn/expjobraw/' + job.name + '.yaml'
os.system(command)
try:
deletehelp2(job.name, v1)
except Exception as we0:
print(we0)
# v1.delete_namespace(job.name)
ns_tmp = tasks['ns']
ns_tmp.remove(job.name)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
is_layout.pop(job.name)
for i in range(len(jobs)):
if jobs[i] == job.name:
jobs.pop(i)
break
tasks['nslayout'] = is_layout
tasks['count'] -= 1
# jobs_tmp = jobs
# jobs_tmp.remove(job.name)
# jobs = jobs_tmp
if 'Failed' in pod_status:
fails = tasks['fail']
fails.append(job.name)
tasks['fail'] = fails
finishes = tasks['finish']
finishes.append(job.name)
tasks['finish'] = finishes
print("finish remove %s from jobs!" % job.name)
lock.release()
return
else:
time.sleep(10)
elif 'Pending' in pod_status:
# pod_status = [i.status.phase for i in job.v1.list_namespaced_pod(job_name).items]
tmp_layout = tasks['nslayout']
tmp_keys = list(tmp_layout.keys())
if job_name in tmp_keys:
tmp_layout_config = {}
for i in job.v1.list_namespaced_pod(job_name).items:
if i.status.phase == 'Running':
tmp_layout_config[i.metadata.name] = i.spec.node_name
if tmp_layout_config:
fp = open('/tfdata/k8snfs/' + job_name + '/layout.json', 'w', encoding='utf-8')
                    # ensure_ascii defaults to True, which escapes non-ASCII characters as \uXXXX;
                    # set it to False so they are written out readably
                    dicc_json = json.dumps(tmp_layout_config, ensure_ascii=False, indent=4)  # serialize the dict to a JSON string
fp.write(dicc_json)
fp.close()
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
res_config = load_config(save_res_path)
keys_res = res_config.keys()
if 'reloadtime' not in keys_res:
res_config['reloadtime'] = []
if (job.worker_replicas>0 and job.ps_replicas>0):
if count2 >= 5:
aim_steps = step_influx_client.query(
"select training_step from " + measure_t + " order by desc limit 1")
aim_key = aim_steps.keys()
result_inter = aim_steps[aim_key[0]]
result_items = list(result_inter)
aim_step = int(result_items[0]['training_step'])
print(aim_step)
save_job_change_layout(job.name, 1, 1, aim_step, mode=1)
lock.acquire()
command = 'kubectl delete -f /tfdata/tfcnn/expjobraw/' + job.name + '.yaml'
os.system(command)
try:
deletehelp2(job.name,v1)
except Exception as we0:
print(we0)
# v1.delete_namespace(job.name)
ns_tmp = tasks['ns']
ns_tmp.remove(job.name)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
is_layout.pop(job.name)
for i in range(len(jobs)):
if jobs[i] == job.name:
jobs.pop(i)
break
tasks['nslayout'] = is_layout
tasks['count'] -= 1
tmp_next = tasks['next']
tmp_next.append(job.name)
tmp_next_time_config = tasks['nexttimes']
tmp_next_time_config[job.name] = 0
tasks['nexttimes'] = tmp_next_time_config
tasks['next'] = tmp_next
lock.release()
time.sleep(15)
jieshu = True
print("Exception exit! Pending Problem!")
count2+=1
return
else:
count2 +=1
time.sleep(21.5)
elif not run_result_dict:
ceshi_tmp_ns = tasks['ns']
if job.name not in ceshi_tmp_ns:
if count111 <= 4:
count111 += 1
time.sleep(22)
else:
return
if count >= 8:
jieshu = True
print("Exception exit! Creating Problem!")
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
res_job_config = load_config(save_res_path)
res_job_config['errtime'] = time.time() - 5
save_config(res_job_config, save_res_path)
lock.acquire()
try:
deletehelp2(job.name, v1)
except Exception as we0:
print(we0)
# v1.delete_namespace(job.name)
ns_tmp = tasks['ns']
ns_tmp.remove(job.name)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
is_layout.pop(job.name)
for i in range(len(jobs)):
if jobs[i] == job.name:
jobs.pop(i)
break
tasks['nslayout'] = is_layout
# job_tmp.pop(ns)
# tasks['job'] = job_tmp
tasks['count'] -= 1
lock.release()
return
count+=1
time.sleep(21.5)
else:
time.sleep(21.5)
count11 = 0
count22 = 0
count111 = 0
while True:
# print(b[0].status.container_statuses[0].state.terminated.reason)
# lock.acquire()
tmp_retrys = tasks['retry']
# tmp_reload_ns = tasks['retry']
tmp_retry_solution = tasks['solution']
# lock.release()
solution_keys = list(tmp_retry_solution.keys())
if job.name in tmp_retrys and job.name in solution_keys:
lock.acquire()
tmp_layout = tasks['nslayout']
tmp_layout[job.name] = False
tasks['nslayout'] = tmp_layout
lock.release()
solution = tmp_retry_solution[job.name]
pod_status = [i.status.phase for i in v1.list_namespaced_pod(job.name).items]
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
res_job_config = load_config(save_res_path)
res_job_config_keys = list(res_job_config.keys())
if (not pod_status) or 'Succeeded' in pod_status or 'Failed' in pod_status:
lock.acquire()
tmp_retry_job = tasks['retry']
tmp_retry_job.remove(job.name)
tasks['retry'] = tmp_retry_job
tmp_retry_solution3 = tasks['solution']
tmp_retry_solution3.pop(job.name)
tasks['solution'] = tmp_retry_solution3
lock.release()
else:
if int(solution['type']) == 1:
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
res_config = load_config(save_res_path)
keys_res = res_config.keys()
if 'reloadtime' not in keys_res:
res_config['reloadtime'] = []
tmp_replicas = job.worker_replicas
aim_steps = step_influx_client.query(
"select training_step from " + measure_t + " order by desc limit 1")
aim_key = aim_steps.keys()
result_inter = aim_steps[aim_key[0]]
result_items = list(result_inter)
aim_step = int(result_items[0]['training_step'])
print(aim_step)
aim_worker_replicas = job.worker_replicas + int(solution['worker'])
if aim_worker_replicas <= 0:
aim_worker_replicas = 1
aim_step = math.ceil((aim_step * tmp_replicas) / (aim_worker_replicas))
aim_ps_replicas = job.ps_replicas + int(solution['ps'])
if aim_ps_replicas <= 0:
aim_ps_replicas = 1
if aim_worker_replicas <= 0:
aim_worker_replicas = 1
save_job_change_layout(job.name, aim_ps_replicas, aim_worker_replicas, aim_step, mode=1)
lock.acquire()
# method1 = {'type': 1, 'ps': 0, 'worker': 1}
time1 = time.time()
job.retry_tf(job.cpu_allocate, job.memory_allocate, aim_step, aim_worker_replicas, aim_ps_replicas)
time2 = time.time()
tmp_retry_job = tasks['retry']
tmp_retry_job.remove(job.name)
tasks['retry'] = tmp_retry_job
tmp_retry_solution3 = tasks['solution']
tmp_retry_solution3.pop(job.name)
tasks['solution'] = tmp_retry_solution3
time.sleep(4)
lock.release()
tmp_reload = res_config['reloadtime']
tmp_reload.append((time2 - time1))
res_config['reloadtime'] = tmp_reload
save_config(res_config, save_res_path)
time.sleep(4.2)
count33 = 0
while count33 < 15:
pod_status3 = [i.status.phase for i in v1.list_namespaced_pod(job.name).items]
run_result3 = pd.value_counts(pod_status3)
run_result_dict3 = dict(run_result3)
print("Retry assignmenting for pods:")
print(run_result_dict3)
if 'Running' in pod_status3 and run_result_dict3['Running'] == (
job.ps_replicas + job.worker_replicas):
break
else:
count33+=1
time.sleep(5.6)
job.write_retry(mode=0)
else:
pod_status2 = [i.status.phase for i in v1.list_namespaced_pod(job.name).items]
run_result2 = pd.value_counts(pod_status2)
run_result_dict2 = dict(run_result2)
print(run_result_dict2)
if 'Running' in pod_status2 and run_result_dict2['Running'] == (job.ps_replicas + job.worker_replicas):
time.sleep(6)
lock.acquire()
print("Select the loayout!")
tmp_layout = tasks['nslayout']
lock.release()
tmp_keys = list(tmp_layout.keys())
if job_name in tmp_keys and tmp_layout[job_name] == False:
tmp_layout_config = {}
for i in job.v1.list_namespaced_pod(job_name).items:
tmp_layout_config[i.metadata.name] = i.spec.node_name
fp = open('/tfdata/k8snfs/' + job_name + '/layout.json', 'w', encoding='utf-8')
                    # ensure_ascii defaults to True, which escapes non-ASCII characters as \uXXXX;
                    # set it to False so they are written out readably
                    dicc_json = json.dumps(tmp_layout_config, ensure_ascii=False, indent=4)  # serialize the dict to a JSON string
fp.write(dicc_json)
fp.close()
tmp_layout[job_name] = True
lock.acquire()
tasks['nslayout'] = tmp_layout
lock.release()
else:
time.sleep(10)
elif ('Succeeded' in pod_status2 or 'Failed' in pod_status2):
# # print(b[0].status.container_statuses[0].state.terminated.reason)
# pod_status = [i.status.phase for i in v1.list_namespaced_pod(ns).items]
# ['OOMKilled']
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
res_job_config = load_config(save_res_path)
res_job_config_keys = list(res_job_config.keys())
if 'endtimeraw' not in res_job_config_keys:
res_job_config['endtimeraw'] = time.time() - 10
save_config(res_job_config, save_res_path)
time.sleep(3)
if (job.name not in tmp_retrys) and not tasks['modulate']:
try:
exit_reason = [i.status.container_statuses[0].state.terminated.reason for i in
v1.list_namespaced_pod(job.name).items]
print(exit_reason)
exit_ict = {'reasons': exit_reason}
exit_path = '/tfdata/k8snfs/%s/exit_reason.json' % job.name
exit_json = json.dumps(exit_ict, ensure_ascii=False, indent=4)
fw_exit = open(exit_path, 'w', encoding='utf-8')
fw_exit.write(exit_json)
fw_exit.close()
except Exception as e:
print(e)
time.sleep(5)
lock.acquire()
command = 'kubectl delete -f /tfdata/tfcnn/expjobraw/' + job.name + '.yaml'
os.system(command)
print("delete this job %s!!!" % job.name)
try:
deletehelp2(job.name,v1)
except Exception as we0:
print(we0)
# v1.delete_namespace(job.name)
ns_tmp = tasks['ns']
ns_tmp.remove(job.name)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
is_layout.pop(job.name)
for i in range(len(jobs)):
if jobs[i] == job.name:
jobs.pop(i)
break
tasks['nslayout'] = is_layout
tasks['count'] -= 1
if 'Failed' in pod_status2:
fails = tasks['fail']
fails.append(job.name)
tasks['fail'] = fails
finishes = tasks['finish']
finishes.append(job.name)
tasks['finish'] = finishes
lock.release()
break
elif 'Pending' in pod_status2:
# pod_status = [i.status.phase for i in job.v1.list_namespaced_pod(job_name).items]
tmp_layout = tasks['nslayout']
tmp_keys = list(tmp_layout.keys())
if job_name in tmp_keys:
tmp_layout_config = {}
for i in job.v1.list_namespaced_pod(job_name).items:
if i.status.phase == 'Running':
tmp_layout_config[i.metadata.name] = i.spec.node_name
if tmp_layout_config:
fp = open('/tfdata/k8snfs/' + job_name + '/layout.json', 'w', encoding='utf-8')
                        # ensure_ascii defaults to True, which escapes non-ASCII characters as \uXXXX;
                        # set it to False so they are written out readably
                        dicc_json = json.dumps(tmp_layout_config, ensure_ascii=False, indent=4)  # serialize the dict to a JSON string
fp.write(dicc_json)
fp.close()
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
res_config = load_config(save_res_path)
keys_res = res_config.keys()
if 'reloadtime' not in keys_res:
res_config['reloadtime'] = []
if (job.worker_replicas>0 and job.ps_replicas>0):
if count22 >= 5:
aim_steps = step_influx_client.query(
"select training_step from " + measure_t + " order by desc limit 1")
aim_key = aim_steps.keys()
result_inter = aim_steps[aim_key[0]]
result_items = list(result_inter)
aim_step = int(result_items[0]['training_step'])
print(aim_step)
save_job_change_layout(job.name, 1, 1, aim_step, mode=1)
lock.acquire()
command = 'kubectl delete -f /tfdata/tfcnn/expjobraw/' + job.name + '.yaml'
os.system(command)
try:
deletehelp2(job.name, v1)
except Exception as we0:
print(we0)
# v1.delete_namespace(job.name)
ns_tmp = tasks['ns']
ns_tmp.remove(job.name)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
is_layout.pop(job.name)
for i in range(len(jobs)):
if jobs[i] == job.name:
jobs.pop(i)
break
tasks['nslayout'] = is_layout
tasks['count'] -= 1
tmp_next = tasks['next']
tmp_next.append(job.name)
tmp_next_time_config = tasks['nexttimes']
tmp_next_time_config[job.name] = 0
tasks['nexttimes'] = tmp_next_time_config
tasks['next'] = tmp_next
lock.release()
time.sleep(15)
jieshu = True
print("Exception exit! Pending Problem!")
count22+=1
return
else:
count22 += 1
time.sleep(21.5)
elif not run_result_dict2:
ceshi_tmp_ns = tasks['ns']
if job.name not in ceshi_tmp_ns:
if count111 <= 5:
count111 += 1
time.sleep(15)
else:
return
if count11 >= 8:
jieshu = True
print("Exception exit! Creating Problem!")
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
res_job_config = load_config(save_res_path)
res_job_config['errtime'] = time.time() - 5
save_config(res_job_config, save_res_path)
lock.acquire()
command = 'kubectl delete -f /tfdata/tfcnn/expjobraw/' + job.name + '.yaml'
os.system(command)
try:
deletehelp2(job.name,v1)
except Exception as we0:
print(we0)
# v1.delete_namespace(job.name)
ns_tmp = tasks['ns']
ns_tmp.remove(job.name)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
is_layout.pop(job.name)
for i in range(len(jobs)):
if jobs[i] == job.name:
jobs.pop(i)
break
tasks['nslayout'] = is_layout
# job_tmp.pop(ns)
# tasks['job'] = job_tmp
tasks['count'] -= 1
lock.release()
return
count11 += 1
time.sleep(21)
else:
time.sleep(15)
# 1580976233000000000
def get_load_value(node_index,cpu_base,memory_base,total_cpu_base,total_memory_base):
keys = node_index.keys()
alpha = 0.78
cpu_score = 0
memory_score = 0
node_use = []
node_cpu = []
node_mem = []
for key in keys:
if node_index[key] <=12:
node_use.append(key)
for key in node_use:
cpu_score+= cpu_base[key]
node_cpu.append(cpu_base[key])
memory_score+= memory_base[key]
node_mem.append(memory_base[key])
cpu_score = cpu_score/len(node_use)
memory_score = memory_score/len(node_use)
cpu_score = alpha*cpu_score+(1-alpha)*total_cpu_base
memory_score = alpha*memory_score+(1-alpha)*total_memory_base
return cpu_score,memory_score,node_cpu,node_mem
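# Usage sketch (illustrative): callers feed this with the output of schedule_base(), which, as
# used below, yields per-node CPU/memory utilisation ratios in [0, 1] plus cluster-wide totals.
# Only nodes whose index in node_index is <= 12 are averaged, and that average is blended with
# the cluster-wide figure (alpha = 0.78) into a single CPU score and memory score, e.g.:
#   cpu_score, mem_score, node_cpu_list, node_mem_list = get_load_value(
#       node_index, cpu_nodes, memory_nodes, total_cpu_use, total_mem_use)
#   if cpu_score < 0.4:
#       pass  # cluster treated as lightly loaded; Submit_job drains the job buffer in this case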
def check_path(name):
train_dir = os.path.join('/tfdata/k8snfs/', name)
print(train_dir)
if not os.path.exists(train_dir):
os.makedirs(train_dir)
return train_dir
def write_step_meg(job_name):
job = reload_jobs(job_name,-3)
measure = job.measure
client_pre = influxdb.InfluxDBClient(host=job.dbhost, port=8086, username='admin', password='<PASSWORD>',
database="PREDICT")
pre_list = measure.split(" ")
# measure_s = pre_list[0] + 'S' + pre_list[-1]
measure_t = pre_list[0] + 'T' + pre_list[-1]
step_items = [
{
'measurement': measure_t,
'tags': {
'task': job.task_id,
'runtimes': job.rtimes,
'retry': job.retry
},
'fields': {
'training_step': job.training_step,
'worker': job.worker_replicas,
'ps': job.ps_replicas
}
}
]
client_pre.write_points(step_items, time_precision="ms", database="PREDICT")
print('write initial measure_t success!!')
job.write_retry(mode=0)
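# Note on the measurement naming convention used throughout this script: job.measure appears to
# be a short two-token string, and derived InfluxDB measurement names are built as
# <token0><letter><token1> ('T' for the training-step series written above; 'S', 'L', 'W' and 'U'
# variants are built the same way elsewhere). write_step_meg() seeds the 'T' series with the
# job's initial training_step and its ps/worker replica counts so later rescaling decisions can
# read the most recent step back out of InfluxDB.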
def loss_server_conn(ADDR,measure):
loss_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
loss_client.connect(ADDR)
loss_client.send(bytes(measure, 'utf-8'))
connect_try = 5
try_times = 1
connected = False
while True:
if try_times > connect_try:
break
msg_from_server = loss_client.recv(4096)
if not msg_from_server:
break
msg_from_server_str = str(msg_from_server.decode('utf-8'))
msg_from_server_list = msg_from_server_str.split(" ")
if msg_from_server_list[0] == '400':
connected = True
break
loss_client.send(bytes(measure, 'utf-8'))
try_times = try_times + 1
return connected
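# Usage sketch (illustrative; LOSSHOST/LOSSPORT are the module-level settings referenced in
# Submit_job below): loss_server_conn() registers a job's measurement name with the external
# loss-collection server. The handshake is: send the measure string, wait for a reply whose
# first token is '400' (acknowledged), and retry up to 5 times otherwise, e.g.:
#   connected = loss_server_conn((LOSSHOST, LOSSPORT), job.measure)
#   if not connected:
#       pass  # proceed without loss streaming for this job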
def Submit_job(tasks,lock,v1,jobs):
try:
rfr = joblib.load('rfr_batch.pkl')
except Exception as e0:
print(e0)
# est_mem = joblib.load('est_mem.pkl')
# est_cpu = joblib.load('est_cpu.pkl')
print('start to reload')
print(jobs)
global LOSSHOST,LOSSPORT
ADDR = (LOSSHOST,LOSSPORT)
PREHOST = '192.168.128.5'
PREPORT = 12529
ADDR2 = (PREHOST,PREPORT)
max_buffer_size = 5
job_basic = reload_jobs(tasks['last'],-1)
max_free_heap = MaxHeap(max_size=max_buffer_size,fn=worker_queue.value_free_load)
max_wight_heap = MaxHeap(max_size=max_buffer_size,fn=worker_queue.value_weight_load)
worker_buffer = tasks['buffer']
first_reload = False
time.sleep(20)
if worker_buffer:
first_reload = True
pool = multiprocessing.Pool(processes=38)
for job0 in jobs:
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job0, job0)
res_config = load_config(save_res_path)
batch_res = res_config['batch_res']
flops_res = res_config['flops_res']
params_res = res_config['params_res']
job01 = reload_jobs(job0,-3)
# catch_node_step_msg(jobs=,job_name=,tasks=,lock=,batch=,flops=,params=,mode=)
# loss_server_conn(ADDR,job01.measure)
pool.apply_async(catch_node_step_msg,args=(jobs,job0,tasks,lock,batch_res,flops_res,params_res,-1))
global_count = 1
while True:
print("Global Count is :%d" % global_count)
if tasks['start']==False:
break
tmp_aim_set = tasks['aim']
tmp_next_set = tasks['next']
tmp_ns_set = tasks['ns']
# tmp_buffer_set = tasks
if not tmp_aim_set and not tmp_next_set and tasks['buffercount'] <= 0:
if not tmp_ns_set:
break
if global_count >4 and global_count%10 != 0:
lock.acquire()
counts = tasks['count']
bufer_count = tasks['buffercount']
lock.release()
print(bufer_count)
if tasks['next'] and counts < tasks['size']:
print("panduan tasks in next")
lock.acquire()
tmp_next = tasks['next']
job_name = tmp_next.pop(0)
job_next_time = tasks['nexttimes'][job_name]
job_next_time+=1
tasks['nexttimes'][job_name] = job_next_time
tasks['next'] = tmp_next
lock.release()
job = reload_jobs(job_name, -3)
print("%s in next reload!" % job_name)
node_index, cpu_nodes, memory_nodes, total_cpu_use, total_mem_use = job_basic.schedule_base()
# mem_need = job.total_mem * total_mem_use + job.worker_replicas * job.memory_allocate + 2048 * job.ps_replicas
# cpu_need = job.total_cpu * total_cpu_use + job.worker_replicas * job.cpu_allocate + 1000 * job.ps_replicas
catch_worker = 0
catch_ps = 0
node_keys = cpu_nodes.keys()
for key in node_keys:
catch_ps_c = 0
catch_ps_m = 0
catch_worker_c = 0
catch_worker_m = 0
can_use_cpu = job.node_cpu[key] * (1 - cpu_nodes[key])
can_use_mem = job.node_memory[key] * (1 - memory_nodes[key])
first_try = True
endcpu = False
endmem = False
count_trys = 0
while (not endcpu) or (not endmem):
if first_try:
if can_use_cpu - 600 > 0:
catch_ps_c += 1
can_use_cpu = can_use_cpu - 600
else:
if can_use_cpu - job.cpu_allocate > 0:
catch_worker_c += 1
can_use_cpu = can_use_cpu - job.cpu_allocate
if can_use_mem - 2048 > 0:
catch_ps_m += 1
can_use_mem = can_use_mem - 2048
else:
if can_use_mem - job.memory_allocate > 0:
catch_worker_m += 1
can_use_mem = can_use_mem - job.memory_allocate
first_try = False
else:
if can_use_cpu - job.cpu_allocate > 0:
catch_worker_c += 1
can_use_cpu = can_use_cpu - job.cpu_allocate
else:
if can_use_cpu - 600 > 0:
catch_ps_c += 1
can_use_cpu = can_use_cpu - 600
else:
endcpu = True
if can_use_mem - job.memory_allocate > 0:
catch_worker_m += 1
can_use_mem = can_use_mem - job.memory_allocate
else:
if can_use_mem - 2048 > 0:
catch_ps_m += 1
can_use_mem = can_use_mem - 2048
else:
endmem = True
if catch_worker_c < catch_worker_m:
catch_worker += catch_worker_c
else:
catch_worker += catch_worker_m
if catch_ps_c < catch_ps_m:
catch_ps += catch_ps_c
else:
catch_ps += catch_ps_m
if catch_ps >= 1 and catch_worker >= 1:
break
print("In next catch ps: %d,worker:%d" % (catch_ps, catch_worker))
if catch_ps > 0 and catch_worker > 0:
lock.acquire()
job.update_step()
print("in next update step success!!")
write_step_meg(job.name)
submit_time_now = time.time()
tongji_waiting_queue(job.name, submit_time_now)
job.create_tf()
ns_tmp = tasks['ns']
ns_tmp.append(job.name)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
is_layout[job.name] = False
tasks['nslayout'] = is_layout
jobs.append(job.name)
tasks['count'] += 1
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
res_config = load_config(save_res_path)
batch_res = res_config['batch_res']
flops_res = res_config['flops_res']
params_res = res_config['params_res']
pool.apply_async(catch_node_step_msg,
args=(jobs, job.name, tasks, lock, batch_res, flops_res, params_res, 1))
# tasks['next'] = ''
lock.release()
else:
lock.acquire()
tmp_buffer_count0 = tasks['buffercount']
if tasks['nexttimes'][job.name] >=3 and tmp_buffer_count0 < max_buffer_size:
tmp_buffer_pool = tasks['buffer']
tmp_buffer_pool.append(job.name)
tasks['buffer'] = tmp_buffer_pool
tmp_buffer_count0+=1
tasks['buffercount'] = tmp_buffer_count0
tmp_next_time_config = tasks['nexttimes']
tmp_next_time_config.pop(job.name)
tasks['nexttimes'] = tmp_next_time_config
else:
tmp_next = tasks['next']
tmp_next.append(job.name)
tasks['next'] = tmp_next
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
# save_res_path = '/tfdata/tfcnn'
res_config = load_config(save_res_path)
lock.release()
time.sleep(30)
elif counts < tasks['size'] and bufer_count>0:
lock.acquire()
tmp_buffer_count = tasks['buffercount']
worker_buffer = tasks['buffer']
lock.release()
node_index, cpu_nodes, memory_nodes, total_cpu_use, total_mem_use = job_basic.schedule_base()
cpu_value, mem_value, cpu_node_value, mem_node_value = get_load_value(node_index=node_index,
cpu_base=cpu_nodes,
memory_base=memory_nodes,
total_cpu_base=total_cpu_use,
total_memory_base=total_mem_use)
if cpu_value < 0.4:
selected_job_name = worker_buffer[0]
print(selected_job_name)
ceshi_name = worker_buffer.pop(0)
print(ceshi_name)
# worker_buffer = max_free_heap.items
print(worker_buffer)
tasks['buffer'] = worker_buffer[:]
tmp_buffer_count = tmp_buffer_count - 1
tasks['buffercount'] = tmp_buffer_count
else:
selected_job_name = worker_buffer[0]
print(selected_job_name)
ceshi_name = worker_buffer.pop(0)
print(ceshi_name)
# worker_buffer = max_free_heap.items
print(worker_buffer)
tasks['buffer'] = worker_buffer[:]
tmp_buffer_count = tmp_buffer_count - 1
tasks['buffercount'] = tmp_buffer_count
job = reload_jobs(selected_job_name, -3)
tmp_ps_replicas = job.ps_replicas
tmp_worker_replicas = job.worker_replicas
pre_list = job.measure.split(" ")
measure_s = pre_list[0] + 'S' + pre_list[-1]
measure_t = pre_list[0] + 'T' + pre_list[-1]
influx_client = influxdb.InfluxDBClient(host='192.168.128.10', port=8086, username='admin',
password='<PASSWORD>',
database="PREDICT")
result = influx_client.query("select * from " + measure_t + " order by asc limit 1")
key = result.keys()
print(key)
result_inter = result[key[0]]
result_items = list(result_inter)
print(result_items)
trains_step = int(result_items[0]['training_step'])
tmp_worker_replicas = int(result_items[0]['worker'])
job.training_step = math.ceil(
trains_step * tmp_worker_replicas / job.worker_replicas)
save_job_change_layout(job.name, job.ps_replicas, job.worker_replicas,
job.training_step)
mem_need = job.total_mem * total_mem_use + job.worker_replicas * job.memory_allocate + 2048 * job.ps_replicas
cpu_need = job.total_cpu * total_cpu_use + job.worker_replicas * job.cpu_allocate + 750 * job.ps_replicas
catch_worker = 0
catch_ps = 0
node_keys = cpu_nodes.keys()
reach_ps = False
reach_worker = False
for key in node_keys:
catch_ps_c = 0
catch_ps_m = 0
catch_worker_c = 0
catch_worker_m = 0
can_use_cpu = job.node_cpu[key] * (1 - cpu_nodes[key])
can_use_mem = job.node_memory[key] * (1 - memory_nodes[key])
first_try = True
endcpu = False
endmem = False
while (not endcpu) or (not endmem):
if first_try:
if can_use_cpu - 600 > 0 and not reach_ps:
catch_ps_c += 1
can_use_cpu = can_use_cpu - 600
else:
if can_use_cpu - job.cpu_allocate > 0 and not reach_worker:
catch_worker_c += 1
can_use_cpu = can_use_cpu - job.cpu_allocate
if can_use_mem - 2048 > 0 and not reach_ps:
catch_ps_m += 1
can_use_mem = can_use_mem - 2048
else:
if can_use_mem - job.memory_allocate > 0 and not reach_worker:
catch_worker_m += 1
can_use_mem = can_use_mem - job.memory_allocate
first_try = False
else:
if can_use_cpu - job.cpu_allocate > 0 and not reach_worker:
catch_worker_c += 1
can_use_cpu = can_use_cpu - job.cpu_allocate
else:
if can_use_cpu - 600 > 0 and not reach_ps:
catch_ps_c += 1
can_use_cpu = can_use_cpu - 600
else:
endcpu = True
if can_use_mem - job.memory_allocate > 0 and not reach_worker:
catch_worker_m += 1
can_use_mem = can_use_mem - job.memory_allocate
else:
if can_use_mem - 2048 > 0 and not reach_ps:
catch_ps_m += 1
can_use_mem = can_use_mem - 2048
else:
endmem = True
if catch_worker_c < catch_worker_m:
catch_worker += catch_worker_c
else:
catch_worker += catch_worker_m
if catch_ps_c < catch_ps_m:
catch_ps += catch_ps_c
else:
catch_ps += catch_ps_m
if catch_ps >= job.ps_replicas:
reach_ps = True
if catch_worker >= job.worker_replicas:
reach_worker = True
if catch_ps >= job.ps_replicas and catch_worker >= job.worker_replicas:
break
print("catch_ps: %d catch_worker: %d" % (catch_ps, catch_worker))
if catch_ps < job.ps_replicas or catch_worker < job.worker_replicas:
tmp_ps = job.ps_replicas
tmp_worker = job.worker_replicas
if catch_ps > 0 and catch_worker > 0:
                        # resources are short for the requested layout; fall back to whatever fits
                        if catch_worker > job.worker_replicas:
                            catch_worker = job.worker_replicas
                        if catch_ps > job.ps_replicas:
                            catch_ps = job.ps_replicas
                        job.ps_replicas = catch_ps
                        job.worker_replicas = catch_worker
                        job.training_step = math.ceil(job.training_step * tmp_worker / job.worker_replicas)
                        save_job_change_layout(job.name, catch_ps, catch_worker, job.training_step)
                        job.update_step()
                        write_step_meg(job.name)
                        lock.acquire()
                        submit_time_now = time.time()
                        tongji_waiting_queue(job.name, submit_time_now)
                        job.create_tf()
                        ns_tmp = tasks['ns']
                        ns_tmp.append(job.name)
                        tasks['ns'] = ns_tmp
                        is_layout = tasks['nslayout']
                        is_layout[job.name] = False
                        tasks['nslayout'] = is_layout
                        jobs.append(job.name)
                        tasks['count'] += 1
                        lock.release()
                        save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
                        res_config = load_config(save_res_path)
                        batch_res = res_config['batch_res']
                        flops_res = res_config['flops_res']
                        params_res = res_config['params_res']
                        pool.apply_async(catch_node_step_msg,
                                         args=(
                                             jobs, job.name, tasks, lock, batch_res, flops_res, params_res, 1))
else:
job.ps_replicas = 1
job.worker_replicas = 1
job.training_step = math.ceil(job.training_step * tmp_worker)
save_job_change_layout(job.name, 1, 1, training_step=job.training_step)
lock.acquire()
tmp_next = tasks['next']
tmp_next.append(job.name)
tmp_next_time_config = tasks['nexttimes']
tmp_next_time_config[job.name] = 0
tasks['nexttimes'] = tmp_next_time_config
tasks['next'] = tmp_next
lock.release()
else:
lock.acquire()
job.update_step()
write_step_meg(job.name)
submit_time_now = time.time()
tongji_waiting_queue(job.name, submit_time_now)
job.create_tf()
# lock.acquire()
ns_tmp = tasks['ns']
ns_tmp.append(job.name)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
is_layout[job.name] = False
tasks['nslayout'] = is_layout
jobs.append(job.name)
tasks['count'] += 1
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
res_config = load_config(save_res_path)
batch_res = res_config['batch_res']
flops_res = res_config['flops_res']
params_res = res_config['params_res']
pool.apply_async(catch_node_step_msg,
args=(
jobs, job.name, tasks, lock, batch_res, flops_res, params_res, 1))
lock.release()
global_count+=1
time.sleep((83.3/3)*1.1)
if global_count % 10 == 0 or global_count<=4:
print('start to submit a job!!')
tmp_jobs0 = tasks['aim']
if not tmp_jobs0:
global_count+=1
continue
for _ in range(10):
lock.acquire()
counts = tasks['count']
bufer_count = tasks['buffercount']
lock.release()
if (counts >= tasks['size']) and (bufer_count >= max_buffer_size):
time.sleep(float(random.randint(7,9)))
pass
else:
print('select a job!!')
time.sleep(40)
if tasks['aim']:
tmp_jobs = tasks['aim']
aim_job0 = tmp_jobs[0]
tmp_jobs.pop(0)
tasks['aim'] = tmp_jobs
aim_job = reload_jobs(aim_job0,-1)
aim_job.retry = aim_job.retry+1
save_job_path = '/tfdata/k8snfs/%s/%s.json' % (aim_job.name, aim_job.name)
aim_job_config = load_config(save_job_path)
aim_job_config['retry'] = aim_job.retry
save_config(aim_job_config,save_job_path)
pre_list = aim_job.measure.split(" ")
measure_s = pre_list[0] + 'S' + pre_list[-1]
measure_t = pre_list[0] + 'T' + pre_list[-1]
measure_write = pre_list[0] + 'W' + pre_list[-1]
measure_up = pre_list[0] + 'U' + pre_list[-1]
# ps_r = random.randint(1, 3)
# worker_r = random.randint(1, 4)
allow_read = {}
allow_read['OK'] = True
allow_read['retry'] = aim_job.retry
# allow_p = check_path(measure_t)
allow_path = '/tfdata/k8snfs/%s/%s3.json' % (aim_job.name, measure_t)
save_config(allow_read, allow_path)
lock.acquire()
tasks['base'] = aim_job.name
lock.release()
# template_id = random.randint(1,4)
else:
break
lock.acquire()
if tasks['count'] < tasks['size'] or tasks['buffercount'] < max_buffer_size:
# loss_client = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# loss_client.connect(ADDR)
client_pre = influxdb.InfluxDBClient(host=aim_job.dbhost, port=8086, username='admin',
password='<PASSWORD>',
database="PREDICT")
save_config_dir = task_submit.check_path(aim_job.name)
save_job_path = '/tfdata/k8snfs/%s/%s.json' % (aim_job.name, aim_job.name)
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (aim_job.name, aim_job.name)
aim_res_config = load_config(save_res_path)
pre_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
pre_client.connect(ADDR2)
pre_client.send(bytes(aim_job.measure, 'utf-8'))
connect_try = 5
try_times = 1
connected = False
msg_from_server_str = ''
start_time = '%.3f' % time.time()
start_time = float(start_time)
if aim_job.template_id == 1:
dict0 = {'batch': aim_job.batch_size, 'channel1': aim_job.channel1, 'channel2': aim_job.channel2, 'channel3': aim_job.channel3,
'channel4': aim_job.channel4,
'channel5': aim_job.channel5, 'num_layer1': aim_job.num_layer1, 'num_layer2': aim_job.num_layer2,
'num_layer3': aim_job.num_layer3, 'num_layer4': aim_job.num_layer4, 'num_layer5': aim_job.num_layer5}
elif aim_job.template_id == 2:
dict0 = {'batch': aim_job.batch_size, 'channel1': aim_job.channel1, 'channel2': aim_job.channel2, 'channel3': aim_job.channel3,
'channel4': aim_job.channel4,
'layer1': aim_job.layer1, 'layer2': aim_job.layer2,
'layer3': aim_job.layer3, 'layer4': aim_job.layer4, 'bottle': aim_job.bottle}
elif aim_job.template_id == 3:
dict0 = {'batch': aim_job.batch_size, 'channel1': aim_job.channel1, 'channel2': aim_job.channel2, 'channel3': aim_job.channel3,
'channel4': aim_job.channel4, 'stack_num': aim_job.stack}
elif aim_job.template_id == 4:
dict0 = {'batch':aim_job.batch_size, 'channel1': aim_job.channel1, 'channel2': aim_job.channel2, 'channel3': aim_job.channel3,
'channel4': aim_job.channel4, 'channel5': aim_job.channel5, 'channel6': aim_job.channel6,
'channel7': aim_job.channel7,
'channel8': aim_job.channel8, 'repeat': aim_job.repeat}
else:
dict0 = {'batch': aim_job.batch_size, 'BC': aim_job.BC, 'k': aim_job.k, 'L':aim_job.L, 'num_classes': 10}
while True:
if try_times > connect_try:
break
msg_from_server = pre_client.recv(4096)
if not msg_from_server:
break
msg_from_server_str = str(msg_from_server.decode('utf-8'))
msg_from_server_list = msg_from_server_str.split(" ")
if msg_from_server_list[0] == '400':
connected = True
break
pre_client.send(bytes(aim_job.measure, 'utf-8'))
try_times = try_times + 1
if not connected:
print("Connected or send message error!")
pre_client.close()
lock.release()
continue
print(msg_from_server_str)
print("connected success!")
dict_json = json.dumps(dict0)
pre_client.send(bytes(dict_json, 'utf-8'))
ress = pre_client.recv(4096)
ress_str = str(ress.decode('utf-8'))
ress_lists = ress_str.split(' ')
if ress_lists[0] == '400':
batch_res = int(ress_lists[1])
flops_res = int(ress_lists[2])
params_res = int(ress_lists[3])
cpu_predict = float(ress_lists[-2])
cpu_base = math.ceil(1.12 * cpu_predict)
mem_predict = float(ress_lists[-1])
mem_base = math.ceil(1.35 * mem_predict)
res_to_server = '1'
pre_client.send(bytes(res_to_server, 'utf-8'))
else:
res_to_server = '0'
pre_client.send(bytes(res_to_server, 'utf-8'))
print("send response success!!")
time.sleep(6)
pre_client.close()
print("some time later to try again!!")
lock.release()
time.sleep(60)
continue
pre_client.close()
tmp_reload = tasks['reload']
if tmp_reload == 0:
# alpha = 1
# beta = 1
alpha = random.randint(1, 14) * 0.1 + 0.6
beta = random.randint(2, 16) * 0.1 + 0.625
else:
alpha = random.randint(1, 14) * 0.1 + 0.6
beta = random.randint(2, 16) * 0.1 + 0.625
# alpha = 1
# beta = 1
aim_job.set_resource(cpu_source=(math.ceil(aim_res_config['cpu_high'] * alpha)),
mem_source=(math.ceil(aim_res_config['memory_base'] * beta)))
deadline = float(aim_res_config['deadline'])
print(deadline)
print(type(deadline))
# deadline = random.randint(3600, 18000)
aim_job.set_deadline(deadline=deadline, start_time=start_time)
aim_res_config['deadline'] = aim_job.deadline
aim_res_config['start_time3'] = aim_job.starttime
aim_res_config['cpu_source'] = aim_job.cpu_allocate
aim_res_config['mem_source'] = aim_job.memory_allocate
save_config_dir = task_submit_raw.check_path(aim_job.name)
# save_job_path = '/tfdata/k8snfs/%s/%s.json' % (job.name, job.name)
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (aim_job.name, aim_job.name)
# save_config(job_config, save_job_path)
save_config(aim_res_config, save_res_path)
if tasks['count'] < tasks['size'] and tasks['buffercount'] == 0:
node_index, cpu_nodes, memory_nodes, total_cpu_use, total_mem_use = job_basic.schedule_base()
cpu_value, mem_value, cpu_node_value, mem_node_value = get_load_value(node_index=node_index,
cpu_base=cpu_nodes,
memory_base=memory_nodes,
total_cpu_base=total_cpu_use,
total_memory_base=total_mem_use)
aim_job.worker_replicas = random.randint(1,6)
aim_job.ps_replicas = random.randint(1,4)
influx_client = influxdb.InfluxDBClient(host='192.168.128.10', port=8086, username='admin',
password='<PASSWORD>',
database="PREDICT")
result = influx_client.query("select * from " + measure_t + " order by asc limit 1")
key = result.keys()
print(key)
result_inter = result[key[0]]
result_items = list(result_inter)
print(result_items)
trains_step = int(result_items[0]['training_step'])
tmp_worker_replicas = int(result_items[0]['worker'])
aim_job.training_step = math.ceil(trains_step * tmp_worker_replicas / aim_job.worker_replicas)
save_job_change_layout(aim_job.name, aim_job.ps_replicas, aim_job.worker_replicas, aim_job.training_step)
mem_need = aim_job.total_mem * total_mem_use + aim_job.worker_replicas * aim_job.memory_allocate + 2048 * aim_job.ps_replicas
cpu_need = aim_job.total_cpu * total_cpu_use + aim_job.worker_replicas * aim_job.cpu_allocate + 750 * aim_job.ps_replicas
catch_worker = 0
catch_ps = 0
node_keys = cpu_nodes.keys()
reach_ps = False
reach_worker = False
for key in node_keys:
catch_ps_c = 0
catch_ps_m = 0
catch_worker_c = 0
catch_worker_m = 0
can_use_cpu = aim_job.node_cpu[key] * (1 - cpu_nodes[key])
can_use_mem = aim_job.node_memory[key] * (1 - memory_nodes[key])
first_try = True
endcpu = False
endmem = False
while (not endcpu) or (not endmem):
if first_try:
if can_use_cpu - 600> 0 and not reach_ps:
catch_ps_c += 1
can_use_cpu = can_use_cpu - 600
else:
if can_use_cpu - aim_job.cpu_allocate > 0 and not reach_worker:
catch_worker_c += 1
can_use_cpu = can_use_cpu - aim_job.cpu_allocate
if can_use_mem - 2048 > 0 and not reach_ps:
catch_ps_m += 1
can_use_mem = can_use_mem - 2048
else:
if can_use_mem - aim_job.memory_allocate > 0 and not reach_worker:
catch_worker_m += 1
can_use_mem = can_use_mem - aim_job.memory_allocate
first_try = False
else:
if can_use_cpu - aim_job.cpu_allocate > 0 and not reach_worker:
catch_worker_c += 1
can_use_cpu = can_use_cpu - aim_job.cpu_allocate
else:
if can_use_cpu - 600 > 0 and not reach_ps:
catch_ps_c += 1
can_use_cpu = can_use_cpu - 600
else:
endcpu = True
if can_use_mem - aim_job.memory_allocate > 0 and not reach_worker:
catch_worker_m += 1
can_use_mem = can_use_mem - aim_job.memory_allocate
else:
if can_use_mem - 2048 > 0 and not reach_ps:
catch_ps_m += 1
can_use_mem = can_use_mem - 2048
else:
endmem = True
if catch_worker_c < catch_worker_m:
catch_worker += catch_worker_c
else:
catch_worker += catch_worker_m
if catch_ps_c < catch_ps_m:
catch_ps += catch_ps_c
else:
catch_ps += catch_ps_m
if catch_ps >= aim_job.ps_replicas:
reach_ps = True
if catch_worker >= aim_job.worker_replicas:
reach_worker = True
if catch_ps >= aim_job.ps_replicas and catch_worker >= aim_job.worker_replicas:
break
print("catch_ps: %d catch_worker: %d" % (catch_ps, catch_worker))
if catch_ps < aim_job.ps_replicas or catch_worker < aim_job.worker_replicas:
tmp_ps = aim_job.ps_replicas
tmp_worker = aim_job.worker_replicas
if catch_ps > 0 and catch_worker > 0:
if catch_worker > aim_job.worker_replicas:
catch_worker = aim_job.worker_replicas
if catch_ps > aim_job.ps_replicas:
catch_ps = aim_job.ps_replicas
aim_job.ps_replicas = catch_ps
aim_job.worker_replicas = catch_worker
aim_job.training_step = math.ceil(aim_job.training_step * tmp_worker / aim_job.worker_replicas)
save_job_change_layout(aim_job.name, catch_ps, catch_worker, aim_job.training_step)
aim_job.update_step()
write_step_meg(aim_job.name)
submit_time_now = time.time()
tongji_waiting_queue(aim_job.name, submit_time_now)
aim_job.create_tf()
ns_tmp = tasks['ns']
ns_tmp.append(aim_job.name)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
is_layout[aim_job.name] = False
tasks['nslayout'] = is_layout
jobs.append(aim_job.name)
tasks['count'] += 1
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (aim_job.name, aim_job.name)
res_config = load_config(save_res_path)
batch_res = res_config['batch_res']
flops_res = res_config['flops_res']
params_res = res_config['params_res']
pool.apply_async(catch_node_step_msg,
args=(
jobs, aim_job.name, tasks, lock, batch_res, flops_res, params_res, 1))
else:
aim_job.ps_replicas = 1
aim_job.worker_replicas = 1
aim_job.training_step = aim_job.training_step * tmp_worker
save_job_change_layout(aim_job.name, 1, 1, aim_job.training_step)
# lock.acquire()
tmp_next = tasks['next']
tmp_next.append(aim_job.name)
tmp_next_time_config = tasks['nexttimes']
tmp_next_time_config[aim_job.name] = 0
tasks['nexttimes'] = tmp_next_time_config
tasks['next'] = tmp_next
else:
aim_job.update_step()
write_step_meg(aim_job.name)
submit_time_now = time.time()
tongji_waiting_queue(aim_job.name, submit_time_now)
aim_job.create_tf()
ns_tmp = tasks['ns']
ns_tmp.append(aim_job.name)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
is_layout[aim_job.name] = False
tasks['nslayout'] = is_layout
jobs.append(aim_job.name)
tasks['count'] += 1
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (aim_job.name, aim_job.name)
res_config = load_config(save_res_path)
batch_res = res_config['batch_res']
flops_res = res_config['flops_res']
params_res = res_config['params_res']
pool.apply_async(catch_node_step_msg,
args=(
jobs, aim_job.name, tasks, lock, batch_res, flops_res, params_res, 1))
elif tasks['count'] >= tasks['size']:
worker_buffer = tasks['buffer']
worker_buffer.append(aim_job.name)
tasks['buffer'] = worker_buffer
tmp_buffer_count = tasks['buffercount']
tmp_buffer_count = tmp_buffer_count + 1
tasks['buffercount'] = tmp_buffer_count
else:
worker_buffer = tasks['buffer']
worker_buffer.append(aim_job.name)
tmp_buffer_count = tasks['buffercount']
tmp_buffer_count = tmp_buffer_count + 1
tasks['buffercount'] = tmp_buffer_count
node_index, cpu_nodes, memory_nodes, total_cpu_use, total_mem_use = job_basic.schedule_base()
cpu_value, mem_value, cpu_node_value, mem_node_value = get_load_value(node_index=node_index,
cpu_base=cpu_nodes,
memory_base=memory_nodes,
total_cpu_base=total_cpu_use,
total_memory_base=total_mem_use)
if cpu_value < 0.4:
tmp_buffers = tasks['buffer']
selected_job_name = tmp_buffers[0]
print(selected_job_name)
# worker_buffer = max_free_heap.items
print(worker_buffer)
ceshi_name = worker_buffer.pop(0)
print(ceshi_name)
tasks['buffer'] = worker_buffer[:]
tmp_buffer_count = tmp_buffer_count - 1
tasks['buffercount'] = tmp_buffer_count
else:
selected_job_name = worker_buffer[0]
ceshi_name = worker_buffer.pop(0)
print(ceshi_name)
print(worker_buffer)
print(selected_job_name)
tasks['buffer'] = worker_buffer[:]
tmp_buffer_count = tmp_buffer_count - 1
tasks['buffercount'] = tmp_buffer_count
job = reload_jobs(selected_job_name, -3)
tmp_ps_replicas = job.ps_replicas
tmp_worker_replicas = job.worker_replicas
pre_list = job.measure.split(" ")
measure_s = pre_list[0] + 'S' + pre_list[-1]
measure_t = pre_list[0] + 'T' + pre_list[-1]
influx_client = influxdb.InfluxDBClient(host='192.168.128.10', port=8086, username='admin',
password='<PASSWORD>',
database="PREDICT")
result = influx_client.query("select * from " + measure_t + " order by asc limit 1")
key = result.keys()
print(key)
result_inter = result[key[0]]
result_items = list(result_inter)
print(result_items)
trains_step = int(result_items[0]['training_step'])
tmp_worker_replicas = int(result_items[0]['worker'])
job.training_step = math.ceil(
trains_step * tmp_worker_replicas / job.worker_replicas)
save_job_change_layout(job.name, job.ps_replicas, job.worker_replicas,
job.training_step)
mem_need = job.total_mem * total_mem_use + job.worker_replicas * job.memory_allocate + 2048 * job.ps_replicas
cpu_need = job.total_cpu * total_cpu_use + job.worker_replicas * job.cpu_allocate + 750 * job.ps_replicas
catch_worker = 0
catch_ps = 0
node_keys = cpu_nodes.keys()
reach_ps = False
reach_worker = False
for key in node_keys:
catch_ps_c = 0
catch_ps_m = 0
catch_worker_c = 0
catch_worker_m = 0
can_use_cpu = job.node_cpu[key] * (1 - cpu_nodes[key])
can_use_mem = job.node_memory[key] * (1 - memory_nodes[key])
first_try = True
endcpu = False
endmem = False
while (not endcpu) or (not endmem):
if first_try:
if can_use_cpu - 600 > 0 and not reach_ps:
catch_ps_c += 1
can_use_cpu = can_use_cpu - 600
else:
if can_use_cpu - job.cpu_allocate > 0 and not reach_worker:
catch_worker_c += 1
can_use_cpu = can_use_cpu - job.cpu_allocate
if can_use_mem - 2048 > 0 and not reach_ps:
catch_ps_m += 1
can_use_mem = can_use_mem - 2048
else:
if can_use_mem - job.memory_allocate > 0 and not reach_worker:
catch_worker_m += 1
can_use_mem = can_use_mem - job.memory_allocate
first_try = False
else:
if can_use_cpu - job.cpu_allocate > 0 and not reach_worker:
catch_worker_c += 1
can_use_cpu = can_use_cpu - job.cpu_allocate
else:
if can_use_cpu - 600 > 0 and not reach_ps:
catch_ps_c += 1
can_use_cpu = can_use_cpu - 600
else:
endcpu = True
if can_use_mem - job.memory_allocate > 0 and not reach_worker:
catch_worker_m += 1
can_use_mem = can_use_mem - job.memory_allocate
else:
if can_use_mem - 2048 > 0 and not reach_ps:
catch_ps_m += 1
can_use_mem = can_use_mem - 2048
else:
endmem = True
if catch_worker_c < catch_worker_m:
catch_worker += catch_worker_c
else:
catch_worker += catch_worker_m
if catch_ps_c < catch_ps_m:
catch_ps += catch_ps_c
else:
catch_ps += catch_ps_m
if catch_ps >= job.ps_replicas:
reach_ps = True
if catch_worker >= job.worker_replicas:
reach_worker = True
if catch_ps >= job.ps_replicas and catch_worker >= job.worker_replicas:
break
print("catch_ps: %d catch_worker: %d" % (catch_ps, catch_worker))
if catch_ps < job.ps_replicas or catch_worker < job.worker_replicas:
tmp_ps = job.ps_replicas
tmp_worker = job.worker_replicas
if catch_ps > 0 and catch_worker > 0:
if catch_worker > job.worker_replicas:
catch_worker = job.worker_replicas
if catch_ps > job.ps_replicas:
catch_ps = job.ps_replicas
job.ps_replicas = catch_ps
job.worker_replicas = catch_worker
job.training_step = math.ceil(job.training_step * tmp_worker / job.worker_replicas)
save_job_change_layout(job.name, catch_ps, catch_worker, job.training_step)
job.update_step()
write_step_meg(job.name)
submit_time_now = time.time()
tongji_waiting_queue(job.name, submit_time_now)
job.create_tf()
ns_tmp = tasks['ns']
ns_tmp.append(job.name)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
is_layout[job.name] = False
tasks['nslayout'] = is_layout
jobs.append(job.name)
tasks['count'] += 1
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
res_config = load_config(save_res_path)
batch_res = res_config['batch_res']
flops_res = res_config['flops_res']
params_res = res_config['params_res']
pool.apply_async(catch_node_step_msg,
args=(
jobs, job.name, tasks, lock, batch_res, flops_res, params_res,
1))
else:
job.ps_replicas = 1
job.worker_replicas = 1
                                    job.training_step = math.ceil(job.training_step * tmp_worker)
save_job_change_layout(job.name, 1, 1, training_step=job.training_step)
tmp_next = tasks['next']
tmp_next.append(job.name)
tmp_next_time_config = tasks['nexttimes']
tmp_next_time_config[job.name] = 0
tasks['nexttimes'] = tmp_next_time_config
tasks['next'] = tmp_next
# lock.release()
else:
job.update_step()
write_step_meg(job.name)
submit_time_now = time.time()
tongji_waiting_queue(job.name, submit_time_now)
job.create_tf()
ns_tmp = tasks['ns']
ns_tmp.append(job.name)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
is_layout[job.name] = False
tasks['nslayout'] = is_layout
jobs.append(job.name)
tasks['count'] += 1
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
res_config = load_config(save_res_path)
batch_res = res_config['batch_res']
flops_res = res_config['flops_res']
params_res = res_config['params_res']
pool.apply_async(catch_node_step_msg,
args=(
jobs, job.name, tasks, lock, batch_res, flops_res, params_res, 1))
tmp_reload = tasks['reload']
tmp_reload = 0
tasks['reload'] = tmp_reload
lock.release()
break
global_count += 1
def jiance(tasks,lock,v1):  # ("jiance" = monitor) periodic watchdog over the running namespaces
try:
task2 = load_config('through.json')
# aa = 0
except Exception as eee:
print(eee)
# aa = 0
while True:
if tasks['start']==True:
time.sleep(120)
lock.acquire()
tmp_count1 = 0
for ns in tasks['ns']:
nss = get_ns(v1)
if ns not in nss:
continue
pod_status2 = [i.status.phase for i in v1.list_namespaced_pod(ns).items]
save_path = '/tfdata/k8snfs/%s/%s.json' % (ns,ns)
ns_config = load_config(save_path)
                run_result2 = pd.value_counts(pod_status2)
#Merges two CSV files and saves the final result
import pandas as pd
import sys
df1 = pd.read_csv(sys.argv[1])
import json
from datetime import datetime
import pandas as pd
from autogluon import TabularPrediction as task
data_path = "./data/plasma/plasma"
label_column = "RETPLASMA"
fold1 = pd.read_csv(data_path + "-fold1.csv")
fold2 = pd.read_csv(data_path + "-fold2.csv")
fold3 = pd.read_csv(data_path + "-fold3.csv")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 08:48:39 2020
@author: cclark2
"""
import numpy as np
import math
from scipy.interpolate import interp2d
from scipy.optimize import fsolve
import pandas as pd
import os
import struct
import multiprocessing #import Pool
from itertools import repeat
import dill
import mpi4py.MPI as MPI
from mpi4py.futures import MPIPoolExecutor #executor map
MPI.pickle.__init__(dill.dumps, dill.loads)
num_cores = multiprocessing.cpu_count()
#%% READ BINARY FILES AND CONCATENATE SEEDS
class Surrogate():
"""
Base drivetrain class that calculates forces and L10 lifetime for planet bearings.
"""
def __init__(self, FF_timestep, m_c, d_c, m_s, m_p, N, g, beta, L_c, L_s, L_p, rho, C, e, N_r, N_p, omega):
'''Instantiate LayoutOptimization object and parameter values.'''
self.FF_timestep = FF_timestep # FAST.Farm timestep for outputs
self.m_c = m_c # carrier mass
self.d_c = d_c # center distance
self.m_s = m_s # shaft mass
self.m_p = m_p # planet bearing mass
self.N = N # number of planet bearings
self.g = g # gravitational force
self.beta = beta # mounting angle
self.L_c = L_c # distance from main bearing to the carrier's center of gravity
self.L_s = L_s # distance from main bearing to the main shaft's center of gravity
self.L_p = L_p # distance from main bearing to the planet bearing's center of gravity
self.rho = rho # bedplate tilting angle (if don't want to include, set to 0 degrees)
self.C = C # bearing basic dynamic load rating or capacity, N (the load that a bearing can carry for 1 million inner-race revolutions with a 90% probability of survival)
self.e = e # constant for roller bearings
self.N_r = N_r # ring gear teeth (#)
self.N_p = N_p # planet gear teeth (#)
self.omega = omega # bearing mount
def fread(self, fid, n, type):
fmt, nbytes = {'uint8': ('B', 1), 'int16':('h', 2), 'int32':('i', 4), 'float32':('f', 4), 'float64':('d', 8)}[type]
return struct.unpack(fmt * n, fid.read(nbytes * n))
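    # Note: fread() returns the raw struct.unpack() tuple, so a single value comes back as a
    # 1-tuple, e.g. self.fread(fid, 1, 'int32') -> (37,); callers index [0] when they need the
    # scalar itself.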
def load_binary_output(self, filename):
'''Ported from ReadFASTbinary.m by <NAME>, DTU Wind
Info about ReadFASTbinary.m:
% Author: <NAME>, National Renewable Energy Laboratory
% (c) 2012, National Renewable Energy Laboratory
%
% Edited for FAST v7.02.00b-bjj 22-Oct-2012
'''
FileFmtID_WithTime = 1 # File identifiers used in FAST
LenName = 10 # number of characters per channel name
LenUnit = 10 # number of characters per unit name
with open(filename, 'rb') as fid:
            FileID = self.fread(fid, 1, 'int16')[0]  # FAST output file format, INT(2)
NumOutChans = self.fread(fid, 1, 'int32')[0] # The number of output channels, INT(4)
NT = self.fread(fid, 1, 'int32')[0] # The number of time steps, INT(4)
if FileID == FileFmtID_WithTime:
TimeScl = self.fread(fid, 1, 'float64') # The time slopes for scaling, REAL(8)
TimeOff = self.fread(fid, 1, 'float64') # The time offsets for scaling, REAL(8)
else:
TimeOut1 = self.fread(fid, 1, 'float64') # The first time in the time series, REAL(8)
TimeIncr = self.fread(fid, 1, 'float64') # The time increment, REAL(8)
ColScl = self.fread(fid, NumOutChans, 'float32') # The channel slopes for scaling, REAL(4)
ColOff = self.fread(fid, NumOutChans, 'float32') # The channel offsets for scaling, REAL(4)
LenDesc = self.fread(fid, 1, 'int32')[0] # The number of characters in the description string, INT(4)
DescStrASCII = self.fread(fid, LenDesc, 'uint8') # DescStr converted to ASCII
DescStr = "".join(map(chr, DescStrASCII)).strip()
ChanName = [] # initialize the ChanName cell array
for iChan in range(NumOutChans + 1):
ChanNameASCII = self.fread(fid, LenName, 'uint8') # ChanName converted to numeric ASCII
ChanName.append("".join(map(chr, ChanNameASCII)).strip())
ChanUnit = [] # initialize the ChanUnit cell array
for iChan in range(NumOutChans + 1):
ChanUnitASCII = self.fread(fid, LenUnit, 'uint8') # ChanUnit converted to numeric ASCII
ChanUnit.append("".join(map(chr, ChanUnitASCII)).strip()[1:-1])
# Get the channel time series
nPts = NT * NumOutChans # number of data points in the file
if FileID == FileFmtID_WithTime:
PackedTime = self.fread(fid, NT, 'int32') # read the time data
cnt = len(PackedTime)
if cnt < NT:
raise Exception('Could not read entire %s file: read %d of %d time values' % (filename, cnt, NT))
PackedData = self.fread(fid, nPts, 'int16') # read the channel data
cnt = len(PackedData)
if cnt < nPts:
raise Exception('Could not read entire %s file: read %d of %d values' % (filename, cnt, nPts))
# Scale the packed binary to real data
data = np.array(PackedData).reshape(NT, NumOutChans)
data = (data - ColOff) / ColScl
if FileID == FileFmtID_WithTime:
time = (np.array(PackedTime) - TimeOff) / TimeScl;
else:
time = TimeOut1 + TimeIncr * np.arange(NT)
data = np.concatenate([time.reshape(NT, 1), data], 1)
info = {'name': os.path.splitext(os.path.basename(filename))[0],
'description': DescStr,
'attribute_names': ChanName,
'attribute_units': ChanUnit}
return data, ChanName #data, info
def concatenate_seeds(self, inflow, case, seeds, turbine, outfile):
'''Concatenate seeds data from FAST.Farm into a single dataframe.'''
if outfile == "BINARY":
data = []
for seed in seeds:
file = '/projects/windse/kshaler/SystemsEngineering/GriddedDatabase/FFarm/NewCases/{0}/{1}/{2}/FFarm_mod.{3}.outb'.format(inflow, case, seed, turbine)
temp_data, channel = self.load_binary_output(file)
                print(str(seed) + ' temp_data size: ' + str(temp_data.size))
data.append(temp_data)
concatenated_data = np.concatenate(data)
frame = pd.DataFrame(concatenated_data, columns = channel)
elif outfile == "ASCII":
result_files = ['/projects/windse/kshaler/SystemsEngineering/GriddedDatabase/FFarm/NewCases/{0}/{1}/{2}/FFarm_mod.{3}.out'.format(inflow, case, seed, turbine) for seed in seeds]
df_list = [ | pd.read_csv(file, delim_whitespace=True, header = [0,1], skiprows=6, error_bad_lines=False) | pandas.read_csv |
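# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal example of reading a single FAST binary output file with the class
# above and wrapping the result in a DataFrame. The constructor arguments and
# the 'example.outb' path are hypothetical placeholders, not project values.
if __name__ == '__main__':
    import pandas as pd
    surr = Surrogate(FF_timestep=0.025, m_c=1.0, d_c=1.0, m_s=1.0, m_p=1.0,
                     N=3, g=9.81, beta=0.0, L_c=1.0, L_s=1.0, L_p=1.0,
                     rho=0.0, C=1.0, e=10.0 / 3.0, N_r=100, N_p=40, omega=0.0)
    data, channels = surr.load_binary_output('example.outb')
    df = pd.DataFrame(data, columns=channels)
    print(df.head())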
# Libraries
import pandas as pd
from alpha_vantage.timeseries import TimeSeries
from time import sleep
def fetch_stock_data(stocks):
"""
Fetches stock data (per min) for last 14 days.
INPUT: List of stocks
OUTPUT: CSV files generated in data folder for all the stocks
"""
cnt=0
for stock in stocks:
time=TimeSeries(key="Enter alphavantage key",output_format='pandas')
data=time.get_intraday(symbol=stock,interval='1min',outputsize="full")
stock_df=data[0]
stock_df.to_csv("../data/Historical_Data/"+stock+".csv")
## API can only fetch data for 5 stocks in a minute
cnt+=1
if cnt==4:
cnt=0
sleep(60)
def stock_data_daily(stocks,date):
"""
Updates the csv files with the current date's data
INPUT: List of stocks and today's date
OUTPUT: CSV files generated in data folder for all the stocks
Simulation Data consists of last trading day and Historical Data consists of stock data before that day.
"""
cnt=0
for stock in stocks:
df= | pd.read_csv("../data/Historical_Data/"+stock+".csv",index_col=0) | pandas.read_csv |
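# --- Usage sketch (illustrative only) ---------------------------------------
# Hypothetical driver for the two helpers above. The ticker list and the date
# format passed to stock_data_daily are assumptions, and the Alpha Vantage API
# key inside fetch_stock_data must be filled in before this would run.
if __name__ == '__main__':
    from datetime import date
    watchlist = ['AAPL', 'MSFT', 'GOOGL']
    fetch_stock_data(watchlist)                              # one-off backfill (~14 days of 1-min bars)
    stock_data_daily(watchlist, date.today().isoformat())    # incremental daily update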
import numpy as np
import mxnet as mx
import pdb
np.seterr(divide='ignore', invalid='ignore')
## for saving
import pandas as pd
import os
def COR(label, pred):
label_demeaned = label - label.mean(0)
label_sumsquares = np.sum(np.square(label_demeaned), 0)
pred_demeaned = pred - pred.mean(0)
pred_sumsquares = np.sum(np.square(pred_demeaned), 0)
cor_coef = np.diagonal(np.dot(label_demeaned.T, pred_demeaned)) / \
np.sqrt(label_sumsquares * pred_sumsquares)
return np.nanmean(cor_coef)
def write_eval(pred, label, save_dir, mode, epoch):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
pred_df = pd.DataFrame(pred)
label_df = | pd.DataFrame(label) | pandas.DataFrame |
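# --- Worked example (illustrative only) --------------------------------------
# Quick sanity check of COR on synthetic data: one perfectly correlated column
# (+1) and one anti-correlated column (-1) should average out to 0.
if __name__ == '__main__':
    label = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
    pred = np.column_stack([label[:, 0] * 2.0,   # correlation +1 with column 0
                            -label[:, 1]])       # correlation -1 with column 1
    print(COR(label, pred))                      # -> 0.0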
#!/data7/cschoi/anaconda3/bin/python
# to find newly discovered SNe from http://www.rochesterastronomy.org/snimages/
import requests
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
from html_table_parser import parser_functions as parser
import astropy.io.ascii as ascii
import os
import sys
from astropy.table import Table, Column
import astropy.coordinates as coord
from astropy.time import Time
import astropy.units as u
import numpy as np
from astropy.coordinates import Angle
from astropy.coordinates import ICRS
from astropy.coordinates import SkyCoord
from datetime import datetime
os.chdir('/data7/cschoi/sngal/recent-sne-check/rochester-list')
#datetime.today().strftime("%Y%m%d%H%M%S") # YYYYmmddHHMMSS 형태의 시간 출력
#datetime.today().strftime("%Y/%m/%d %H:%M:%S") # YYYY/mm/dd HH:MM:SS 형태의 시간 출력
today=datetime.today()
today=datetime.today().strftime("%Y%m%d %H:%M:%S")[:8]
print(today)
radius=10.0 # search radius in arcmin
print ('Radius '+str(radius)+' arcmin')
# print ('Reading recentsnelist.txt file ...')
# colnames=['Ra','Dec','EarliestObs','Host','Type','Last','Max','Link','Discoverer']
# latestsnelist=pd.read_table('recentsnelist.txt')
# latestsnelist=pd.read_table('recentlist.txt') #,names=colnames,data_start=1,guess='False')
# latestsnelist=ascii.read('recentsnelist.txt',delimiter='\t') #,names=colnames,data_start=1,guess='False')
imsnglist=ascii.read('/data7/cschoi/IMSNG/target/alltarget.dat')
urlall="http://www.RochesterAstronomy.org/snimages/sndateall.html" # sn date all
url='http://www.rochesterastronomy.org/snimages/sndate.html' # sndate
print ('getting table data from web page from',url)
response=requests.get(url)
print('Done, table data is obtained')
soup = BeautifulSoup(response.content, 'html.parser')
tbl=soup.find_all('table')
soup.find_all('table')[1].find_all('th')
html_table = parser.make2d(tbl[1])
df= | pd.DataFrame(html_table[1:], columns=html_table[0]) | pandas.DataFrame |
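# --- Cross-match sketch (illustrative only) ----------------------------------
# The point of this script is to flag newly reported SNe that fall within
# `radius` arcmin of an IMSNG target. A minimal astropy-based check is sketched
# below; the coordinate formats of the Rochester table and of alltarget.dat are
# assumptions here and may need adjusting to the real column contents.
def within_radius(sn_ra, sn_dec, targets_ra, targets_dec, radius_arcmin=10.0):
    """Return a boolean array, True where the SN lies within radius of a target."""
    sn = SkyCoord(ra=sn_ra, dec=sn_dec, unit=(u.hourangle, u.deg))
    targets = SkyCoord(ra=targets_ra, dec=targets_dec, unit=(u.hourangle, u.deg))
    return sn.separation(targets) < radius_arcmin * u.arcmin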
import sys
import os
from flask import Flask, escape, request, send_from_directory, redirect, url_for
import flask
import json
from flask_cors import CORS
import copy
import pandas as pd
import time
sys.path.append(os.path.abspath('../falx'))
from falx.interface import FalxInterface
from falx.utils import vis_utils
def infer_dtype(values):
return pd.api.types.infer_dtype(values, skipna=True)
def try_infer_string_type(values):
"""try to infer datatype from values """
dtype = pd.api.types.infer_dtype(values, skipna=False)
ty_func = lambda l: pd.to_numeric(l)
try:
values = ty_func(values)
dtype = | pd.api.types.infer_dtype(values, skipna=False) | pandas.api.types.infer_dtype |
import pandas as pd
import numpy as np
import warnings
import sklearn.metrics as mt
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
# To avoid warnings
warnings.filterwarnings('ignore')
def read_data(path):
"""Read and return data."""
data = pd.read_csv(path)
return data
def data_prepare(dataset):
"""Puts data in order in a few steps.
1. Delete unused columns
2. Replace NaN's with means and most frequent
3. Replace str values with ints
4. Depersonalization of some data, bringing them to a vector form
Returns prepared dataset.
"""
# Delete unused columns
unused_columns = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'Fare']
data = dataset.drop(unused_columns, axis=1)
# Replace NaN's with means...
feature_list_1 = ['Age']
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
data[feature_list_1] = imputer.fit_transform(data[feature_list_1].astype('float64'))
# ...and most frequent
feature_list_2 = ['Survived', 'Pclass', 'SibSp', 'Parch']
imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
data[feature_list_2] = imputer.fit_transform(data[feature_list_2].astype('float64'))
# Replace str values with ints
label_encoder_sex = LabelEncoder()
data['Sex'] = label_encoder_sex.fit_transform(data['Sex'].astype(str))
label_encoder_embarked = LabelEncoder()
data['Embarked'] = label_encoder_embarked.fit_transform(data['Embarked'].astype(str))
# Depersonalization of some data, bringing them to a vector form
# e.g. for Sex column will be created Sex_0 and Sex_1 columns
categorical_feature_list = ['Sex', 'Embarked', 'Pclass']
for feature in categorical_feature_list:
data[feature] = pd.Categorical(data[feature])
data_dummies = | pd.get_dummies(data[feature], prefix=feature) | pandas.get_dummies |
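# --- Usage sketch (illustrative only) ----------------------------------------
# Hypothetical end-to-end call of the helpers above, assuming data_prepare
# returns the cleaned DataFrame. 'train.csv' is the usual Kaggle Titanic file
# name and is an assumption, not part of this module.
if __name__ == '__main__':
    raw = read_data('train.csv')
    prepared = data_prepare(raw)
    X = prepared.drop('Survived', axis=1)
    y = prepared['Survived']
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    print(X_train.shape, X_test.shape)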
import pyspark
from pyspark.sql import SQLContext
import pandas as pd
import csv
import os
def load_states():
# read US states
f = open('states.txt', 'r')
states = set()
for line in f.readlines():
l = line.strip('\n')
if l != '':
states.add(l)
return states
def validate2(states, bt):
#sqlContext = SQLContext(sc)
for state in states:
if not os.path.exists("US/" + state):
continue
"""
Train
"""
train_prefix = "US/" + state + '/' + bt + "/train/" + state + "_train_"
business_train_fname = train_prefix + 'yelp_academic_dataset_business.csv'
business_train_fname2 = train_prefix + 'yelp_academic_dataset_business2.csv'
review_train_fname = train_prefix + 'yelp_academic_dataset_review.csv'
checkins_train_fname = train_prefix + 'yelp_academic_dataset_checkin.csv'
tip_train_fname = train_prefix + 'yelp_academic_dataset_tip.csv'
user_train_fname = train_prefix + 'yelp_academic_dataset_user.csv'
df_business_train = pd.read_csv(business_train_fname)
df_review_train = pd.read_csv(review_train_fname)
df_checkins_train = pd.read_csv(checkins_train_fname)
df_tip_train = pd.read_csv(tip_train_fname)
df_user_train = pd.read_csv(user_train_fname)
count_business_train = df_business_train.shape[0]
count_review_train = df_review_train.shape[0]
count_checkins_train = df_checkins_train.shape[0]
count_tip_train = df_tip_train.shape[0]
count_user_train = df_user_train.shape[0]
df_train_busi_review_count = df_review_train.groupby(['business_id']).agg(['count'])
dict_train_busi_review_count = df_train_busi_review_count['review_id'].apply(list).to_dict()['count']
new_pdf_train_busi_review_count = pd.DataFrame.from_dict(dict_train_busi_review_count, orient='index').reset_index()
new_pdf_train_busi_review_count.columns = ['business_id', 'review_count2']
df_business_train = df_business_train.join(new_pdf_train_busi_review_count.set_index('business_id'), on='business_id')
df_business_train.to_csv(business_train_fname2, index=False)
"""
        Validation
"""
valid_prefix = "US/" + state + '/' + bt + "/valid/" + state + "_valid_"
business_valid_fname = valid_prefix + 'yelp_academic_dataset_business.csv'
business_valid_fname2 = valid_prefix + 'yelp_academic_dataset_business2.csv'
review_valid_fname = valid_prefix + 'yelp_academic_dataset_review.csv'
checkins_valid_fname = valid_prefix + 'yelp_academic_dataset_checkin.csv'
tip_valid_fname = valid_prefix + 'yelp_academic_dataset_tip.csv'
user_valid_fname = valid_prefix + 'yelp_academic_dataset_user.csv'
df_business_valid = pd.read_csv(business_valid_fname)
df_review_valid = pd.read_csv(review_valid_fname)
df_checkins_valid = pd.read_csv(checkins_valid_fname)
df_tip_valid = pd.read_csv(tip_valid_fname)
df_user_valid = pd.read_csv(user_valid_fname)
count_business_valid = df_business_valid.shape[0]
count_review_valid = df_review_valid.shape[0]
count_checkins_valid = df_checkins_valid.shape[0]
count_tip_valid = df_tip_valid.shape[0]
count_user_valid = df_user_valid.shape[0]
df_valid_busi_review_count = df_review_valid.groupby(['business_id']).agg(['count'])
dict_valid_busi_review_count = df_valid_busi_review_count['review_id'].apply(list).to_dict()['count']
new_pdf_valid_busi_review_count = pd.DataFrame.from_dict(dict_valid_busi_review_count, orient='index').reset_index()
new_pdf_valid_busi_review_count.columns = ['business_id', 'review_count2']
df_business_valid = df_business_valid.join(new_pdf_valid_busi_review_count.set_index('business_id'), on='business_id')
df_business_valid.to_csv(business_valid_fname2, index=False)
"""
Test
"""
test_prefix = "US/" + state + '/' + bt + "/test/" + state + "_test_"
business_test_fname = test_prefix + 'yelp_academic_dataset_business.csv'
business_test_fname2 = test_prefix + 'yelp_academic_dataset_business2.csv'
review_test_fname = test_prefix + 'yelp_academic_dataset_review.csv'
checkins_test_fname = test_prefix + 'yelp_academic_dataset_checkin.csv'
tip_test_fname = test_prefix + 'yelp_academic_dataset_tip.csv'
user_test_fname = test_prefix + 'yelp_academic_dataset_user.csv'
df_business_test = pd.read_csv(business_test_fname)
df_review_test = pd.read_csv(review_test_fname)
df_checkins_test = pd.read_csv(checkins_test_fname)
df_tip_test = pd.read_csv(tip_test_fname)
df_user_test = pd.read_csv(user_test_fname)
count_business_test = df_business_test.shape[0]
count_review_test = df_review_test.shape[0]
count_checkins_test = df_checkins_test.shape[0]
count_tip_test = df_tip_test.shape[0]
count_user_test = df_user_test.shape[0]
df_test_busi_review_count = df_review_test.groupby(['business_id']).agg(['count'])
dict_test_busi_review_count = df_test_busi_review_count['review_id'].apply(list).to_dict()['count']
new_pdf_test_busi_review_count = pd.DataFrame.from_dict(dict_test_busi_review_count, orient='index').reset_index()
new_pdf_test_busi_review_count.columns = ['business_id', 'review_count2']
df_business_test = df_business_test.join(new_pdf_test_busi_review_count.set_index('business_id'), on='business_id')
df_business_test.to_csv(business_test_fname2, index=False)
# write other info to csv
with open("US/" + state + '/' + bt + '/' + state + '_stats.csv', mode='wb') as f:
writer = csv.writer(f)
writer.writerow(["Business Train Count", count_business_train])
writer.writerow(["Review Train Count", count_review_train])
writer.writerow(["Check-in Train Count", count_checkins_train])
writer.writerow(["Tip Train Count", count_tip_train])
writer.writerow(["User Train Count", count_user_train])
writer.writerow(["Business valid Count", count_business_valid])
writer.writerow(["Review valid Count", count_review_valid])
writer.writerow(["Check-in valid Count", count_checkins_valid])
writer.writerow(["Tip valid Count", count_tip_valid])
writer.writerow(["User valid Count", count_user_valid])
writer.writerow(["Business Test Count", count_business_test])
writer.writerow(["Review Test Count", count_review_test])
writer.writerow(["Check-in Test Count", count_checkins_test])
writer.writerow(["Tip Test Count", count_tip_test])
writer.writerow(["User Test Count", count_user_test])
return
def validate(states):
# sqlContext = SQLContext(sc)
for state in states:
if not os.path.exists("US/" + state):
continue
"""
Train
"""
train_prefix = "US/" + state + "/train/" + state + "_train_"
business_train_fname = train_prefix + 'yelp_academic_dataset_business.csv'
business_train_fname2 = train_prefix + 'yelp_academic_dataset_business2.csv'
review_train_fname = train_prefix + 'yelp_academic_dataset_review.csv'
checkins_train_fname = train_prefix + 'yelp_academic_dataset_checkin.csv'
tip_train_fname = train_prefix + 'yelp_academic_dataset_tip.csv'
user_train_fname = train_prefix + 'yelp_academic_dataset_user.csv'
df_business_train = pd.read_csv(business_train_fname)
df_review_train = pd.read_csv(review_train_fname)
df_checkins_train = pd.read_csv(checkins_train_fname)
df_tip_train = pd.read_csv(tip_train_fname)
df_user_train = | pd.read_csv(user_train_fname) | pandas.read_csv |
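# --- Pattern sketch (illustrative only) ---------------------------------------
# The review-count merge repeated above for train/valid/test reduces to a
# groupby-size plus join. A compact pandas equivalent on toy data (column names
# mirror the Yelp dumps):
if __name__ == '__main__':
    reviews = pd.DataFrame({'business_id': ['a', 'a', 'b'],
                            'review_id': ['r1', 'r2', 'r3']})
    business = pd.DataFrame({'business_id': ['a', 'b', 'c']})
    counts = reviews.groupby('business_id').size().rename('review_count2')
    business = business.join(counts, on='business_id')
    print(business)   # business 'c' gets NaN because it received no reviews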
import collections
import ixmp
import itertools
import warnings
import pandas as pd
import numpy as np
from ixmp.utils import pd_read, pd_write
from message_ix.utils import isscalar, logger
def _init_scenario(s, commit=False):
"""Initialize a MESSAGEix Scenario object with default values"""
inits = (
# {
# 'test': False # some test,
# 'exec': [(pass, {'args': ()}), ],
# },
)
pass_idx = [i for i, init in enumerate(inits) if init['test']]
if len(pass_idx) == 0:
return # leave early, all init tests pass
if commit:
s.check_out()
for idx in pass_idx:
for exec_info in inits[idx]['exec']:
func = exec_info[0]
args = exec_info[1].pop('args', tuple())
kwargs = exec_info[1].pop('kwargs', dict())
func(*args, **kwargs)
if commit:
        s.commit('Initialized with standard sets and params')
class Scenario(ixmp.Scenario):
def __init__(self, mp, model, scenario=None, version=None, annotation=None,
cache=False, clone=None, **kwargs):
"""Initialize a new message_ix.Scenario (structured input data and solution)
or get an existing scenario from the ixmp database instance
Parameters
----------
mp : ixmp.Platform
model : string
model name
scenario : string
scenario name
version : string or integer
initialize a new scenario (if version == 'new'), or
load a specific version from the database (if version is integer)
annotation : string
a short annotation/comment (when initializing a new scenario)
cache : boolean
keep all dataframes in memory after first query (default: False)
clone : Scenario, optional
make a clone of an existing scenario
"""
if 'scen' in kwargs:
warnings.warn(
'`scen` is deprecated and will be removed in the next' +
' release, please use `scenario`')
scenario = kwargs.pop('scen')
if version is not None and clone is not None:
raise ValueError(
'Can not provide both version and clone as arguments')
if clone is not None:
jscen = clone._jobj.clone(model, scenario, annotation,
clone._keep_sol, clone._first_model_year)
elif version == 'new':
scheme = 'MESSAGE'
jscen = mp._jobj.newScenario(model, scenario, scheme, annotation)
elif isinstance(version, int):
jscen = mp._jobj.getScenario(model, scenario, version)
else:
jscen = mp._jobj.getScenario(model, scenario)
self.is_message_scheme = True
super(Scenario, self).__init__(mp, model, scenario, jscen, cache=cache)
if not self.has_solution():
_init_scenario(self, commit=version != 'new')
def cat_list(self, name):
"""return a list of all categories for a set
Parameters
----------
name : string
name of the set
"""
return ixmp.to_pylist(self._jobj.getTypeList(name))
def add_cat(self, name, cat, keys, is_unique=False):
"""add a set element key to the respective category mapping
Parameters
----------
name : string
name of the set
cat : string
name of the category
keys : list of strings
element keys to be added to the category mapping
"""
self._jobj.addCatEle(name, str(cat), ixmp.to_jlist(keys), is_unique)
def cat(self, name, cat):
"""return a list of all set elements mapped to a category
Parameters
----------
name : string
name of the set
cat : string
name of the category
"""
return ixmp.to_pylist(self._jobj.getCatEle(name, cat))
def has_solution(self):
"""Returns True if scenario currently has a solution"""
try:
return not np.isnan(self.var('OBJ')['lvl'])
except Exception:
return False
def add_spatial_sets(self, data):
"""Add sets related to spatial dimensions of the model
Parameters
----------
data : dict or other
Examples
--------
data = {'country': 'Austria'}
data = {'country': ['Austria', 'Germany']}
data = {'country': {'Austria': {'state': ['Vienna', 'Lower Austria']}}}
"""
nodes = []
levels = []
hierarchy = []
def recurse(k, v, parent='World'):
            if isinstance(v, collections.abc.Mapping):
for _parent, _data in v.items():
for _k, _v in _data.items():
recurse(_k, _v, parent=_parent)
level = k
children = [v] if isscalar(v) else v
for child in children:
hierarchy.append([level, child, parent])
nodes.append(child)
levels.append(level)
for k, v in data.items():
recurse(k, v)
self.add_set("node", nodes)
self.add_set("lvl_spatial", levels)
self.add_set("map_spatial_hierarchy", hierarchy)
def add_horizon(scenario, data):
"""Add sets related to temporal dimensions of the model
Parameters
----------
scenario : ixmp.Scenario
data : dict or other
Examples
--------
data = {'year': [2010, 2020]}
data = {'year': [2010, 2020], 'firstmodelyear': 2020}
"""
if 'year' not in data:
raise ValueError('"year" must be in temporal sets')
horizon = data['year']
scenario.add_set("year", horizon)
first = data['firstmodelyear'] if 'firstmodelyear'\
in data else horizon[0]
scenario.add_cat("year", "firstmodelyear", first, is_unique=True)
def vintage_and_active_years(self, ya_args=None, in_horizon=True):
"""Return a 2-tuple of valid pairs of vintage years and active years
for use with data input. A valid year-vintage, year-active pair is
one in which:
- year-vintage <= year-active
- both within the model's 'year' set
- year-active >= the model's first year *or* within
ixmp.Scenario.years_active() for a given node, technology and vintage
(optional)
Parameters
----------
ya_args : arguments to ixmp.Scenario.years_active(), optional
in_horizon : restrict years returned to be within the current model
horizon, optional, default: True
"""
horizon = self.set('year')
first = self.cat('year', 'firstmodelyear')[0] or horizon[0]
if ya_args:
if len(ya_args) != 3:
raise ValueError('3 arguments are required if using `ya_args`')
years_active = self.years_active(*ya_args)
combos = itertools.product([ya_args[2]], years_active)
else:
combos = itertools.product(horizon, horizon)
# TODO: casting to int here is probably bad, but necessary for now
first = int(first)
combos = [(int(y1), int(y2)) for y1, y2 in combos]
def valid(y_v, y_a):
# TODO: casting to int here is probably bad
ret = y_v <= y_a
if in_horizon:
ret &= y_a >= first
return ret
year_pairs = [(y_v, y_a) for y_v, y_a in combos if valid(y_v, y_a)]
v_years, a_years = zip(*year_pairs)
return pd.DataFrame({'year_vtg': v_years, 'year_act': a_years})
def solve(self, model='MESSAGE', **kwargs):
"""Solve a MESSAGE Scenario. See ixmp.Scenario.solve() for arguments.
The default model is 'MESSAGE', but can be overwritten with, e.g.,
`message_ix.Scenario.solve(model='MESSAGE-MACRO')`.
"""
return super(Scenario, self).solve(model=model, **kwargs)
def clone(self, model=None, scenario=None, annotation=None,
keep_solution=True, first_model_year=None, **kwargs):
"""clone the current scenario and return the new scenario
Parameters
----------
model : string
new model name
scenario : string
new scenario name
annotation : string
explanatory comment (optional)
keep_solution : boolean, default, True
indicator whether to include an existing solution
in the cloned scenario
first_model_year: int, default None
new first model year in cloned scenario
('slicing', only available for MESSAGE-scheme scenarios)
"""
if 'keep_sol' in kwargs:
warnings.warn(
'`keep_sol` is deprecated and will be removed in the next' +
' release, please use `keep_solution`')
keep_solution = kwargs.pop('keep_sol')
if 'scen' in kwargs:
warnings.warn(
'`scen` is deprecated and will be removed in the next' +
' release, please use `scenario`')
scenario = kwargs.pop('scen')
self._keep_sol = keep_solution
self._first_model_year = first_model_year or 0
model = self.model if not model else model
scenario = self.scenario if not scenario else scenario
return Scenario(self.platform, model, scenario, annotation=annotation,
cache=self._cache, clone=self)
def rename(self, name, mapping, keep=False):
"""Rename an element in a set
Parameters
----------
name : str
name of the set to change (e.g., 'technology')
mapping : str
mapping of old (current) to new set element names
keep : bool, optional, default: False
keep the old values in the model
"""
try:
self.check_out()
commit = True
        except Exception:
commit = False
keys = list(mapping.keys())
values = list(mapping.values())
# search for from_tech in sets and replace
for item in self.set_list():
ix_set = self.set(item)
if isinstance(ix_set, pd.DataFrame):
if name in ix_set.columns and not ix_set.empty:
for key, value in mapping.items():
df = ix_set[ix_set[name] == key]
if not df.empty:
df[name] = value
self.add_set(item, df)
elif ix_set.isin(keys).any(): # ix_set is pd.Series
for key, value in mapping.items():
if ix_set.isin([key]).any():
self.add_set(item, value)
# search for from_tech in pars and replace
for item in self.par_list():
if name not in self.idx_names(item):
continue
for key, value in mapping.items():
df = self.par(item, filters={name: [key]})
if not df.empty:
df[name] = value
self.add_par(item, df)
# this removes all instances of from_tech in the model
if not keep:
for key in keys:
self.remove_set(name, key)
# commit
if commit:
self.commit('Renamed {} using mapping {}'.format(name, mapping))
def to_excel(self, fname):
"""Save a scenario as an Excel file. NOTE: Cannot export
solution currently (only model data) due to limitations in excel sheet
names (cannot have multiple sheet names which are identical except for
upper/lower case).
Parameters
----------
fname : string
path to file
"""
funcs = {
'set': (self.set_list, self.set),
'par': (self.par_list, self.par),
}
ix_name_map = {}
dfs = {}
for ix_type, (list_func, get_func) in funcs.items():
for item in list_func():
df = get_func(item)
df = | pd.Series(df) | pandas.Series |
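# --- Year-pair logic sketch (illustrative only) -------------------------------
# vintage_and_active_years above keeps (vintage, active) combinations with
# vintage <= active and active >= the first model year. The core filter on toy
# data, independent of the ixmp backend:
def _demo_year_pairs(horizon=(2010, 2020, 2030), first=2020):
    combos = itertools.product(horizon, horizon)
    pairs = [(y_v, y_a) for y_v, y_a in combos if y_v <= y_a and y_a >= first]
    return pd.DataFrame(pairs, columns=['year_vtg', 'year_act'])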
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from utils import binary_sampler
def data_loader(data_name, miss_rate, target_column=None):
"""Loads datasets and introduce missingness.
Args:
- data_name: letter, spam, or mnist
- miss_rate: the probability of missing components
Returns:
data_x: original data
miss_data_x: data with missing values
data_m: indicator matrix for missing components
"""
file_name = 'data/' + data_name + '.csv'
print(file_name)
data_x = | pd.read_csv(file_name, delimiter=',') | pandas.read_csv |
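# --- Missingness sketch (illustrative only) -----------------------------------
# data_loader is expected to mask entries with probability miss_rate via
# binary_sampler; that expectation is an assumption, spelled out here with a
# numpy-only equivalent so the intent of data_m / miss_data_x is clear.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    data_x = rng.normal(size=(5, 3))
    miss_rate = 0.2
    data_m = (rng.uniform(size=data_x.shape) > miss_rate).astype(float)  # 1 = observed, 0 = missing
    miss_data_x = data_x.copy()
    miss_data_x[data_m == 0] = np.nan
    print(miss_data_x)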
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal, assert_series_equal
from zenml.preprocessing import (add_prefix, add_suffix, strip_whitespace, string_to_float,
remove_string, replace_string_with_nan, replace_nan_with_string,
like_float_to_int)
class TestText(unittest.TestCase):
def test_add_prefix(self):
df = pd.DataFrame({'male_names': ['Bobby', 'John']})
result = pd.DataFrame({'male_names': ['mr_Bobby', 'mr_John']}).male_names
assert_series_equal(add_prefix('mr_', df.male_names), result, check_dtype=True)
def test_add_suffix(self):
df = pd.DataFrame({'male_names': ['Bobby', 'John']})
result = pd.DataFrame({'male_names': ['Bobby-male', 'John-male']}).male_names
assert_series_equal(add_suffix('-male', df.male_names), result, check_dtype=True)
def test_strip_whitespace(self):
df = pd.DataFrame({'description': [' circus at the whitehouse ', 'politics suck ']})
result = pd.DataFrame({'description': ['circus at the whitehouse', 'politics suck']}).description
assert_series_equal(strip_whitespace(df.description), result, check_dtype=True)
def test_string_to_float(self):
df = | pd.DataFrame({'probs': ['0.3', '0.8', 2]}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 21 14:08:43 2019
to produce X and y use combine_pos_neg_from_nc_file or
prepare_X_y_for_holdout_test
@author: ziskin
"""
from PW_paths import savefig_path
from PW_paths import work_yuval
from pathlib import Path
cwd = Path().cwd()
hydro_path = work_yuval / 'hydro'
axis_path = work_yuval/'axis'
gis_path = work_yuval / 'gis'
ims_path = work_yuval / 'IMS_T'
hydro_ml_path = hydro_path / 'hydro_ML'
gnss_path = work_yuval / 'GNSS_stations'
# 'tela': 17135
hydro_pw_dict = {'nizn': 25191, 'klhv': 21105, 'yrcm': 55165,
'ramo': 56140, 'drag': 48125, 'dsea': 48192,
'spir': 56150, 'nrif': 60105, 'elat': 60190
}
hydro_st_name_dict = {25191: 'Lavan - new nizana road',
21105: 'Shikma - Tel milcha',
55165: 'Mamsheet',
56140: 'Ramon',
48125: 'Draga',
48192: 'Chiemar - down the cliff',
46150: 'Nekrot - Top',
60105: 'Yaelon - Kibutz Yahel',
60190: 'Solomon - Eilat'}
best_hp_models_dict = {'SVC': {'kernel': 'rbf', 'C': 1.0, 'gamma': 0.02,
'coef0': 0.0, 'degree': 1},
'RF': {'max_depth': 5, 'max_features': 'auto',
'min_samples_leaf': 1, 'min_samples_split': 2,
'n_estimators': 400},
'MLP': {'alpha': 0.1, 'activation': 'relu',
'hidden_layer_sizes': (10,10,10), 'learning_rate': 'constant',
'solver': 'lbfgs'}}
scorer_order = ['precision', 'recall', 'f1', 'accuracy', 'tss', 'hss']
tsafit_dict = {'lat': 30.985556, 'lon': 35.263056,
'alt': -35.75, 'dt_utc': '2018-04-26T10:15:00'}
axis_southern_stations = ['Dimo', 'Ohad', 'Ddse', 'Yotv', 'Elat', 'Raha', 'Yaha']
soi_axis_dict = {'yrcm': 'Dimo',
'slom': 'Ohad',
'dsea': 'Ddse',
'nrif': 'Yotv',
'elat': 'Elat',
'klhv': 'Raha',
'spir': 'Yaha'}
def plot_mean_abs_shap_values_features(SV, fix_xticklabels=True):
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from natsort import natsorted
features = ['pwv', 'pressure', 'DOY']
# sns.set_palette('Dark2', 6)
sns.set_theme(style='ticks', font_scale=1.5)
# sns.set_style('whitegrid')
# sns.set_style('ticks')
sv = np.abs(SV).mean('sample').sel(clas=0).reset_coords(drop=True)
gr_spec = [20, 20, 1]
fig, axes = plt.subplots(1, 3, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec})
try:
axes.flatten()
except AttributeError:
axes = [axes]
for i, f in enumerate(features):
fe = [x for x in sv['feature'].values if f in x]
dsf = sv.sel(feature=fe).reset_coords(drop=True).to_dataframe()
title = '{}'.format(f.upper())
dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20,
width=.8, color='k', alpha=0.8)
axes[i].set_title(title)
dsf_sum = dsf.sum().tolist()
handles, labels = axes[i].get_legend_handles_labels()
labels = [
'{} ({:.1f} %)'.format(
x, y) for x, y in zip(
labels, dsf_sum)]
# axes[i].legend(handles=handles, labels=labels, prop={'size': fontsize-3}, loc='upper center')
axes[i].set_ylabel('mean(|SHAP value|)\n(average impact\non model output magnitude)')
axes[i].grid(axis='y', zorder=1)
if fix_xticklabels:
# n = sum(['pwv' in x for x in sv.feature.values])
axes[2].xaxis.set_ticklabels('')
axes[2].set_xlabel('')
hrs = np.arange(-1, -25, -1)
axes[0].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12)
axes[2].tick_params()
axes[0].set_xlabel('Hours prior to flood')
axes[1].set_xlabel('Hours prior to flood')
fig.tight_layout()
filename = 'RF_shap_values_{}.png'.format('+'.join(features))
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def read_binary_classification_shap_values_to_pandas(shap_values, X):
import xarray as xr
SV0 = X.copy(data=shap_values[0])
SV1 = X.copy(data=shap_values[1])
SV = xr.concat([SV0, SV1], dim='clas')
SV['clas'] = [0, 1]
return SV
def get_shap_values_RF_classifier(plot=True):
import shap
X, y = combine_pos_neg_from_nc_file()
ml = ML_Classifier_Switcher()
rf = ml.pick_model('RF')
rf.set_params(**best_hp_models_dict['RF'])
X = select_doy_from_feature_list(X, features=['pwv', 'pressure', 'doy'])
rf.fit(X, y)
explainer = shap.TreeExplainer(rf)
shap_values = explainer.shap_values(X.values)
if plot:
shap.summary_plot(shap_values, X, feature_names=[
x for x in X.feature.values], max_display=49, sort=False)
return shap_values
def interpolate_pwv_to_tsafit_event(path=work_yuval, savepath=work_yuval):
import pandas as pd
import xarray as xr
from PW_stations import produce_geo_gnss_solved_stations
from interpolation_routines import interpolate_var_ds_at_multiple_dts
from aux_gps import save_ncfile
# get gnss soi-apn pwv data and geo-meta data:
geo_df = produce_geo_gnss_solved_stations(plot=False)
pw = xr.load_dataset(work_yuval/'GNSS_PW_thresh_50.nc')
pw = pw[[x for x in pw if '_error' not in x]]
pw = pw.sel(time=slice('2018-04-25', '2018-04-26'))
pw = pw.drop_vars(['elat', 'elro', 'csar', 'slom'])
# get tsafit data:
predict_df = pd.DataFrame(tsafit_dict, index=['tsafit'])
df_inter = interpolate_var_ds_at_multiple_dts(pw, geo_df, predict_df)
da=df_inter['interpolated_lr_fixed'].to_xarray()
da.name = 'pwv'
da.attrs['operation'] = 'interploated from SOI-APN PWV data'
da.attrs['WV scale height'] = 'variable from SOI-APN data'
da.attrs.update(**tsafit_dict)
if savepath is not None:
filename = 'Tsafit_PWV_event.nc'
save_ncfile(da, savepath, filename)
return da
def plot_tsafit_event(path=work_yuval):
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style='ticks', font_scale=1.5)
da = xr.load_dataarray(path / 'Tsafit_PWV_event.nc')
fig, ax = plt.subplots(figsize=(11, 8))
da_sliced = da.sel(time=slice('2018-04-26T00:00:00', '2018-04-26T12:00:00'))
# da_sliced.name = 'PWV [mm]'
da_sliced = da_sliced.rename({'time': 'Time [UTC]'})
da_sliced.to_dataframe().plot(ax=ax, ylabel='PWV [mm]', linewidth=2, marker='o', legend=False)
dt = pd.to_datetime(da.attrs['dt_utc'])
ax.axvline(dt, color='r', linestyle='--', linewidth=2, label='T')
handles, labels = ax.get_legend_handles_labels()
plt.legend(handles=handles, labels=['PWV', 'Tsafit Flood Event'])
ax.grid(True)
# ax.set_xlabel('Time [UTC]')
fig.tight_layout()
fig.suptitle('PWV from SOI-APN over Tsafit area on 2018-04-26')
fig.subplots_adjust(top=0.941)
return fig
# TODO: treat all pwv from events as follows:
# For each station:
# 0) rolling mean to all pwv 1 hour
# 1) take 288 points before events, if < 144 gone then drop
# 2) interpolate them 12H using spline/other
# 3) then, check if dts coincide 1 day before, if not concat all dts+pwv for each station
# 4) prepare features, such as pressure, doy, try to get pressure near the stations and remove the longterm hour dayofyear
# pressure in BD anoms is highly correlated with SEDOM (0.9) and ELAT (0.88) so no need for local pressure features
# fixed filling with jerusalem centre since 2 drag events dropped due to lack of data 2018-11 2019-02 in pressure
# 5) feature addition: should be like pwv steps 1-3,
# 6) negative events should be sampled separately for each station
# 7) now prepare pwv and pressure to single ds with 1 hourly sample rate
# 8) produce positives and save them to file!
# 9) produce a way to get negatives considering the positives
# maybe implement permutation importance to pwv ? see what is more important to
# the model in 24 hours ? only on SVC and MLP ?
# implement TSS and HSS scores and test them (make_scorer from confusion matrix) - see the sketch after this comment block
# redo results but with inner and outer splits of 4, 4
# plot and see best_score per refit-scorer - this is the best score of GridSearchCV on the entire
# train/validation subset per each outerfold - basically see if the test_metric increased after the gridsearchcv as it should
# use holdout set
# implement repeatedstratifiedkfold and run it...
# check for stability of the gridsearch CV...also run with 4-folds ?
# finalize the permutation_importances and permutation_test_scores
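# --- Scorer sketch (illustrative only) -----------------------------------------
# tss_score/hss_score are passed to make_scorer further down but are defined
# elsewhere in this module; the standard contingency-table definitions they are
# expected to follow (TSS = sensitivity + specificity - 1, HSS = Heidke skill
# score) are sketched here. This is an assumption about their implementation.
def _tss_hss_from_counts(tp, fn, fp, tn):
    tss = tp / (tp + fn) - fp / (fp + tn)
    hss = 2.0 * (tp * tn - fn * fp) / ((tp + fn) * (fn + tn) + (tp + fp) * (fp + tn))
    return tss, hss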
def prepare_tide_events_GNSS_dataset(hydro_path=hydro_path):
import xarray as xr
import pandas as pd
import numpy as np
from aux_gps import xr_reindex_with_date_range
feats = xr.load_dataset(
hydro_path/'hydro_tides_hourly_features_with_positives.nc')
ds = feats['Tides'].to_dataset('GNSS').rename({'tide_event': 'time'})
da_list = []
for da in ds:
time = ds[da].dropna('time')
daa = time.copy(data=np.ones(time.shape))
daa['time'] = pd.to_datetime(time.values)
daa.name = time.name + '_tide'
da_list.append(daa)
ds = xr.merge(da_list)
li = [xr_reindex_with_date_range(ds[x], freq='H') for x in ds]
ds = xr.merge(li)
return ds
def select_features_from_X(X, features='pwv'):
if isinstance(features, str):
f = [x for x in X.feature.values if features in x]
X = X.sel(feature=f)
elif isinstance(features, list):
fs = []
for f in features:
fs += [x for x in X.feature.values if f in x]
X = X.sel(feature=fs)
return X
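# --- Behaviour sketch (illustrative only) ---------------------------------------
# select_features_from_X keeps every feature whose name contains the given
# substring(s), e.g. 'pwv' matches 'pwv_1'...'pwv_24'. A toy self-check:
def _demo_select_features():
    import numpy as np
    import xarray as xr
    X = xr.DataArray(np.zeros((2, 4)), dims=['sample', 'feature'],
                     coords={'feature': ['pwv_1', 'pwv_2', 'pressure_1', 'DOY']})
    assert list(select_features_from_X(X, 'pwv').feature.values) == ['pwv_1', 'pwv_2']
    assert list(select_features_from_X(X, ['pwv', 'DOY']).feature.values) == ['pwv_1', 'pwv_2', 'DOY']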
def combine_pos_neg_from_nc_file(hydro_path=hydro_path,
negative_sample_num=1,
seed=1, std=True):
from aux_gps import path_glob
from sklearn.utils import resample
import xarray as xr
import numpy as np
# import pandas as pd
if std:
file = path_glob(
hydro_path, 'hydro_tides_hourly_features_with_positives_negatives_std*.nc')[-1]
else:
file = path_glob(
hydro_path, 'hydro_tides_hourly_features_with_positives_negatives_*.nc')[-1]
ds = xr.open_dataset(file)
# get the positive features and produce target:
X_pos = ds['X_pos'].rename({'positive_sample': 'sample'})
y_pos = xr.DataArray(np.ones(X_pos['sample'].shape), dims=['sample'])
y_pos['sample'] = X_pos['sample']
# choose at random y_pos size of negative class:
X_neg = ds['X_neg'].rename({'negative_sample': 'sample'})
pos_size = y_pos['sample'].size
np.random.seed(seed)
# negatives = []
for n_samples in [x for x in range(negative_sample_num)]:
# dts = np.random.choice(X_neg['sample'], size=y_pos['sample'].size,
# replace=False)
# print(np.unique(dts).shape)
# negatives.append(X_neg.sel(sample=dts))
negative = resample(X_neg, replace=False,
n_samples=pos_size * negative_sample_num,
random_state=seed)
negatives = np.split(negative, negative_sample_num, axis=0)
Xs = []
ys = []
for X_negative in negatives:
y_neg = xr.DataArray(np.zeros(X_negative['sample'].shape), dims=['sample'])
y_neg['sample'] = X_negative['sample']
# now concat all X's and y's:
X = xr.concat([X_pos, X_negative], 'sample')
y = xr.concat([y_pos, y_neg], 'sample')
X.name = 'X'
Xs.append(X)
ys.append(y)
if len(negatives) == 1:
return Xs[0], ys[0]
else:
return Xs, ys
def drop_hours_in_pwv_pressure_features(X, last_hours=7, verbose=True):
import numpy as np
Xcopy = X.copy()
pwvs_to_drop = ['pwv_{}'.format(x) for x in np.arange(24-last_hours + 1, 25)]
if set(pwvs_to_drop).issubset(set(X.feature.values)):
if verbose:
print('dropping {} from X.'.format(', '.join(pwvs_to_drop)))
Xcopy = Xcopy.drop_sel(feature=pwvs_to_drop)
pressures_to_drop = ['pressure_{}'.format(x) for x in np.arange(24-last_hours + 1, 25)]
if set(pressures_to_drop).issubset(set(X.feature.values)):
if verbose:
print('dropping {} from X.'.format(', '.join(pressures_to_drop)))
Xcopy = Xcopy.drop_sel(feature=pressures_to_drop)
return Xcopy
def check_if_negatives_are_within_positives(neg_da, hydro_path=hydro_path):
import xarray as xr
import pandas as pd
pos_da = xr.open_dataset(
hydro_path / 'hydro_tides_hourly_features_with_positives.nc')['X']
dt_pos = pos_da.sample.to_dataframe()
dt_neg = neg_da.sample.to_dataframe()
dt_all = dt_pos.index.union(dt_neg.index)
dff = pd.DataFrame(dt_all, index=dt_all)
dff = dff.sort_index()
samples_within = dff[(dff.diff()['sample'] <= pd.Timedelta(1, unit='D'))]
num = samples_within.size
print('samples that are within a day of each other: {}'.format(num))
print('samples are: {}'.format(samples_within))
return dff
def produce_negatives_events_from_feature_file(hydro_path=hydro_path, seed=42,
batches=1, verbose=1, std=True):
# do the same thing for pressure (as for pwv), but not for
import xarray as xr
import numpy as np
import pandas as pd
from aux_gps import save_ncfile
feats = xr.load_dataset(hydro_path / 'hydro_tides_hourly_features.nc')
feats = feats.rename({'doy': 'DOY'})
if std:
pos_filename = 'hydro_tides_hourly_features_with_positives_std.nc'
else:
pos_filename = 'hydro_tides_hourly_features_with_positives.nc'
all_tides = xr.open_dataset(
hydro_path / pos_filename)['X_pos']
# pos_tides = xr.open_dataset(hydro_path / 'hydro_tides_hourly_features_with_positives.nc')['tide_datetimes']
tides = xr.open_dataset(
hydro_path / pos_filename)['Tides']
# get the positives (tide events) for each station:
df_stns = tides.to_dataset('GNSS').to_dataframe()
# get all positives (tide events) for all stations:
df = all_tides.positive_sample.to_dataframe()['positive_sample']
df.columns = ['sample']
stns = [x for x in hydro_pw_dict.keys()]
other_feats = ['DOY', 'doy_sin', 'doy_cos']
# main stns df features (pwv)
pwv_df = feats[stns].to_dataframe()
pressure = feats['bet-dagan'].to_dataframe()['bet-dagan']
# define the initial no_choice_dt_range from the positive dt_range:
no_choice_dt_range = [pd.date_range(
start=dt, periods=48, freq='H') for dt in df]
no_choice_dt_range = pd.DatetimeIndex(
np.unique(np.hstack(no_choice_dt_range)))
dts_to_choose_from = pwv_df.index.difference(no_choice_dt_range)
# dts_to_choose_from_pressure = pwv_df.index.difference(no_choice_dt_range)
# loop over all stns and produce negative events:
np.random.seed(seed)
neg_batches = []
for i in np.arange(1, batches + 1):
if verbose >= 0:
print('preparing batch {}:'.format(i))
neg_stns = []
for stn in stns:
dts_df = df_stns[stn].dropna()
pwv = pwv_df[stn].dropna()
# loop over all events in on stn:
negatives = []
negatives_pressure = []
# neg_samples = []
if verbose >= 1:
print('finding negatives for station {}, events={}'.format(
stn, len(dts_df)))
# print('finding negatives for station {}, dt={}'.format(stn, dt.strftime('%Y-%m-%d %H:%M')))
cnt = 0
while cnt < len(dts_df):
# get random number from each stn pwv:
# r = np.random.randint(low=0, high=len(pwv.index))
# random_dt = pwv.index[r]
random_dt = np.random.choice(dts_to_choose_from)
negative_dt_range = pd.date_range(
start=random_dt, periods=24, freq='H')
if not (no_choice_dt_range.intersection(negative_dt_range)).empty:
# print('#')
if verbose >= 2:
print('Overlap!')
continue
# get the actual pwv and check it is full (24hours):
negative = pwv.loc[pwv.index.intersection(negative_dt_range)]
neg_pressure = pressure.loc[pwv.index.intersection(
negative_dt_range)]
if len(negative.dropna()) != 24 or len(neg_pressure.dropna()) != 24:
# print('!')
if verbose >= 2:
print('NaNs!')
continue
if verbose >= 2:
print('number of dts that are already chosen: {}'.format(
len(no_choice_dt_range)))
negatives.append(negative)
negatives_pressure.append(neg_pressure)
# now add to the no_choice_dt_range the negative dt_range we just aquired:
negative_dt_range_with_padding = pd.date_range(
start=random_dt-pd.Timedelta(24, unit='H'), end=random_dt+pd.Timedelta(23, unit='H'), freq='H')
no_choice_dt_range = pd.DatetimeIndex(
np.unique(np.hstack([no_choice_dt_range, negative_dt_range_with_padding])))
dts_to_choose_from = dts_to_choose_from.difference(
no_choice_dt_range)
if verbose >= 2:
print('number of dts to choose from: {}'.format(
len(dts_to_choose_from)))
cnt += 1
neg_da = xr.DataArray(negatives, dims=['sample', 'feature'])
neg_da['feature'] = ['{}_{}'.format(
'pwv', x) for x in np.arange(1, 25)]
neg_samples = [x.index[0] for x in negatives]
neg_da['sample'] = neg_samples
neg_pre_da = xr.DataArray(
negatives_pressure, dims=['sample', 'feature'])
neg_pre_da['feature'] = ['{}_{}'.format(
'pressure', x) for x in np.arange(1, 25)]
neg_pre_samples = [x.index[0] for x in negatives_pressure]
neg_pre_da['sample'] = neg_pre_samples
neg_da = xr.concat([neg_da, neg_pre_da], 'feature')
neg_da = neg_da.sortby('sample')
neg_stns.append(neg_da)
da_stns = xr.concat(neg_stns, 'sample')
da_stns = da_stns.sortby('sample')
# now loop over the remaining features (which are stns agnostic)
# and add them with the same negative datetimes of the pwv already aquired:
dts = [pd.date_range(x.item(), periods=24, freq='H')
for x in da_stns['sample']]
dts_samples = [x[0] for x in dts]
other_feat_list = []
for feat in feats[other_feats]:
# other_feat_sample_list = []
da_other = xr.DataArray(feats[feat].sel(time=dts_samples).values, dims=['sample'])
# for dt in dts_samples:
# da_other = xr.DataArray(feats[feat].sel(
# time=dt).values, dims=['feature'])
da_other['sample'] = dts_samples
other_feat_list.append(da_other)
# other_feat_da = xr.concat(other_feat_sample_list, 'feature')
da_other_feats = xr.concat(other_feat_list, 'feature')
da_other_feats['feature'] = other_feats
da_stns = xr.concat([da_stns, da_other_feats], 'feature')
neg_batches.append(da_stns)
neg_batch_da = xr.concat(neg_batches, 'sample')
# neg_batch_da['batch'] = np.arange(1, batches + 1)
neg_batch_da.name = 'X_neg'
feats['X_neg'] = neg_batch_da
feats['X_pos'] = all_tides
feats['X_pwv_stns'] = tides
# feats['tide_datetimes'] = pos_tides
feats = feats.rename({'sample': 'negative_sample'})
if std:
filename = 'hydro_tides_hourly_features_with_positives_negatives_std_{}.nc'.format(
batches)
else:
filename = 'hydro_tides_hourly_features_with_positives_negatives_{}.nc'.format(
batches)
save_ncfile(feats, hydro_path, filename)
return neg_batch_da
def produce_positives_from_feature_file(hydro_path=hydro_path, std=True):
import xarray as xr
import pandas as pd
import numpy as np
from aux_gps import save_ncfile
# load features:
if std:
file = hydro_path / 'hydro_tides_hourly_features_std.nc'
else:
file = hydro_path / 'hydro_tides_hourly_features.nc'
feats = xr.load_dataset(file)
feats = feats.rename({'doy': 'DOY'})
# load positive event for each station:
dfs = [read_station_from_tide_database(hydro_pw_dict.get(
x), rounding='1H') for x in hydro_pw_dict.keys()]
dfs = check_if_tide_events_from_stations_are_within_time_window(
dfs, days=1, rounding=None, return_hs_list=True)
da_list = []
positives_per_station = []
for i, feat in enumerate(feats):
try:
_, _, pr = produce_pwv_days_before_tide_events(feats[feat], dfs[i],
plot=False, rolling=None,
days_prior=1,
drop_thresh=0.75,
max_gap='6H',
verbose=0)
print('getting positives from station {}'.format(feat))
positives = [pd.to_datetime(
(x[-1].time + pd.Timedelta(1, unit='H')).item()) for x in pr]
da = xr.DataArray(pr, dims=['sample', 'feature'])
da['sample'] = positives
positives_per_station.append(positives)
da['feature'] = ['pwv_{}'.format(x) for x in np.arange(1, 25)]
da_list.append(da)
except IndexError:
continue
da_pwv = xr.concat(da_list, 'sample')
da_pwv = da_pwv.sortby('sample')
# now add more features:
da_list = []
for feat in ['bet-dagan']:
print('getting positives from feature {}'.format(feat))
positives = []
for dt_end in da_pwv.sample:
dt_st = pd.to_datetime(dt_end.item()) - pd.Timedelta(24, unit='H')
dt_end_end = pd.to_datetime(
dt_end.item()) - pd.Timedelta(1, unit='H')
positive = feats[feat].sel(time=slice(dt_st, dt_end_end))
positives.append(positive)
da = xr.DataArray(positives, dims=['sample', 'feature'])
da['sample'] = da_pwv.sample
if feat == 'bet-dagan':
feat_name = 'pressure'
else:
feat_name = feat
da['feature'] = ['{}_{}'.format(feat_name, x)
for x in np.arange(1, 25)]
da_list.append(da)
da_f = xr.concat(da_list, 'feature')
da_list = []
for feat in ['DOY', 'doy_sin', 'doy_cos']:
print('getting positives from feature {}'.format(feat))
positives = []
for dt in da_pwv.sample:
positive = feats[feat].sel(time=dt)
positives.append(positive)
da = xr.DataArray(positives, dims=['sample'])
da['sample'] = da_pwv.sample
# da['feature'] = feat
da_list.append(da)
da_ff = xr.concat(da_list, 'feature')
da_ff['feature'] = ['DOY', 'doy_sin', 'doy_cos']
da = xr.concat([da_pwv, da_f, da_ff], 'feature')
if std:
filename = 'hydro_tides_hourly_features_with_positives_std.nc'
else:
filename = 'hydro_tides_hourly_features_with_positives.nc'
feats['X_pos'] = da
# now add positives per stations:
pdf = pd.DataFrame(positives_per_station).T
pdf.index.name = 'tide_event'
pos_da = pdf.to_xarray().to_array('GNSS')
pos_da['GNSS'] = [x for x in hydro_pw_dict.keys()]
pos_da.attrs['info'] = 'contains the datetimes of the tide events per GNSS station.'
feats['Tides'] = pos_da
# rename sample to positive sample:
feats = feats.rename({'sample': 'positive_sample'})
save_ncfile(feats, hydro_path, filename)
return feats
def prepare_features_and_save_hourly(work_path=work_yuval, ims_path=ims_path,
savepath=hydro_path, std=True):
import xarray as xr
from aux_gps import save_ncfile
import numpy as np
# pwv = xr.load_dataset(
if std:
pwv_filename = 'GNSS_PW_thresh_0_hour_dayofyear_anoms_sd.nc'
pre_filename = 'IMS_BD_hourly_anoms_std_ps_1964-2020.nc'
else:
pwv_filename = 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc'
pre_filename = 'IMS_BD_hourly_anoms_ps_1964-2020.nc'
# work_path / 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc')
pwv = xr.load_dataset(work_path / pwv_filename)
pwv_stations = [x for x in hydro_pw_dict.keys()]
pwv = pwv[pwv_stations]
# pwv = pwv.rolling(time=12, keep_attrs=True).mean(keep_attrs=True)
pwv = pwv.resample(time='1H', keep_attrs=True).mean(keep_attrs=True)
# bd = xr.load_dataset(ims_path / 'IMS_BD_anoms_5min_ps_1964-2020.nc')
bd = xr.load_dataset(ims_path / pre_filename)
# min_time = pwv.dropna('time')['time'].min()
# bd = bd.sel(time=slice('1996', None)).resample(time='1H').mean()
bd = bd.sel(time=slice('1996', None))
pressure = bd['bet-dagan']
doy = pwv['time'].copy(data=pwv['time'].dt.dayofyear)
doy.name = 'doy'
doy_sin = np.sin(doy * np.pi / 183)
doy_sin.name = 'doy_sin'
doy_cos = np.cos(doy * np.pi / 183)
doy_cos.name = 'doy_cos'
ds = xr.merge([pwv, pressure, doy, doy_sin, doy_cos])
if std:
filename = 'hydro_tides_hourly_features_std.nc'
else:
filename = 'hydro_tides_hourly_features.nc'
save_ncfile(ds, savepath, filename)
return ds
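# --- Cyclic-encoding note (illustrative only) ------------------------------------
# The doy_sin/doy_cos pair built above maps day-of-year onto a circle with a
# period of 2*183 = 366 days, so day 366 and day 1 land next to each other
# instead of 365 "days apart". A quick check on a few sample days:
def _demo_doy_encoding():
    import numpy as np
    doy = np.array([1, 92, 183, 275, 366])
    print(np.round(np.sin(doy * np.pi / 183), 3))   # ~[ 0.017,  1.0,   0.0,  -1.0,   0.0]
    print(np.round(np.cos(doy * np.pi / 183), 3))   # ~[ 1.0,   -0.009, -1.0,  0.009, 1.0]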
def plot_all_decompositions(X, y, n=2):
import xarray as xr
models = [
'PCA',
'LDA',
'ISO_MAP',
'LLE',
'LLE-modified',
'LLE-hessian',
'LLE-ltsa',
'MDA',
'RTE',
'SE',
'TSNE',
'NCA']
names = [
'Principal Components',
'Linear Discriminant',
'Isomap',
'Locally Linear Embedding',
'Modified LLE',
'Hessian LLE',
'Local Tangent Space Alignment',
'MDS embedding',
'Random forest',
'Spectral embedding',
't-SNE',
'NCA embedding']
name_dict = dict(zip(models, names))
da = xr.DataArray(models, dims=['model'])
da['model'] = models
fg = xr.plot.FacetGrid(da, col='model', col_wrap=4,
sharex=False, sharey=False)
for model_str, ax in zip(da['model'].values, fg.axes.flatten()):
model = model_str.split('-')[0]
method = model_str.split('-')[-1]
if model == method:
method = None
try:
ax = scikit_decompose(X, y, model=model, n=n, method=method, ax=ax)
except ValueError:
pass
ax.set_title(name_dict[model_str])
ax.set_xlabel('')
ax.set_ylabel('')
fg.fig.suptitle('various decomposition projections (n={})'.format(n))
return
def scikit_decompose(X, y, model='PCA', n=2, method=None, ax=None):
from sklearn import (manifold, decomposition, ensemble,
discriminant_analysis, neighbors)
import matplotlib.pyplot as plt
import pandas as pd
# from mpl_toolkits.mplot3d import Axes3D
n_neighbors = 30
if model == 'PCA':
X_decomp = decomposition.TruncatedSVD(n_components=n).fit_transform(X)
elif model == 'LDA':
X2 = X.copy()
X2.values.flat[::X.shape[1] + 1] += 0.01
X_decomp = discriminant_analysis.LinearDiscriminantAnalysis(n_components=n
).fit_transform(X2, y)
elif model == 'ISO_MAP':
X_decomp = manifold.Isomap(
n_neighbors, n_components=n).fit_transform(X)
elif model == 'LLE':
# method = 'standard', 'modified', 'hessian' 'ltsa'
if method is None:
method = 'standard'
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method=method)
X_decomp = clf.fit_transform(X)
elif model == 'MDA':
clf = manifold.MDS(n_components=n, n_init=1, max_iter=100)
X_decomp = clf.fit_transform(X)
elif model == 'RTE':
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=n)
X_decomp = pca.fit_transform(X_transformed)
elif model == 'SE':
embedder = manifold.SpectralEmbedding(n_components=n, random_state=0,
eigen_solver="arpack")
X_decomp = embedder.fit_transform(X)
elif model == 'TSNE':
tsne = manifold.TSNE(n_components=n, init='pca', random_state=0)
X_decomp = tsne.fit_transform(X)
elif model == 'NCA':
nca = neighbors.NeighborhoodComponentsAnalysis(init='random',
n_components=n, random_state=0)
X_decomp = nca.fit_transform(X, y)
df = pd.DataFrame(X_decomp)
df.columns = [
'{}_{}'.format(
model,
x +
1) for x in range(
X_decomp.shape[1])]
df['flood'] = y
df['flood'] = df['flood'].astype(int)
df_1 = df[df['flood'] == 1]
df_0 = df[df['flood'] == 0]
if X_decomp.shape[1] == 1:
if ax is not None:
df_1.plot.scatter(ax=ax,
x='{}_1'.format(model),
y='{}_1'.format(model),
color='b', marker='s', alpha=0.3,
label='1',
s=50)
else:
ax = df_1.plot.scatter(
x='{}_1'.format(model),
y='{}_1'.format(model),
color='b',
label='1',
s=50)
df_0.plot.scatter(
ax=ax,
x='{}_1'.format(model),
y='{}_1'.format(model),
color='r', marker='x',
label='0',
s=50)
elif X_decomp.shape[1] == 2:
if ax is not None:
df_1.plot.scatter(ax=ax,
x='{}_1'.format(model),
y='{}_2'.format(model),
color='b', marker='s', alpha=0.3,
label='1',
s=50)
else:
ax = df_1.plot.scatter(
x='{}_1'.format(model),
y='{}_2'.format(model),
color='b',
label='1',
s=50)
df_0.plot.scatter(
ax=ax,
x='{}_1'.format(model),
y='{}_2'.format(model),
color='r',
label='0',
s=50)
elif X_decomp.shape[1] == 3:
ax = plt.figure().gca(projection='3d')
# df_1.plot.scatter(x='{}_1'.format(model), y='{}_2'.format(model), z='{}_3'.format(model), color='b', label='1', s=50, ax=threedee)
ax.scatter(df_1['{}_1'.format(model)],
df_1['{}_2'.format(model)],
df_1['{}_3'.format(model)],
color='b',
label='1',
s=50)
ax.scatter(df_0['{}_1'.format(model)],
df_0['{}_2'.format(model)],
df_0['{}_3'.format(model)],
color='r',
label='0',
s=50)
ax.set_xlabel('{}_1'.format(model))
ax.set_ylabel('{}_2'.format(model))
ax.set_zlabel('{}_3'.format(model))
return ax
def permutation_scikit(X, y, cv=False, plot=True):
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
if not cv:
clf = SVC(C=0.01, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape='ovr', degree=3, gamma=0.032374575428176434,
kernel='poly', max_iter=-1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
clf = SVC(kernel='linear')
# clf = LinearDiscriminantAnalysis()
cv = StratifiedKFold(4, shuffle=True)
# cv = KFold(4, shuffle=True)
n_classes = 2
score, permutation_scores, pvalue = permutation_test_score(
clf, X, y, scoring="f1", cv=cv, n_permutations=1000, n_jobs=-1, verbose=2)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
plt.hist(permutation_scores, 20, label='Permutation scores',
edgecolor='black')
ylim = plt.ylim()
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
else:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, shuffle=True, random_state=42)
param_grid = {
'C': np.logspace(-2, 3, 50), 'gamma': np.logspace(-2, 3, 50),
'kernel': ['rbf', 'poly', 'sigmoid']}
grid = GridSearchCV(SVC(), param_grid, refit=True, verbose=2)
grid.fit(X_train, y_train)
print(grid.best_estimator_)
grid_predictions = grid.predict(X_test)
print(confusion_matrix(y_test, grid_predictions))
print(classification_report(y_test, grid_predictions))
return
def grab_y_true_and_predict_from_sklearn_model(model, X, y, cv,
kfold_name='inner_kfold'):
from sklearn.model_selection import GridSearchCV
import xarray as xr
import numpy as np
if isinstance(model, GridSearchCV):
model = model.best_estimator_
ds_list = []
for i, (train, val) in enumerate(cv.split(X, y)):
model.fit(X[train], y[train])
y_true = y[val]
y_pred = model.predict(X[val])
try:
lr_probs = model.predict_proba(X[val])
# keep probabilities for the positive outcome only
lr_probs = lr_probs[:, 1]
except AttributeError:
lr_probs = model.decision_function(X[val])
y_true_da = xr.DataArray(y_true, dims=['sample'])
y_pred_da = xr.DataArray(y_pred, dims=['sample'])
y_prob_da = xr.DataArray(lr_probs, dims=['sample'])
ds = xr.Dataset()
ds['y_true'] = y_true_da
ds['y_pred'] = y_pred_da
ds['y_prob'] = y_prob_da
ds['sample'] = np.arange(0, len(X[val]))
ds_list.append(ds)
ds = xr.concat(ds_list, kfold_name)
ds[kfold_name] = np.arange(1, cv.n_splits + 1)
return ds
def produce_ROC_curves_from_model(model, X, y, cv, kfold_name='inner_kfold'):
import numpy as np
import xarray as xr
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
# TODO: collect all predictions and y_tests from this, also predict_proba
# and save, then calculte everything elsewhere.
if isinstance(model, GridSearchCV):
model = model.best_estimator_
tprs = []
aucs = []
pr = []
pr_aucs = []
mean_fpr = np.linspace(0, 1, 100)
for i, (train, val) in enumerate(cv.split(X, y)):
model.fit(X[train], y[train])
y_pred = model.predict(X[val])
try:
lr_probs = model.predict_proba(X[val])
# keep probabilities for the positive outcome only
lr_probs = lr_probs[:, 1]
except AttributeError:
lr_probs = model.decision_function(X[val])
fpr, tpr, _ = roc_curve(y[val], y_pred)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(roc_auc_score(y[val], y_pred))
precision, recall, _ = precision_recall_curve(y[val], lr_probs)
pr.append(recall)
average_precision = average_precision_score(y[val], y_pred)
pr_aucs.append(average_precision)
# mean_tpr = np.mean(tprs, axis=0)
# mean_tpr[-1] = 1.0
# mean_auc = auc(mean_fpr, mean_tpr)
# std_auc = np.std(aucs)
# std_tpr = np.std(tprs, axis=0)
tpr_da = xr.DataArray(tprs, dims=[kfold_name, 'fpr'])
auc_da = xr.DataArray(aucs, dims=[kfold_name])
ds = xr.Dataset()
ds['TPR'] = tpr_da
ds['AUC'] = auc_da
ds['fpr'] = mean_fpr
ds[kfold_name] = np.arange(1, cv.n_splits + 1)
# variability for each tpr is ds['TPR'].std('kfold')
return ds
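# --- Usage sketch (illustrative only) ---------------------------------------------
# The dataset returned above can be reduced to a mean ROC curve across folds,
# with ds['TPR'].std(kfold) giving the fold-to-fold variability band. A minimal
# plotting helper, assuming the default kfold_name used above:
def _demo_plot_mean_roc(ds, kfold_name='inner_kfold'):
    import matplotlib.pyplot as plt
    mean_tpr = ds['TPR'].mean(kfold_name)
    std_tpr = ds['TPR'].std(kfold_name)
    fig, ax = plt.subplots()
    ax.plot(ds['fpr'], mean_tpr,
            label='mean ROC (AUC={:.2f})'.format(ds['AUC'].mean().item()))
    ax.fill_between(ds['fpr'], mean_tpr - std_tpr, mean_tpr + std_tpr, alpha=0.2)
    ax.plot([0, 1], [0, 1], 'k--', label='chance')
    ax.set_xlabel('False positive rate')
    ax.set_ylabel('True positive rate')
    ax.legend()
    return fig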
def cross_validation_with_holdout(X, y, model_name='SVC', features='pwv',
n_splits=3, test_ratio=0.25,
scorers=['f1', 'recall', 'tss', 'hss',
'precision', 'accuracy'],
seed=42, savepath=None, verbose=0,
param_grid='normal', n_jobs=-1,
n_repeats=None):
# from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
# from string import digits
import numpy as np
# import xarray as xr
scores_dict = {s: s for s in scorers}
if 'tss' in scorers:
scores_dict['tss'] = make_scorer(tss_score)
if 'hss' in scorers:
scores_dict['hss'] = make_scorer(hss_score)
X = select_doy_from_feature_list(X, model_name, features)
if param_grid == 'light':
print(np.unique(X.feature.values))
# first take out the hold-out set:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_ratio,
random_state=seed,
stratify=y)
if n_repeats is None:
# configure the cross-validation procedure
cv = StratifiedKFold(n_splits=n_splits, shuffle=True,
random_state=seed)
print('CV StratifiedKfolds of {}.'.format(n_splits))
# define the model and search space:
else:
cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats,
random_state=seed)
print('CV RepeatedStratifiedKFold of {} with {} repeats.'.format(n_splits, n_repeats))
ml = ML_Classifier_Switcher()
print('param grid group is set to {}.'.format(param_grid))
sk_model = ml.pick_model(model_name, pgrid=param_grid)
search_space = ml.param_grid
# define search
gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
cv=cv, n_jobs=n_jobs,
scoring=scores_dict,
verbose=verbose,
refit=False, return_train_score=True)
    # fit the grid search on the training portion only; the hold-out set
    # (X_test, y_test) taken out above is kept aside for later evaluation
    gr_search.fit(X_train, y_train)
if isinstance(features, str):
features = [features]
if savepath is not None:
filename = 'GRSRCHCV_holdout_{}_{}_{}_{}_{}_{}_{}.pkl'.format(
model_name, '+'.join(features), '+'.join(scorers), n_splits,
int(test_ratio*100), param_grid, seed)
save_gridsearchcv_object(gr_search, savepath, filename)
# gr, _ = process_gridsearch_results(
# gr_search, model_name, split_dim='kfold', features=X.feature.values)
# remove_digits = str.maketrans('', '', digits)
# features = list(set([x.translate(remove_digits).split('_')[0]
# for x in X.feature.values]))
# # add more attrs, features etc:
# gr.attrs['features'] = features
return gr_search
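# A usage sketch for the holdout grid-search wrapper above (feature names, split counts
# and the absence of a save path are illustrative assumptions):
#
#   gr = cross_validation_with_holdout(X, y, model_name='SVC', features=['pwv', 'doy'],
#                                      n_splits=4, test_ratio=0.25, param_grid='normal')
#   results = pd.DataFrame(gr.cv_results_)   # per-scorer train/test scores for every grid point
#   # (refit=False above, so no best_estimator_ is fitted by the search itself.)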
def select_doy_from_feature_list(X, model_name='RF', features='pwv'):
# first if RF chosen, replace the cyclic coords of DOY (sin and cos) with
# the DOY itself.
if isinstance(features, list):
feats = features.copy()
else:
feats = features
if model_name == 'RF' and 'doy' in features:
if isinstance(features, list):
feats.remove('doy')
feats.append('DOY')
elif isinstance(features, str):
feats = 'DOY'
elif model_name != 'RF' and 'doy' in features:
if isinstance(features, list):
feats.remove('doy')
feats.append('doy_sin')
feats.append('doy_cos')
elif isinstance(features, str):
feats = ['doy_sin']
feats.append('doy_cos')
X = select_features_from_X(X, feats)
return X
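# Illustrative behaviour of the DOY handling above (a sketch, assuming the usual feature
# naming in this module): for a tree model the cyclic encoding is replaced by the raw DOY,
# e.g. select_doy_from_feature_list(X, 'RF', ['pwv', 'doy']) selects ['pwv', 'DOY'],
# while select_doy_from_feature_list(X, 'SVC', ['pwv', 'doy']) selects ['pwv', 'doy_sin', 'doy_cos'].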
def single_cross_validation(X_val, y_val, model_name='SVC', features='pwv',
n_splits=4, scorers=['f1', 'recall', 'tss', 'hss',
'precision', 'accuracy'],
seed=42, savepath=None, verbose=0,
param_grid='normal', n_jobs=-1,
n_repeats=None, outer_split='1-1'):
# from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
# from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
# from string import digits
import numpy as np
# import xarray as xr
scores_dict = {s: s for s in scorers}
if 'tss' in scorers:
scores_dict['tss'] = make_scorer(tss_score)
if 'hss' in scorers:
scores_dict['hss'] = make_scorer(hss_score)
X = select_doy_from_feature_list(X_val, model_name, features)
y = y_val
if param_grid == 'light':
print(np.unique(X.feature.values))
if n_repeats is None:
# configure the cross-validation procedure
cv = StratifiedKFold(n_splits=n_splits, shuffle=True,
random_state=seed)
print('CV StratifiedKfolds of {}.'.format(n_splits))
# define the model and search space:
else:
cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats,
random_state=seed)
print('CV RepeatedStratifiedKFold of {} with {} repeats.'.format(
n_splits, n_repeats))
ml = ML_Classifier_Switcher()
print('param grid group is set to {}.'.format(param_grid))
if outer_split == '1-1':
cv_type = 'holdout'
print('holdout cv is selected.')
else:
cv_type = 'nested'
print('nested cv {} out of {}.'.format(
outer_split.split('-')[0], outer_split.split('-')[1]))
sk_model = ml.pick_model(model_name, pgrid=param_grid)
search_space = ml.param_grid
# define search
gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space,
cv=cv, n_jobs=n_jobs,
scoring=scores_dict,
verbose=verbose,
refit=False, return_train_score=True)
gr_search.fit(X, y)
if isinstance(features, str):
features = [features]
if savepath is not None:
filename = 'GRSRCHCV_{}_{}_{}_{}_{}_{}_{}_{}.pkl'.format(cv_type,
model_name, '+'.join(features), '+'.join(
scorers), n_splits,
outer_split, param_grid, seed)
save_gridsearchcv_object(gr_search, savepath, filename)
return gr_search
def save_cv_params_to_file(cv_obj, path, name):
import pandas as pd
di = vars(cv_obj)
splitter_type = cv_obj.__repr__().split('(')[0]
di['splitter_type'] = splitter_type
(pd.DataFrame.from_dict(data=di, orient='index')
.to_csv(path / '{}.csv'.format(name), header=False))
print('{}.csv saved to {}.'.format(name, path))
return
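# Usage sketch for the helper above (the path and file name are illustrative):
#
#   from pathlib import Path
#   cv = StratifiedKFold(n_splits=4, shuffle=True, random_state=42)
#   save_cv_params_to_file(cv, Path('.'), 'cv_params')
#   # writes cv_params.csv with one row per splitter attribute plus its class name.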
def read_cv_params_and_instantiate(filepath):
import pandas as pd
from sklearn.model_selection import StratifiedKFold
df = pd.read_csv(filepath, header=None, index_col=0)
d = {}
    for row in df.iterrows():
        dd = pd.to_numeric(row[1], errors='ignore')
        d[row[0]] = dd.values.item()
    # NOTE: the remainder of this function was truncated in the source; the lines below
    # are a best-effort reconstruction mirroring save_cv_params_to_file above.
    cv = StratifiedKFold(n_splits=int(d['n_splits']),
                         shuffle=str(d.get('shuffle')) == 'True',
                         random_state=int(d['random_state']))
    return cv
import copy
from datetime import datetime
import warnings
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series, isna, notna
import pandas._testing as tm
from pandas.core.window.common import _flex_binary_moment
from pandas.tests.window.common import (
Base,
check_pairwise_moment,
moments_consistency_cov_data,
moments_consistency_is_constant,
moments_consistency_mock_mean,
moments_consistency_series_data,
moments_consistency_std_data,
moments_consistency_var_data,
moments_consistency_var_debiasing_factors,
)
import pandas.tseries.offsets as offsets
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestMoments(Base):
def setup_method(self, method):
self._create_data()
def test_centered_axis_validation(self):
# ok
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
with pytest.raises(ValueError):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=0).mean()
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean()
# bad axis
with pytest.raises(ValueError):
(DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean())
def test_rolling_sum(self, raw):
self._check_moment_func(
np.nansum, name="sum", zero_min_periods_equal=False, raw=raw
)
def test_rolling_count(self, raw):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(
counter, name="count", has_min_periods=False, fill_value=0, raw=raw
)
def test_rolling_mean(self, raw):
self._check_moment_func(np.mean, name="mean", raw=raw)
@td.skip_if_no_scipy
def test_cmov_mean(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, win_type="boxcar", center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window_corner(self):
# GH 8238
# all nan
vals = pd.Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert np.isnan(result).all()
# empty
vals = pd.Series([], dtype=object)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert len(result) == 0
# shorter than window
vals = pd.Series(np.random.randn(5))
result = vals.rolling(10, win_type="boxcar").mean()
assert np.isnan(result).all()
assert len(result) == 5
@td.skip_if_no_scipy
@pytest.mark.parametrize(
"f,xp",
[
(
"mean",
[
[np.nan, np.nan],
[np.nan, np.nan],
[9.252, 9.392],
[8.644, 9.906],
[8.87, 10.208],
[6.81, 8.588],
[7.792, 8.644],
[9.05, 7.824],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"std",
[
[np.nan, np.nan],
[np.nan, np.nan],
[3.789706, 4.068313],
[3.429232, 3.237411],
[3.589269, 3.220810],
[3.405195, 2.380655],
[3.281839, 2.369869],
[3.676846, 1.801799],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"var",
[
[np.nan, np.nan],
[np.nan, np.nan],
[14.36187, 16.55117],
[11.75963, 10.48083],
[12.88285, 10.37362],
[11.59535, 5.66752],
[10.77047, 5.61628],
[13.51920, 3.24648],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"sum",
[
[np.nan, np.nan],
[np.nan, np.nan],
[46.26, 46.96],
[43.22, 49.53],
[44.35, 51.04],
[34.05, 42.94],
[38.96, 43.22],
[45.25, 39.12],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
],
)
def test_cmov_window_frame(self, f, xp):
# Gh 8238
df = DataFrame(
np.array(
[
[12.18, 3.64],
[10.18, 9.16],
[13.24, 14.61],
[4.51, 8.11],
[6.15, 11.44],
[9.14, 6.21],
[11.31, 10.67],
[2.94, 6.51],
[9.42, 8.39],
[12.44, 7.34],
]
)
)
xp = DataFrame(np.array(xp))
roll = df.rolling(5, win_type="boxcar", center=True)
rs = getattr(roll, f)()
tm.assert_frame_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_na_min_periods(self):
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = vals.rolling(5, min_periods=4, center=True).mean()
rs = vals.rolling(5, win_type="boxcar", min_periods=4, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular(self, win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
xps = {
"hamming": [
np.nan,
np.nan,
8.71384,
9.56348,
12.38009,
14.03687,
13.8567,
11.81473,
np.nan,
np.nan,
],
"triang": [
np.nan,
np.nan,
9.28667,
10.34667,
12.00556,
13.33889,
13.38,
12.33667,
np.nan,
np.nan,
],
"barthann": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
"bohman": [
np.nan,
np.nan,
7.61599,
9.1764,
12.83559,
14.17267,
14.65923,
11.10401,
np.nan,
np.nan,
],
"blackmanharris": [
np.nan,
np.nan,
6.97691,
9.16438,
13.05052,
14.02156,
15.10512,
10.74574,
np.nan,
np.nan,
],
"nuttall": [
np.nan,
np.nan,
7.04618,
9.16786,
13.02671,
14.03559,
15.05657,
10.78514,
np.nan,
np.nan,
],
"blackman": [
np.nan,
np.nan,
7.73345,
9.17869,
12.79607,
14.20036,
14.57726,
11.16988,
np.nan,
np.nan,
],
"bartlett": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_linear_range(self, win_types):
# GH 8238
        vals = np.array(range(10), dtype=np.float64)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_missing_data(self, win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]
)
xps = {
"bartlett": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"blackman": [
np.nan,
np.nan,
9.04582,
11.41536,
7.73345,
9.17869,
12.79607,
14.20036,
15.8706,
13.655,
],
"barthann": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"bohman": [
np.nan,
np.nan,
8.9444,
11.56327,
7.61599,
9.1764,
12.83559,
14.17267,
15.90976,
13.655,
],
"hamming": [
np.nan,
np.nan,
9.59321,
10.29694,
8.71384,
9.56348,
12.38009,
14.20565,
15.24694,
13.69758,
],
"nuttall": [
np.nan,
np.nan,
8.47693,
12.2821,
7.04618,
9.16786,
13.02671,
14.03673,
16.08759,
13.65553,
],
"triang": [
np.nan,
np.nan,
9.33167,
9.76125,
9.28667,
10.34667,
12.00556,
13.82125,
14.49429,
13.765,
],
"blackmanharris": [
np.nan,
np.nan,
8.42526,
12.36824,
6.97691,
9.16438,
13.05052,
14.02175,
16.1098,
13.65509,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special(self, win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"power": 2.0, "width": 2.0},
"exponential": {"tau": 10},
}
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
xps = {
"gaussian": [
np.nan,
np.nan,
8.97297,
9.76077,
12.24763,
13.89053,
13.65671,
12.01002,
np.nan,
np.nan,
],
"general_gaussian": [
np.nan,
np.nan,
9.85011,
10.71589,
11.73161,
13.08516,
12.95111,
12.74577,
np.nan,
np.nan,
],
"kaiser": [
np.nan,
np.nan,
9.86851,
11.02969,
11.65161,
12.75129,
12.90702,
12.83757,
np.nan,
np.nan,
],
"exponential": [
np.nan,
np.nan,
9.83364,
11.10472,
11.64551,
12.66138,
12.92379,
12.83770,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types_special])
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special_linear_range(self, win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"power": 2.0, "width": 2.0},
"slepian": {"width": 0.5},
"exponential": {"tau": 10},
}
        vals = np.array(range(10), dtype=np.float64)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
def test_rolling_median(self, raw):
self._check_moment_func(np.median, name="median", raw=raw)
def test_rolling_min(self, raw):
self._check_moment_func(np.min, name="min", raw=raw)
a = pd.Series([1, 2, 3, 4, 5])
result = a.rolling(window=100, min_periods=1).min()
expected = pd.Series(np.ones(len(a)))
tm.assert_series_equal(result, expected)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
def test_rolling_max(self, raw):
self._check_moment_func(np.max, name="max", raw=raw)
a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64)
b = a.rolling(window=100, min_periods=1).max()
tm.assert_almost_equal(a, b)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_rolling_quantile(self, q, raw):
def scoreatpercentile(a, per):
values = np.sort(a, axis=0)
idx = int(per / 1.0 * (values.shape[0] - 1))
if idx == values.shape[0] - 1:
retval = values[-1]
else:
qlow = float(idx) / float(values.shape[0] - 1)
qhig = float(idx + 1) / float(values.shape[0] - 1)
vlow = values[idx]
vhig = values[idx + 1]
retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)
return retval
def quantile_func(x):
return scoreatpercentile(x, q)
self._check_moment_func(quantile_func, name="quantile", quantile=q, raw=raw)
def test_rolling_quantile_np_percentile(self):
# #9413: Tests that rolling window's quantile default behavior
# is analogous to Numpy's percentile
row = 10
col = 5
idx = pd.date_range("20100101", periods=row, freq="B")
df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
np_percentile = np.percentile(df, [25, 50, 75], axis=0)
tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))
@pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1])
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[8.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 7.0],
[0.0, np.nan, 0.2, np.nan, 0.4],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 0.1, np.nan, 0.3, 0.4, 0.5],
[0.5],
[np.nan, 0.7, 0.6],
],
)
def test_rolling_quantile_interpolation_options(
self, quantile, interpolation, data
):
# Tests that rolling window's quantile behavior is analogous to
# Series' quantile for each interpolation option
s = Series(data)
q1 = s.quantile(quantile, interpolation)
q2 = s.expanding(min_periods=1).quantile(quantile, interpolation).iloc[-1]
if np.isnan(q1):
assert np.isnan(q2)
else:
assert q1 == q2
def test_invalid_quantile_value(self):
data = np.arange(5)
s = Series(data)
msg = "Interpolation 'invalid' is not supported"
with pytest.raises(ValueError, match=msg):
s.rolling(len(data), min_periods=1).quantile(0.5, interpolation="invalid")
def test_rolling_quantile_param(self):
ser = Series([0.0, 0.1, 0.5, 0.9, 1.0])
with pytest.raises(ValueError):
ser.rolling(3).quantile(-0.1)
with pytest.raises(ValueError):
ser.rolling(3).quantile(10.0)
with pytest.raises(TypeError):
ser.rolling(3).quantile("foo")
def test_rolling_apply(self, raw):
# suppress warnings about empty slices, as we are deliberately testing
# with a 0-length Series
def f(x):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning,
)
return x[np.isfinite(x)].mean()
self._check_moment_func(np.mean, name="apply", func=f, raw=raw)
def test_rolling_std(self, raw):
self._check_moment_func(lambda x: np.std(x, ddof=1), name="std", raw=raw)
self._check_moment_func(
lambda x: np.std(x, ddof=0), name="std", ddof=0, raw=raw
)
def test_rolling_std_1obs(self):
vals = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
result = vals.rolling(1, min_periods=1).std()
expected = pd.Series([np.nan] * 5)
tm.assert_series_equal(result, expected)
result = vals.rolling(1, min_periods=1).std(ddof=0)
expected = pd.Series([0.0] * 5)
tm.assert_series_equal(result, expected)
result = pd.Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std()
assert np.isnan(result[2])
def test_rolling_std_neg_sqrt(self):
# unit test from Bottleneck
# Test move_nanstd for neg sqrt.
a = pd.Series(
[
0.0011448196318903589,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
]
)
b = a.rolling(window=3).std()
assert np.isfinite(b[2:]).all()
b = a.ewm(span=3).std()
assert np.isfinite(b[2:]).all()
def test_rolling_var(self, raw):
self._check_moment_func(lambda x: np.var(x, ddof=1), name="var", raw=raw)
self._check_moment_func(
lambda x: np.var(x, ddof=0), name="var", ddof=0, raw=raw
)
@td.skip_if_no_scipy
def test_rolling_skew(self, raw):
from scipy.stats import skew
self._check_moment_func(lambda x: skew(x, bias=False), name="skew", raw=raw)
@td.skip_if_no_scipy
def test_rolling_kurt(self, raw):
from scipy.stats import kurtosis
self._check_moment_func(lambda x: kurtosis(x, bias=False), name="kurt", raw=raw)
def _check_moment_func(
self,
static_comp,
name,
raw,
has_min_periods=True,
has_center=True,
has_time_rule=True,
fill_value=None,
zero_min_periods_equal=True,
**kwargs,
):
# inject raw
if name == "apply":
kwargs = copy.copy(kwargs)
kwargs["raw"] = raw
def get_result(obj, window, min_periods=None, center=False):
r = obj.rolling(window=window, min_periods=min_periods, center=center)
return getattr(r, name)(**kwargs)
series_result = get_result(self.series, window=50)
assert isinstance(series_result, Series)
tm.assert_almost_equal(series_result.iloc[-1], static_comp(self.series[-50:]))
frame_result = get_result(self.frame, window=50)
assert isinstance(frame_result, DataFrame)
tm.assert_series_equal(
frame_result.iloc[-1, :],
self.frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw),
check_names=False,
)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
series = self.series[::2].resample("B").mean()
frame = self.frame[::2].resample("B").mean()
if has_min_periods:
series_result = get_result(series, window=win, min_periods=minp)
frame_result = get_result(frame, window=win, min_periods=minp)
else:
series_result = get_result(series, window=win, min_periods=0)
frame_result = get_result(frame, window=win, min_periods=0)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = self.series[::2].truncate(prev_date, last_date)
trunc_frame = self.frame[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], static_comp(trunc_series))
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(static_comp, raw=raw),
check_names=False,
)
# excluding NaNs correctly
obj = Series(randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
if has_min_periods:
result = get_result(obj, 50, min_periods=30)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# min_periods is working correctly
result = get_result(obj, 20, min_periods=15)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(randn(20))
result = get_result(obj2, 10, min_periods=5)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if zero_min_periods_equal:
# min_periods=0 may be equivalent to min_periods=1
result0 = get_result(obj, 20, min_periods=0)
result1 = get_result(obj, 20, min_periods=1)
tm.assert_almost_equal(result0, result1)
else:
result = get_result(obj, 50)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# window larger than series length (#7297)
if has_min_periods:
for minp in (0, len(self.series) - 1, len(self.series)):
result = get_result(self.series, len(self.series) + 1, min_periods=minp)
expected = get_result(self.series, len(self.series), min_periods=minp)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
else:
result = get_result(self.series, len(self.series) + 1, min_periods=0)
expected = get_result(self.series, len(self.series), min_periods=0)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
# check center=True
if has_center:
if has_min_periods:
result = get_result(obj, 20, min_periods=15, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=15
)[9:].reset_index(drop=True)
else:
result = get_result(obj, 20, min_periods=0, center=True)
print(result)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=0
)[9:].reset_index(drop=True)
tm.assert_series_equal(result, expected)
# shifter index
s = [f"x{x:d}" for x in range(12)]
if has_min_periods:
minp = 10
series_xp = (
get_result(
self.series.reindex(list(self.series.index) + s),
window=25,
min_periods=minp,
)
.shift(-12)
.reindex(self.series.index)
)
frame_xp = (
get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25,
min_periods=minp,
)
.shift(-12)
.reindex(self.frame.index)
)
series_rs = get_result(
self.series, window=25, min_periods=minp, center=True
)
frame_rs = get_result(
self.frame, window=25, min_periods=minp, center=True
)
else:
series_xp = (
get_result(
self.series.reindex(list(self.series.index) + s),
window=25,
min_periods=0,
)
.shift(-12)
.reindex(self.series.index)
)
frame_xp = (
get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25,
min_periods=0,
)
.shift(-12)
.reindex(self.frame.index)
)
series_rs = get_result(
self.series, window=25, min_periods=0, center=True
)
frame_rs = get_result(self.frame, window=25, min_periods=0, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
frame_xp = frame_xp.fillna(fill_value)
tm.assert_series_equal(series_xp, series_rs)
tm.assert_frame_equal(frame_xp, frame_rs)
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
class TestRollingMomentsConsistency(Base):
def setup_method(self, method):
self._create_data()
# binary moments
def test_rolling_cov(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(self, func):
check_pairwise_moment(self.frame, "rolling", func, window=10, min_periods=5)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(self, method):
series = self.frame[1]
res = getattr(series.rolling(window=10), method)(self.frame)
res2 = getattr(self.frame.rolling(window=10), method)(series)
exp = self.frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = self.frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(self.frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{
k: getattr(self.frame[k].rolling(window=10), method)(frame2[k])
for k in self.frame
}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.slow
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
def test_rolling_apply_consistency(
consistency_data, base_functions, no_nan_functions, window, min_periods, center
):
x, is_constant, no_nans = consistency_data
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning,
)
# test consistency between rolling_xyz() and either (a)
# rolling_apply of Series.xyz(), or (b) rolling_apply of
# np.nanxyz()
functions = base_functions
# GH 8269
if no_nans:
functions = no_nan_functions + base_functions
for (f, require_min_periods, name) in functions:
rolling_f = getattr(
x.rolling(window=window, center=center, min_periods=min_periods), name,
)
if (
require_min_periods
and (min_periods is not None)
and (min_periods < require_min_periods)
):
continue
if name == "count":
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
else:
if name in ["cov", "corr"]:
rolling_f_result = rolling_f(pairwise=False)
else:
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
# GH 9422
if name in ["sum", "prod"]:
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize("window", range(7))
def test_rolling_corr_with_zero_variance(window):
# GH 18430
s = pd.Series(np.zeros(20))
other = pd.Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def test_flex_binary_moment():
# GH3155
# don't blow the stack
msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
with pytest.raises(TypeError, match=msg):
_flex_binary_moment(5, 6, None)
def test_corr_sanity():
# GH 3155
df = DataFrame(
np.array(
[
[0.87024726, 0.18505595],
[0.64355431, 0.3091617],
[0.92372966, 0.50552513],
[0.00203756, 0.04520709],
[0.84780328, 0.33394331],
[0.78369152, 0.63919667],
]
)
)
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
# and some fuzzing
for _ in range(10):
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
try:
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
except AssertionError:
print(res)
def test_rolling_cov_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
],
)
@td.skip_if_no_scipy
def test_rolling_functions_window_non_shrinkage(f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_functions_window_non_shrinkage_binary():
# corr/cov return a MI DataFrame
df = DataFrame(
[[1, 5], [3, 2], [3, 9], [-1, 0]],
columns=Index(["A", "B"], name="foo"),
index=Index(range(4), name="bar"),
)
df_expected = DataFrame(
columns=Index(["A", "B"], name="foo"),
index=pd.MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]),
dtype="float64",
)
functions = [
lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),
]
for f in functions:
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_skew_edge_cases():
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).skew()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=2).skew()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 0.177994, 1.548824]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824])
x = d.rolling(window=4).skew()
tm.assert_series_equal(expected, x)
def test_rolling_kurt_edge_cases():
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).kurt()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=3).kurt()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 1.224307, 2.671499]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499])
x = d.rolling(window=4).kurt()
tm.assert_series_equal(expected, x)
def test_rolling_skew_eq_value_fperr():
# #18804 all rolling skew for all equal values should return Nan
a = Series([1.1] * 15).rolling(window=10).skew()
assert np.isnan(a).all()
def test_rolling_kurt_eq_value_fperr():
# #18804 all rolling kurt for all equal values should return Nan
a = Series([1.1] * 15).rolling(window=10).kurt()
assert np.isnan(a).all()
def test_rolling_max_gh6297():
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series(
[1.0, 2.0, 6.0, 4.0, 5.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_max_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series(
[0.0, 1.0, 2.0, 3.0, 20.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0 + 10.0 + 20.0) / 3.0
expected = Series(
[0.0, 1.0, 2.0, 3.0, v],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").mean().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_min_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series(
[0.0, 1.0, 2.0, 3.0, 4.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
r = series.resample("D").min().rolling(window=1)
tm.assert_series_equal(expected, r.min())
def test_rolling_median_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be median
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).median()
tm.assert_series_equal(expected, x)
def test_rolling_median_memory_error():
# GH11722
n = 20000
Series(np.random.randn(n)).rolling(window=2, center=False).median()
Series(np.random.randn(n)).rolling(window=2, center=False).median()
def test_rolling_min_max_numeric_types():
# GH12373
types_test = [np.dtype(f"f{width}") for width in [4, 8]]
types_test.extend(
[np.dtype(f"{sign}{width}") for width in [1, 2, 4, 8] for sign in "ui"]
)
for data_type in types_test:
# Just testing that these don't throw exceptions and that
# the return type is float64. Other tests will cover quantitative
# correctness
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max()
assert result.dtypes[0] == np.dtype("f8")
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min()
assert result.dtypes[0] == np.dtype("f8")
def test_moment_functions_zero_length():
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
df2_expected = df2
functions = [
lambda x: x.rolling(window=10).count(),
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
]
for f in functions:
try:
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
except (ImportError):
# scipy needed for rolling_window
continue
def test_moment_functions_zero_length_pairwise():
df1 = DataFrame()
df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
df2["a"] = df2["a"].astype("float64")
df1_expected = DataFrame(
index=pd.MultiIndex.from_product([df1.index, df1.columns]), columns=Index([]),
)
df2_expected = DataFrame(
index=pd.MultiIndex.from_product(
[df2.index, df2.columns], names=["bar", "foo"]
),
columns=Index(["a"], name="foo"),
dtype="float64",
)
functions = [
lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),
]
for f in functions:
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
| tm.assert_frame_equal(df2_result, df2_expected) | pandas._testing.assert_frame_equal |
import re
import numpy as np
import pandas as pd
import random as rd
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
# Print options
np.set_printoptions(precision=4, threshold=10000, linewidth=160, edgeitems=999, suppress=True)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', 160)
pd.set_option('expand_frame_repr', False)
pd.set_option('precision', 4)
# constructing binary features
def process_embarked():
global df_titanic_data
    # replacing the missing values with the most common value in the variable
df_titanic_data.Embarked[df_titanic_data.Embarked.isnull()] = df_titanic_data.Embarked.dropna().mode().values
# converting the values into numbers
df_titanic_data['Embarked'] = pd.factorize(df_titanic_data['Embarked'])[0]
# binarizing the constructed features
if keep_binary:
df_titanic_data = pd.concat([df_titanic_data, pd.get_dummies(df_titanic_data['Embarked']).rename(
columns=lambda x: 'Embarked_' + str(x))], axis=1)
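# A quick sketch of what process_embarked adds (assuming the usual three embarkation ports):
#
#   process_embarked()
#   df_titanic_data[['Embarked', 'Embarked_0', 'Embarked_1', 'Embarked_2']].head()
#   # 'Embarked' holds the factorized codes; the Embarked_<code> columns are their one-hot encoding.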
# Define a helper function that can use RandomForestClassifier for handling the missing values of the age variable
def set_missing_ages():
global df_titanic_data
age_data = df_titanic_data[
['Age', 'Embarked', 'Fare', 'Parch', 'SibSp', 'Title_id', 'Pclass', 'Names', 'CabinLetter']]
input_values_RF = age_data.loc[(df_titanic_data.Age.notnull())].values[:, 1::]
target_values_RF = age_data.loc[(df_titanic_data.Age.notnull())].values[:, 0]
    # create a random forest regressor from sklearn (see the RandomForestRegressor docs for details)
regressor = RandomForestRegressor(n_estimators=2000, n_jobs=-1)
# building the model based on the input values and target values above
regressor.fit(input_values_RF, target_values_RF)
# using the trained model to predict the missing values
predicted_ages = regressor.predict(age_data.loc[(df_titanic_data.Age.isnull())].values[:, 1::])
    # filling the predicted ages back into the original titanic dataframe
    df_titanic_data.loc[(df_titanic_data.Age.isnull()), 'Age'] = predicted_ages
# Helper function for constructing features from the age variable
def process_age():
global df_titanic_data
# calling the set_missing_ages helper function to use random forest regression for predicting missing values of age
set_missing_ages()
# # scale the age variable by centering it around the mean with a unit variance
# if keep_scaled:
# scaler_preprocessing = preprocessing.StandardScaler()
# df_titanic_data['Age_scaled'] = scaler_preprocessing.fit_transform(df_titanic_data.Age.reshape(-1, 1))
# construct a feature for children
df_titanic_data['isChild'] = np.where(df_titanic_data.Age < 13, 1, 0)
# bin into quartiles and create binary features
df_titanic_data['Age_bin'] = pd.qcut(df_titanic_data['Age'], 4)
if keep_binary:
df_titanic_data = pd.concat(
[df_titanic_data, pd.get_dummies(df_titanic_data['Age_bin']).rename(columns=lambda y: 'Age_' + str(y))],
axis=1)
if keep_bins:
df_titanic_data['Age_bin_id'] = pd.factorize(df_titanic_data['Age_bin'])[0] + 1
if keep_bins and keep_scaled:
scaler_processing = preprocessing.StandardScaler()
df_titanic_data['Age_bin_id_scaled'] = scaler_processing.fit_transform(
            df_titanic_data.Age_bin_id.values.reshape(-1, 1))
if not keep_strings:
df_titanic_data.drop('Age_bin', axis=1, inplace=True)
# Helper function for constructing features from the passengers/crew names
def process_name():
global df_titanic_data
# getting the different names in the names variable
df_titanic_data['Names'] = df_titanic_data['Name'].map(lambda y: len(re.split(' ', y)))
# Getting titles for each person
df_titanic_data['Title'] = df_titanic_data['Name'].map(lambda y: re.compile(", (.*?)\.").findall(y)[0])
# handling the low occuring titles
df_titanic_data['Title'][df_titanic_data.Title == 'Jonkheer'] = 'Master'
df_titanic_data['Title'][df_titanic_data.Title.isin(['Ms', 'Mlle'])] = 'Miss'
df_titanic_data['Title'][df_titanic_data.Title == 'Mme'] = 'Mrs'
df_titanic_data['Title'][df_titanic_data.Title.isin(['Capt', 'Don', 'Major', 'Col', 'Sir'])] = 'Sir'
df_titanic_data['Title'][df_titanic_data.Title.isin(['Dona', 'Lady', 'the Countess'])] = 'Lady'
# binarizing all the features
if keep_binary:
df_titanic_data = pd.concat(
[df_titanic_data, pd.get_dummies(df_titanic_data['Title']).rename(columns=lambda x: 'Title_' + str(x))],
axis=1)
# scalling
if keep_scaled:
scaler_preprocessing = preprocessing.StandardScaler()
        df_titanic_data['Names_scaled'] = scaler_preprocessing.fit_transform(df_titanic_data.Names.values.reshape(-1, 1))
# binning
if keep_bins:
df_titanic_data['Title_id'] = pd.factorize(df_titanic_data['Title'])[0] + 1
if keep_bins and keep_scaled:
scaler = preprocessing.StandardScaler()
        df_titanic_data['Title_id_scaled'] = scaler.fit_transform(df_titanic_data.Title_id.values.reshape(-1, 1))
# Generate features from the cabin input variable
def process_cabin():
# refering to the global variable that contains the titanic examples
global df_titanic_data
    # replacing the missing values in the cabin variable with "U0"
df_titanic_data['Cabin'][df_titanic_data.Cabin.isnull()] = 'U0'
    # the cabin number is a sequence of alphanumerical digits, so we are going to create some features
# from the alphabetical part of it
df_titanic_data['CabinLetter'] = df_titanic_data['Cabin'].map(lambda l: get_cabin_letter(l))
df_titanic_data['CabinLetter'] = pd.factorize(df_titanic_data['CabinLetter'])[0]
# binarizing the cabin letters features
if keep_binary:
cletters = pd.get_dummies(df_titanic_data['CabinLetter']).rename(columns=lambda x: 'CabinLetter_' + str(x))
df_titanic_data = pd.concat([df_titanic_data, cletters], axis=1)
# creating features from the numerical side of the cabin
df_titanic_data['CabinNumber'] = df_titanic_data['Cabin'].map(lambda x: get_cabin_num(x)).astype(int) + 1
# scaling the feature
if keep_scaled:
scaler_processing = preprocessing.StandardScaler()
        df_titanic_data['CabinNumber_scaled'] = scaler_processing.fit_transform(df_titanic_data.CabinNumber.values.reshape(-1, 1))
def get_cabin_letter(cabin_value):
# searching for the letters in the cabin alphanumerical value
letter_match = re.compile("([a-zA-Z]+)").search(cabin_value)
if letter_match:
return letter_match.group()
else:
return 'U'
def get_cabin_num(cabin_value):
# searching for the numbers in the cabin alphanumerical value
number_match = re.compile("([0-9]+)").search(cabin_value)
if number_match:
return number_match.group()
else:
return 0
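# Expected behaviour of the two cabin helpers above (the cabin strings are made-up examples):
#
#   get_cabin_letter('C123')   # -> 'C'
#   get_cabin_letter('U0')     # -> 'U'
#   get_cabin_num('C123')      # -> '123' (a string; process_cabin casts it to int)
#   get_cabin_num('F')         # -> 0 (no digits found)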
# helper function for constructing features from the ticket fare variable
def process_fare():
global df_titanic_data
    # handling the missing values by replacing them with the median fare
df_titanic_data['Fare'][np.isnan(df_titanic_data['Fare'])] = df_titanic_data['Fare'].median()
# zeros in the fare will cause some division problems so we are going to set them to 1/10th of the lowest fare
df_titanic_data['Fare'][np.where(df_titanic_data['Fare'] == 0)[0]] = df_titanic_data['Fare'][
df_titanic_data['Fare'].nonzero()[
0]].min() / 10
# Binarizing the features by binning them into quantiles
df_titanic_data['Fare_bin'] = pd.qcut(df_titanic_data['Fare'], 4)
if keep_binary:
df_titanic_data = pd.concat(
[df_titanic_data, | pd.get_dummies(df_titanic_data['Fare_bin']) | pandas.get_dummies |
"""
this is a compilation of useful functions that might be helpful for analysing BEAM-related data
"""
import matplotlib.pyplot as plt
import numpy as np
import time
import urllib
import pandas as pd
import re
import statistics
from urllib.error import HTTPError
from urllib import request
def get_output_path_from_s3_url(s3_url):
"""
transform s3 output path (from beam runs spreadsheet) into path to s3 output
that may be used as part of path to the file.
s3path = get_output_path_from_s3_url(s3url)
beam_log_path = s3path + '/beamLog.out'
"""
return s3_url \
.strip() \
.replace("s3.us-east-2.amazonaws.com/beam-outputs/index.html#", "beam-outputs.s3.amazonaws.com/")
def grep_beamlog(s3url, keywords):
"""
look for keywords in beamLog.out file of specified run
found rows with keywords will be printed out
"""
s3path = get_output_path_from_s3_url(s3url)
url = s3path + "/beamLog.out"
file = urllib.request.urlopen(url)
for b_line in file.readlines():
line = b_line.decode("utf-8")
for keyword in keywords:
if keyword in line:
print(line)
def parse_config(s3url, complain=True):
"""
parse beam config of beam run.
:param s3url: url to s3 output
:param complain: it will complain if there are many config values found with the same name
:return: dictionary config key -> config value
"""
s3path = get_output_path_from_s3_url(s3url)
url = s3path + "/fullBeamConfig.conf"
config = urllib.request.urlopen(url)
config_keys = ["flowCapacityFactor", "speedScalingFactor", "quick_fix_minCarSpeedInMetersPerSecond",
"activitySimEnabled", "transitCapacity",
"minimumRoadSpeedInMetersPerSecond", "fractionOfInitialVehicleFleet",
"agentSampleSizeAsFractionOfPopulation",
"simulationName", "directory", "generate_secondary_activities", "lastIteration",
"fractionOfPeopleWithBicycle",
"parkingStallCountScalingFactor", "parkingPriceMultiplier", "parkingCostScalingFactor", "queryDate",
"transitPrice", "transit_crowding", "transit_crowding_percentile",
"maxLinkLengthToApplySpeedScalingFactor", "max_destination_distance_meters",
"max_destination_choice_set_size",
"transit_crowding_VOT_multiplier", "transit_crowding_VOT_threshold",
"activity_file_path", "intercept_file_path", "additional_trip_utility",
"ModuleProbability_1", "ModuleProbability_2", "ModuleProbability_3", "ModuleProbability_4",
"BUS-DEFAULT", "RAIL-DEFAULT", "SUBWAY-DEFAULT"]
intercept_keys = ["bike_intercept", "car_intercept", "drive_transit_intercept", "ride_hail_intercept",
"ride_hail_pooled_intercept", "ride_hail_transit_intercept", "walk_intercept",
"walk_transit_intercept", "transfer"]
config_map = {}
default_value = ""
for conf_key in config_keys:
config_map[conf_key] = default_value
def set_value(key, line_value):
value = line_value.strip().replace("\"", "")
if key not in config_map:
config_map[key] = value
else:
old_val = config_map[key]
if old_val == default_value or old_val.strip() == value.strip():
config_map[key] = value
else:
if complain:
print("an attempt to rewrite config value with key:", key)
print(" value in the map \t", old_val)
print(" new rejected value\t", value)
physsim_names = ['JDEQSim', 'BPRSim', 'PARBPRSim', 'CCHRoutingAssignment']
def look_for_physsim_type(config_line):
for physsim_name in physsim_names:
if 'name={}'.format(physsim_name) in config_line:
set_value("physsim_type", "physsim_type = {}".format(physsim_name))
for b_line in config.readlines():
line = b_line.decode("utf-8").strip()
look_for_physsim_type(line)
for ckey in config_keys:
if ckey + "=" in line or ckey + "\"=" in line or '"' + ckey + ":" in line:
set_value(ckey, line)
for ikey in intercept_keys:
if ikey in line:
set_value(ikey, line)
return config_map
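# Usage sketch for parse_config (the s3url below is a placeholder, not a real run):
#
#   config = parse_config(s3url)
#   config.get('lastIteration')   # raw config fragment, e.g. 'lastIteration=10'
#   config.get('physsim_type')    # e.g. 'physsim_type = JDEQSim'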
def get_calibration_text_data(s3url, commit=""):
"""
calculate the csv row with values for pasting them into spreadsheet with beam runs
:param s3url: url to beam run
:param commit: git commit to insert into resulting csv string
:return: the csv string
"""
def get_realized_modes_as_str(full_path, data_file_name='referenceRealizedModeChoice.csv'):
if data_file_name not in full_path:
path = get_output_path_from_s3_url(full_path) + "/" + data_file_name
else:
path = get_output_path_from_s3_url(full_path)
df = pd.read_csv(path,
names=['bike', 'car', 'cav', 'drive_transit', 'ride_hail', 'ride_hail_pooled',
'ride_hail_transit',
'walk', 'walk_transit'])
last_row = df.tail(1)
car = float(last_row['car'])
walk = float(last_row['walk'])
bike = float(last_row['bike'])
ride_hail = float(last_row['ride_hail'])
ride_hail_transit = float(last_row['ride_hail_transit'])
walk_transit = float(last_row['walk_transit'])
drive_transit = float(last_row['drive_transit'])
ride_hail_pooled = float(last_row['ride_hail_pooled'])
# car walk bike ride_hail ride_hail_transit walk_transit drive_transit ride_hail_pooled
result = "%f,%f,%f,%f,%f,%f,%f,%f" % (
car, walk, bike, ride_hail, ride_hail_transit, walk_transit, drive_transit, ride_hail_pooled)
return result
print("order: car | walk | bike | ride_hail | ride_hail_transit | walk_transit | drive_transit | ride_hail_pooled")
print("")
print('ordered realized mode choice:')
print('ordered commute realized mode choice:')
modes_section = get_realized_modes_as_str(s3url)
print(modes_section)
print(get_realized_modes_as_str(s3url, 'referenceRealizedModeChoice_commute.csv'))
print("")
config = parse_config(s3url)
def get_config_value(conf_value_name):
return config.get(conf_value_name, '=default').split('=')[-1]
intercepts = ["car_intercept", "walk_intercept", "bike_intercept", "ride_hail_intercept",
"ride_hail_transit_intercept",
"walk_transit_intercept", "drive_transit_intercept", "ride_hail_pooled_intercept", "transfer"]
print('order of intercepts:', "\n\t\t ".join(intercepts))
intercepts_sections = ', '.join(get_config_value(x) for x in intercepts)
print(intercepts_sections)
print("")
config_ordered = ["lastIteration", "agentSampleSizeAsFractionOfPopulation", "flowCapacityFactor",
"speedScalingFactor",
"quick_fix_minCarSpeedInMetersPerSecond", "minimumRoadSpeedInMetersPerSecond",
"fractionOfInitialVehicleFleet", "transitCapacity", "fractionOfPeopleWithBicycle",
"parkingStallCountScalingFactor", "transitPrice", "transit_crowding_VOT_multiplier",
"transit_crowding_VOT_threshold", "additional_trip_utility"]
print('order of config values:', "\n\t\t ".join(config_ordered))
config_section = ','.join(get_config_value(x) for x in config_ordered)
print(config_section)
print("")
print('the rest of configuration:')
for key, value in config.items():
if 'intercept' not in key and key not in config_ordered:
print(value)
print("")
grep_beamlog(s3url, ["Total number of links", "Number of persons:"])
return "{}, ,{},{}, , ,{}, ,{}".format(config_section, commit, s3url, modes_section, intercepts_sections)
def plot_simulation_vs_google_speed_comparison(s3url, iteration, compare_vs_3am, title=""):
"""
if google requests was enabled for specified run for specified iteration, then
plots comparison of google speeds vs simulation speeds for the same set of routes
:param iteration: iteration of simulation
:param s3url: url to s3 output
:param compare_vs_3am: if comparison should be done vs google 3am speeds (relaxed speed) instead of regular route time
:param title: main title for all plotted graphs. useful for future copy-paste to distinguish different simulations
"""
s3path = get_output_path_from_s3_url(s3url)
google_tt = pd.read_csv(s3path + "/ITERS/it.{0}/{0}.googleTravelTimeEstimation.csv".format(iteration))
google_tt_3am = google_tt[google_tt['departureTime'] == 3 * 60 * 60].copy()
google_tt_rest = google_tt[
(google_tt['departureTime'] != 3 * 60 * 60) & (google_tt['departureTime'] < 24 * 60 * 60)].copy()
google_tt_column = 'googleTravelTimeWithTraffic'
google_tt_column3am = 'googleTravelTimeWithTraffic'
def get_speed(distance, travel_time):
# travel time may be -1 for some google requests because of some google errors
if travel_time <= 0:
return 0
else:
return distance / travel_time
def get_uid(row):
return "{}:{}:{}:{}:{}".format(row['vehicleId'], row['originLat'], row['originLng'], row['destLat'],
row['destLng'])
if compare_vs_3am:
google_tt_3am['googleDistance3am'] = google_tt_3am['googleDistance']
google_tt_3am['google_api_speed_3am'] = google_tt_3am.apply(
lambda row: (get_speed(row['googleDistance'], row[google_tt_column3am])), axis=1)
google_tt_3am['uid'] = google_tt_3am.apply(get_uid, axis=1)
        google_tt_3am = google_tt_3am.groupby('uid')[['uid', 'google_api_speed_3am', 'googleDistance3am']] \
.agg(['min', 'mean', 'max']).copy()
google_tt_3am.reset_index(inplace=True)
google_tt_rest['google_api_speed'] = google_tt_rest.apply(
lambda row: (get_speed(row['googleDistance'], row[google_tt_column])), axis=1)
google_tt_rest['sim_speed'] = google_tt_rest.apply(lambda row: (get_speed(row['legLength'], row['simTravelTime'])),
axis=1)
google_tt_rest['uid'] = google_tt_rest.apply(get_uid, axis=1)
df = google_tt_rest \
.groupby(['uid', 'departureTime'])[[google_tt_column, 'googleDistance', 'google_api_speed', 'sim_speed']] \
.agg({google_tt_column: ['min', 'mean', 'max'],
'googleDistance': ['min', 'mean', 'max'],
'google_api_speed': ['min', 'mean', 'max'], 'sim_speed': ['min']}) \
.copy()
df.reset_index(inplace=True)
if compare_vs_3am:
df = df.join(google_tt_3am.set_index('uid'), on='uid')
df['departure_hour'] = df['departureTime'] // 3600
df.columns = ['{}_{}'.format(x[0], x[1]) for x in df.columns]
df['sim_speed'] = df['sim_speed_min']
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(22, 5))
fig.tight_layout(pad=0.1)
fig.subplots_adjust(wspace=0.15, hspace=0.1)
fig.suptitle(title, y=1.11)
title0 = "Trip-by-trip speed comparison"
title1 = "Hour-by-hour average speed comparison"
if compare_vs_3am:
title0 = title0 + " at 3am"
title1 = title1 + " at 3am"
def plot_hist(google_column_name, label):
df[label] = df['sim_speed'] - df[google_column_name]
df[label].plot.kde(bw_method=0.2, ax=ax0)
if compare_vs_3am:
plot_hist('google_api_speed_3am_max', 'Maximum estimate')
else:
plot_hist('google_api_speed_max', 'Maximum estimate')
plot_hist('google_api_speed_mean', 'Mean estimate')
plot_hist('google_api_speed_min', 'Minimum estimate')
ax0.axvline(0, color="black", linestyle="--")
ax0.set_title(title0)
ax0.legend(loc='upper left')
ax0.set_xlabel('Difference in speed (m/s)')
ax0.set_ylabel('Density')
to_plot_df_speed_0 = df.groupby(['departure_hour_']).mean()
to_plot_df_speed_0['departure_hour_'] = to_plot_df_speed_0.index
if compare_vs_3am:
to_plot_df_speed_0.plot(x='departure_hour_', y='google_api_speed_3am_min', label='Minimum estimate 3am', ax=ax1)
to_plot_df_speed_0.plot(x='departure_hour_', y='google_api_speed_3am_mean', label='Mean estimate 3am', ax=ax1)
to_plot_df_speed_0.plot(x='departure_hour_', y='google_api_speed_3am_max', label='Maximum estimate 3am', ax=ax1)
else:
to_plot_df_speed_0.plot(x='departure_hour_', y='google_api_speed_min', label='Minimum estimate', ax=ax1)
to_plot_df_speed_0.plot(x='departure_hour_', y='google_api_speed_mean', label='Mean estimate', ax=ax1)
to_plot_df_speed_0.plot(x='departure_hour_', y='google_api_speed_max', label='Maximum estimate', ax=ax1)
to_plot_df_speed_0.plot(x='departure_hour_', y='sim_speed', label='Simulated Speed', ax=ax1)
ax1.legend(loc='upper right')
ax1.set_title(title1)
ax1.set_xlabel('Hour of day')
ax1.set_ylabel('Speed (m/s)')
def print_spreadsheet_rows(s3urls, commit, iteration):
calibration_text = []
for s3url in s3urls:
main_text = get_calibration_text_data(s3url, commit=commit)
fake_walkers_file_name = "{}.fake_real_walkers.csv.gz".format(iteration)
fake_walkers = get_from_s3(s3url, fake_walkers_file_name)
s3path = get_output_path_from_s3_url(s3url)
replanning_path = s3path + "/ITERS/it.{0}/{0}.replanningEventReason.csv".format(iteration)
replanning_reasons = pd.read_csv(replanning_path)
print('\nreplanning_reasons:\n', replanning_reasons, '\n\n')
walk_transit_exhausted = \
replanning_reasons[replanning_reasons['ReplanningReason'] == 'ResourceCapacityExhausted WALK_TRANSIT'][
'Count'].values[0]
calibration_text.append((main_text, fake_walkers, walk_transit_exhausted))
print("\n\nspreadsheet text:")
for (text, _, _) in calibration_text:
print(text)
print("\n")
print("\n\nfake walkers:")
for (_, fake_walkers, _) in calibration_text:
if fake_walkers is None:
print("Not Available")
else:
print(fake_walkers['fake_walkers_ratio'].values[0] * 100)
print("\n")
print("\n\nResourceCapacityExhausted WALK_TRANSIT:")
for (_, _, text) in calibration_text:
print(text)
print("\n")
def plot_fake_real_walkers(title, fake_walkers, real_walkers, threshold):
fig, axs = plt.subplots(2, 2, figsize=(24, 4 * 2))
fig.tight_layout()
fig.subplots_adjust(wspace=0.05, hspace=0.2)
fig.suptitle(title, y=1.11)
ax1 = axs[0, 0]
ax2 = axs[0, 1]
fake_walkers['length'].hist(bins=50, ax=ax1, alpha=0.3, label='fake walkers')
real_walkers['length'].hist(bins=50, ax=ax1, alpha=0.3, label='real walkers')
ax1.legend(loc='upper right', prop={'size': 10})
ax1.set_title("Trip length histogram. Fake vs Real walkers. Min length of trip is {0}".format(threshold))
ax1.axvline(5000, color="black", linestyle="--")
fake_walkers['length'].hist(bins=50, ax=ax2, log=True, alpha=0.3, label='fake walkers')
real_walkers['length'].hist(bins=50, ax=ax2, log=True, alpha=0.3, label='real walkers')
ax2.legend(loc='upper right', prop={'size': 10})
ax2.set_title(
"Trip length histogram. Fake vs Real walkers. Logarithmic scale. Min length of trip is {0}".format(threshold))
ax2.axvline(5000, color="black", linestyle="--")
ax1 = axs[1, 0]
ax2 = axs[1, 1]
long_real_walkers = real_walkers[real_walkers['length'] >= threshold]
number_of_top_alternatives = 5
walkers_by_alternative = long_real_walkers.groupby('availableAlternatives')['length'].count().sort_values(
ascending=False)
top_alternatives = set(
walkers_by_alternative.reset_index()['availableAlternatives'].head(number_of_top_alternatives))
for alternative in top_alternatives:
label = str(list(set(alternative.split(':')))).replace('\'', '')[1:-1]
selected = long_real_walkers[long_real_walkers['availableAlternatives'] == alternative]['length']
selected.hist(bins=50, ax=ax1, alpha=0.4, linewidth=4, label=label)
selected.hist(bins=20, ax=ax2, log=True, histtype='step', linewidth=4, label=label)
ax1.set_title("Length histogram of top {} alternatives of real walkers".format(number_of_top_alternatives))
ax1.legend(loc='upper right', prop={'size': 10})
ax2.set_title(
"Length histogram of top {} alternatives of real walkers. Logarithmic scale".format(number_of_top_alternatives))
ax2.legend(loc='upper right', prop={'size': 10})
def get_fake_real_walkers(s3url, iteration, threshold=2000):
s3path = get_output_path_from_s3_url(s3url)
events_file_path = s3path + "/ITERS/it.{0}/{0}.events.csv.gz".format(iteration)
start_time = time.time()
modechoice = pd.concat([df[(df['type'] == 'ModeChoice') | (df['type'] == 'Replanning')]
for df in pd.read_csv(events_file_path, low_memory=False, chunksize=100000)])
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import os
import six
import json
import shutil
import sqlite3
import pandas as pd
import gramex.cache
from io import BytesIO
from lxml import etree
from nose.tools import eq_, ok_
from gramex import conf
from gramex.http import BAD_REQUEST, FOUND
from gramex.config import variables, objectpath, merge
from orderedattrdict import AttrDict, DefaultAttrDict
from pandas.util.testing import assert_frame_equal as afe
from . import folder, TestGramex, dbutils, tempfiles
xlsx_mime_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
class TestFormHandler(TestGramex):
sales = gramex.cache.open(os.path.join(folder, 'sales.xlsx'), 'xlsx')
@classmethod
def setUpClass(cls):
dbutils.sqlite_create_db('formhandler.db', sales=cls.sales)
@classmethod
def tearDownClass(cls):
try:
dbutils.sqlite_drop_db('formhandler.db')
except OSError:
pass
def check_filter(self, url, df=None, na_position='last', key=None):
# Modelled on testlib.test_data.TestFilter.check_filter
def eq(args, expected):
result = self.get(url, params=args).json()
actual = pd.DataFrame(result[key] if key else result)
expected.index = actual.index
if len(expected) > 0:
afe(actual, expected, check_like=True)
sales = self.sales if df is None else df
eq({}, sales)
eq({'देश': ['भारत']},
sales[sales['देश'] == 'भारत'])
eq({'city': ['Hyderabad', 'Coimbatore']},
sales[sales['city'].isin(['Hyderabad', 'Coimbatore'])])
eq({'product!': ['Biscuit', 'Crème']},
sales[~sales['product'].isin(['Biscuit', 'Crème'])])
eq({'city>': ['Bangalore'], 'city<': ['Singapore']},
sales[(sales['city'] > 'Bangalore') & (sales['city'] < 'Singapore')])
eq({'city>~': ['Bangalore'], 'city<~': ['Singapore']},
sales[(sales['city'] >= 'Bangalore') & (sales['city'] <= 'Singapore')])
eq({'city~': ['ore']},
sales[sales['city'].str.contains('ore')])
eq({'product': ['Biscuit'], 'city': ['Bangalore'], 'देश': ['भारत']},
sales[(sales['product'] == 'Biscuit') & (sales['city'] == 'Bangalore') &
(sales['देश'] == 'भारत')])
eq({'city!~': ['ore']},
sales[~sales['city'].str.contains('ore')])
eq({'sales>': ['100'], 'sales<': ['1000']},
sales[(sales['sales'] > 100) & (sales['sales'] < 1000)])
eq({'growth<': [0.5]},
sales[sales['growth'] < 0.5])
eq({'sales>': ['100'], 'sales<': ['1000'], 'growth<': ['0.5']},
sales[(sales['sales'] > 100) & (sales['sales'] < 1000) & (sales['growth'] < 0.5)])
eq({'देश': ['भारत'], '_sort': ['sales']},
sales[sales['देश'] == 'भारत'].sort_values('sales', na_position=na_position))
eq({'product<~': ['Biscuit'], '_sort': ['-देश', '-growth']},
sales[sales['product'] == 'Biscuit'].sort_values(
['देश', 'growth'], ascending=[False, False], na_position=na_position))
eq({'देश': ['भारत'], '_offset': ['4'], '_limit': ['8']},
sales[sales['देश'] == 'भारत'].iloc[4:12])
cols = ['product', 'city', 'sales']
eq({'देश': ['भारत'], '_c': cols},
sales[sales['देश'] == 'भारत'][cols])
ignore_cols = ['product', 'city']
eq({'देश': ['भारत'], '_c': ['-' + c for c in ignore_cols]},
sales[sales['देश'] == 'भारत'][[c for c in sales.columns if c not in ignore_cols]])
# Non-existent column does not raise an error for any operation
for op in ['', '~', '!', '>', '<', '<~', '>', '>~']:
eq({'nonexistent' + op: ['']}, sales)
# Non-existent sorts do not raise an error
eq({'_sort': ['nonexistent', 'sales']},
sales.sort_values('sales', na_position=na_position))
# Non-existent _c does not raise an error
eq({'_c': ['nonexistent', 'sales']}, sales[['sales']])
# Invalid limit or offset raise an error
eq_(self.get(url, params={'_limit': ['abc']}).status_code, BAD_REQUEST)
eq_(self.get(url, params={'_offset': ['abc']}).status_code, BAD_REQUEST)
# Check if metadata is returned properly
def meta_headers(url, params):
r = self.get(url, params=params)
result = DefaultAttrDict(AttrDict)
for header_name, value in r.headers.items():
name = header_name.lower()
if name.startswith('fh-'):
parts = name.split('-')
dataset_name, key = '-'.join(parts[1:-1]), parts[-1]
result[dataset_name][key] = json.loads(value)
return result
header_key = 'data' if key is None else key
headers = meta_headers(url, {'_meta': 'y'})[header_key]
eq_(headers.offset, 0)
eq_(headers.limit, conf.handlers.FormHandler.default._limit)
# There may be some default items passed as ignored, sort, or filter.
# Just check that this is a list
ok_(isinstance(headers.filters, list))
ok_(isinstance(headers.ignored, list))
ok_(isinstance(headers.sort, list))
if 'count' in headers:
eq_(headers.count, len(sales))
headers = meta_headers(url, {
'_meta': 'y',
'देश': 'USA',
'c': ['city', 'product', 'sales'],
'_sort': '-sales',
'_limit': 10,
'_offset': 3
})[header_key]
ok_(['देश', '', ['USA']] in headers.filters)
ok_(['c', ['city', 'product', 'sales']] in headers.ignored)
ok_(['sales', False] in headers.sort)
ok_(headers.offset, 3)
ok_(headers.limit, 10)
if 'count' in headers:
eq_(headers.count, (sales['देश'] == 'USA').sum())
def eq(self, url, expected):
out = self.get(url).content
actual = pd.read_csv(BytesIO(out), encoding='utf-8')
expected.index = range(len(expected))
afe(actual, expected, check_column_type=six.PY3)
def test_file(self):
self.check_filter('/formhandler/file', na_position='last')
self.check_filter('/formhandler/url', na_position='last')
self.check_filter('/formhandler/file-multi', na_position='last', key='big',
df=self.sales[self.sales['sales'] > 100])
self.check_filter('/formhandler/file-multi', na_position='last', key='by-growth',
df=self.sales.sort_values('growth'))
def test_sqlite(self):
self.check_filter('/formhandler/sqlite', na_position='first')
self.check_filter('/formhandler/sqlite-multi', na_position='last', key='big',
df=self.sales[self.sales['sales'] > 100])
self.check_filter('/formhandler/sqlite-multi', na_position='last', key='by-growth',
df=self.sales.sort_values('growth'))
self.check_filter('/formhandler/sqlite-multi', na_position='last', key='big-by-growth',
df=self.sales[self.sales['sales'] > 100].sort_values('growth'))
self.check_filter('/formhandler/sqlite-queryfunction', na_position='last')
self.check_filter('/formhandler/sqlite-queryfunction?ct=Hyderabad&ct=Coimbatore',
na_position='last',
df=self.sales[self.sales['city'].isin(['Hyderabad', 'Coimbatore'])])
def test_mysql(self):
dbutils.mysql_create_db(variables.MYSQL_SERVER, 'test_formhandler', sales=self.sales)
try:
self.check_filter('/formhandler/mysql', na_position='first')
finally:
dbutils.mysql_drop_db(variables.MYSQL_SERVER, 'test_formhandler')
def test_postgres(self):
dbutils.postgres_create_db(variables.POSTGRES_SERVER, 'test_formhandler', sales=self.sales)
try:
self.check_filter('/formhandler/postgres', na_position='last')
finally:
dbutils.postgres_drop_db(variables.POSTGRES_SERVER, 'test_formhandler')
def test_default(self):
cutoff, limit = 50, 2
self.eq('/formhandler/default', self.sales[self.sales['sales'] > cutoff].head(limit))
def test_function(self):
self.eq('/formhandler/file-function?col=sales&_format=csv', self.sales[['sales']])
self.eq('/formhandler/file-function?col=देश&col=product&_format=csv',
self.sales[['देश', 'product']])
def test_modify(self):
self.eq('/formhandler/modify', self.sales.sum(numeric_only=True).to_frame().T)
def test_modify_multi(self):
city = self.sales.groupby('city')['sales'].sum().reset_index()
city['rank'] = city['sales'].rank()
big = self.sales[self.sales['sales'] > 100]
self.eq('/formhandler/modify-multi', big.merge(city, on='city'))
def test_prepare(self):
self.eq('/formhandler/prepare', self.sales[self.sales['product'] == 'Biscuit'])
def test_download(self):
# Modelled on testlib.test_data.TestDownload
big = self.sales[self.sales['sales'] > 100]
by_growth = self.sales.sort_values('growth')
big.index = range(len(big))
by_growth.index = range(len(by_growth))
out = self.get('/formhandler/file?_format=html')
# Note: In Python 2, pd.read_html returns .columns.inferred_type=mixed
# instead of unicode. So check column type only in PY3, not PY2
afe(pd.read_html(out.content, encoding='utf-8')[0], self.sales, check_column_type=six.PY3)
eq_(out.headers['Content-Type'], 'text/html;charset=UTF-8')
eq_(out.headers.get('Content-Disposition'), None)
out = self.get('/formhandler/file-multi?_format=html')
result = pd.read_html(BytesIO(out.content), encoding='utf-8')
afe(result[0], big, check_column_type=six.PY3)
afe(result[1], by_growth, check_column_type=six.PY3)
eq_(out.headers['Content-Type'], 'text/html;charset=UTF-8')
eq_(out.headers.get('Content-Disposition'), None)
out = self.get('/formhandler/file?_format=xlsx')
afe(pd.read_excel(BytesIO(out.content)), self.sales)
eq_(out.headers['Content-Type'], xlsx_mime_type)
eq_(out.headers['Content-Disposition'], 'attachment;filename=data.xlsx')
out = self.get('/formhandler/file-multi?_format=xlsx')
result = pd.read_excel(BytesIO(out.content), sheet_name=None)
afe(result['big'], big)
afe(result['by-growth'], by_growth)
eq_(out.headers['Content-Type'], xlsx_mime_type)
eq_(out.headers['Content-Disposition'], 'attachment;filename=data.xlsx')
out = self.get('/formhandler/file?_format=csv')
ok_(out.content.startswith(''.encode('utf-8-sig')))
afe(pd.read_csv(BytesIO(out.content), encoding='utf-8'), self.sales)
eq_(out.headers['Content-Type'], 'text/csv;charset=UTF-8')
eq_(out.headers['Content-Disposition'], 'attachment;filename=data.csv')
out = self.get('/formhandler/file-multi?_format=csv')
lines = out.content.splitlines(True)
eq_(lines[0], 'big\n'.encode('utf-8-sig'))
actual = pd.read_csv(BytesIO(b''.join(lines[1:len(big) + 2])), encoding='utf-8')
afe(actual, big)
eq_(lines[len(big) + 3], 'by-growth\n'.encode('utf-8'))
actual = pd.read_csv(BytesIO(b''.join(lines[len(big) + 4:])), encoding='utf-8')
afe(actual, by_growth)
eq_(out.headers['Content-Type'], 'text/csv;charset=UTF-8')
eq_(out.headers['Content-Disposition'], 'attachment;filename=data.csv')
for fmt in ['csv', 'html', 'json', 'xlsx']:
out = self.get('/formhandler/file?_format=%s&_download=test.%s' % (fmt, fmt))
eq_(out.headers['Content-Disposition'], 'attachment;filename=test.%s' % fmt)
out = self.get('/formhandler/file-multi?_format=%s&_download=test.%s' % (fmt, fmt))
eq_(out.headers['Content-Disposition'], 'attachment;filename=test.%s' % fmt)
@staticmethod
def copy_file(source, target):
target = os.path.join(folder, target)
source = os.path.join(folder, source)
shutil.copyfile(source, target)
tempfiles[target] = target
return target
def call(self, url, args, method, headers):
r = self.check('/formhandler/edits-' + url, data=args, method=method, headers=headers)
meta = r.json()
# meta has 'ignored' with list of ignored columns
ok_(['x', args.get('x', [1])] in objectpath(meta, 'data.ignored'))
# meta has 'filters' for PUT and DELETE. It is empty for post
if method.lower() == 'post':
eq_(objectpath(meta, 'data.filters'), [])
else:
ok_(isinstance(objectpath(meta, 'data.filters'), list))
return r
def check_edit(self, method, source, args, count):
# Edits the correct count of records, returns empty value and saves to file
target = self.copy_file('sales.xlsx', 'sales-edits.xlsx')
self.call('xlsx-' + source, args, method, {'Count-Data': str(count)})
result = gramex.cache.open(target)
# Check result. TODO: check that the values are correctly added
if method == 'delete':
eq_(len(result), len(self.sales) - count)
elif method == 'post':
eq_(len(result), len(self.sales) + count)
elif method == 'put':
eq_(len(result), len(self.sales))
target = os.path.join(folder, 'formhandler-edits.db')
dbutils.sqlite_create_db(target, sales=self.sales)
tempfiles[target] = target
self.call('sqlite-' + source, args, method, {'Count-Data': str(count)})
# Check result. TODO: check that the values are correctly added
con = sqlite3.connect(target)
result = pd.read_sql('SELECT * FROM sales', con)
# TODO: check that the values are correctly added
if method == 'delete':
eq_(len(result), len(self.sales) - count)
elif method == 'post':
eq_(len(result), len(self.sales) + count)
elif method == 'put':
eq_(len(result), len(self.sales))
def test_invalid_edit(self):
self.copy_file('sales.xlsx', 'sales-edits.xlsx')
for method in ['delete', 'put']:
# Editing with no ID columns defined raises an error
self.check('/formhandler/file?city=A&product=B', method=method, code=400)
# Edit record without ID columns in args raises an error
self.check('/formhandler/edits-xlsx-multikey', method=method, code=400)
self.check('/formhandler/edits-xlsx-singlekey', method=method, code=400)
def test_edit_singlekey(self):
# Operations with a single key works
self.check_edit('post', 'singlekey', {
'देश': ['भारत'],
'city': ['Bangalore'],
'product': ['Crème'],
'sales': ['100'],
'growth': ['0.32'],
}, count=1)
self.check_edit('put', 'singlekey', {
'sales': ['513.7'],
'city': [123],
'product': ['abc'],
}, count=1)
# Delete with single ID as primary key works
self.check_edit('delete', 'singlekey', {
'sales': ['513.7']
}, count=1)
def test_edit_multikey_single_value(self):
# POST single value
self.check_edit('post', 'multikey', {
'देश': ['भारत'],
'city': ['Bangalore'],
'product': ['Alpha'],
'sales': ['100'],
}, count=1)
self.check_edit('put', 'multikey', {
'देश': ['भारत'],
'city': ['Bangalore'],
'product': ['Eggs'],
'sales': ['100'],
'growth': ['0.32'],
}, count=1)
self.check_edit('delete', 'multikey', {
'देश': ['भारत'],
'city': ['Bangalore'],
'product': ['Crème'],
}, count=1)
def test_edit_multikey_multi_value(self):
self.check_edit('post', 'multikey', {
'देश': ['भारत', 'भारत', 'भारत'],
'city': ['Bangalore', 'Bangalore', 'Bangalore'],
'product': ['Alpha', 'Beta', 'Gamma'],
'sales': ['100', '', '300'],
'growth': ['0.32', '0.50', '0.12'],
# There is a default ?x=1. Override that temporarily
'x': ['', '', '']
}, count=3)
# NOTE: PUT behaviour for multi-value is undefined
self.check_edit('delete', 'multikey', {
'देश': ['भारत', 'भारत', 'भारत', 'invalid'],
'city': ['Bangalore', 'Bangalore', 'Bangalore', 'invalid'],
'product': ['芯片', 'Eggs', 'Biscuit', 'invalid'],
}, count=3)
def test_edit_redirect(self):
self.copy_file('sales.xlsx', 'sales-edits.xlsx')
# redirect: affects POST, PUT and DELETE
for method in ['post', 'put', 'delete']:
r = self.get('/formhandler/edits-xlsx-redirect', method=method, data={
'देश': ['भारत'],
'city': ['Bangalore'],
'product': ['Eggs'],
'sales': ['100'],
}, allow_redirects=False)
eq_(r.status_code, FOUND)
ok_('Count-Data' in r.headers) # Any value is fine, we're not checking that
eq_(r.headers['Location'], '/redirected')
# GET is not redirected
r = self.get('/formhandler/edits-xlsx-redirect', allow_redirects=False)
ok_('Location' not in r.headers)
def test_edit_multidata(self):
csv_path = os.path.join(folder, 'sales-edits.csv')
self.sales.to_csv(csv_path, index=False, encoding='utf-8')
tempfiles[csv_path] = csv_path
dbutils.mysql_create_db(variables.MYSQL_SERVER, 'test_formhandler', sales=self.sales)
try:
row = {'देश': 'भारत', 'city': 'X', 'product': 'Q', 'growth': None}
self.check('/formhandler/edits-multidata', method='post', data={
'csv:देश': ['भारत'],
'csv:city': ['X'],
'csv:product': ['Q'],
'csv:sales': ['10'],
'sql:देश': ['भारत'],
'sql:city': ['X'],
'sql:product': ['Q'],
'sql:sales': ['20'],
}, headers={
'count-csv': '1',
'count-sql': '1',
})
data = self.check('/formhandler/edits-multidata').json()
eq_(data['csv'][-1], merge(row, {'sales': 10}))
eq_(data['sql'][-1], merge(row, {'sales': 20}))
eq_(len(data['csv']), len(self.sales) + 1)
eq_(len(data['sql']), len(self.sales) + 1)
self.check('/formhandler/edits-multidata', method='put', data={
'csv:city': ['X'],
'csv:product': ['Q'],
'csv:sales': ['30'],
'sql:city': ['X'],
'sql:product': ['Q'],
'sql:sales': ['40'],
}, headers={
'count-csv': '1',
'count-sql': '1',
})
data = self.check('/formhandler/edits-multidata').json()
eq_(data['csv'][-1], merge(row, {'sales': 30}))
eq_(data['sql'][-1], merge(row, {'sales': 40}))
eq_(len(data['csv']), len(self.sales) + 1)
eq_(len(data['sql']), len(self.sales) + 1)
self.check('/formhandler/edits-multidata', method='delete', data={
'csv:city': ['X'],
'csv:product': ['Q'],
'sql:city': ['X'],
'sql:product': ['Q'],
}, headers={
'count-csv': '1',
'count-sql': '1',
})
data = self.check('/formhandler/edits-multidata').json()
eq_(len(data['csv']), len(self.sales))
eq_(len(data['sql']), len(self.sales))
finally:
dbutils.mysql_drop_db(variables.MYSQL_SERVER, 'test_formhandler')
def test_edit_multidata_modify(self):
csv_path = os.path.join(folder, 'sales-edits.csv')
self.sales.to_csv(csv_path, index=False, encoding='utf-8')
tempfiles[csv_path] = csv_path
dbutils.mysql_create_db(variables.MYSQL_SERVER, 'test_formhandler', sales=self.sales)
try:
row = {'देश': 'भारत', 'city': 'X', 'product': 'Q', 'growth': None}
result = self.check('/formhandler/edits-multidata-modify', method='post', data={
'csv:देश': ['भारत'],
'csv:city': ['X'],
'csv:product': ['Q'],
'csv:sales': ['10'],
'sql:देश': ['भारत'],
'sql:city': ['X'],
'sql:product': ['Q'],
'sql:sales': ['20'],
}, headers={
'count-csv': '1',
'count-sql': '1',
}).json()
eq_(result['csv']['modify'], 8)
eq_(result['modify'], 8)
data = self.check('/formhandler/edits-multidata').json()
eq_(data['csv'][-1], merge(row, {'sales': 10}))
eq_(data['sql'][-1], merge(row, {'sales': 20}))
eq_(len(data['csv']), len(self.sales) + 1)
eq_(len(data['sql']), len(self.sales) + 1)
finally:
dbutils.mysql_drop_db(variables.MYSQL_SERVER, 'test_formhandler')
def test_edit_json(self):
target = self.copy_file('sales.xlsx', 'sales-edits.xlsx')
target = os.path.join(folder, 'formhandler-edits.db')
dbutils.sqlite_create_db(target, sales=self.sales)
tempfiles[target] = target
for fmt in ('xlsx', 'sqlite'):
kwargs = {
'url': '/formhandler/edits-%s-multikey' % fmt,
'request_headers': {'Content-Type': 'application/json'},
}
# POST 2 records. Check that 2 records where added
self.check(method='post', data=json.dumps({
'देश': ['भारत', 'USA'],
'city': ['HYD', 'NJ'],
'product': ['खुश', 'खुश'],
'sales': [100, 200],
}), headers={'Count-Data': '2'}, **kwargs)
eq_(self.get(kwargs['url'], params={'product': 'खुश'}).json(), [
{'देश': 'भारत', 'city': 'HYD', 'product': 'खुश', 'sales': 100.0, 'growth': None},
{'देश': 'USA', 'city': 'NJ', 'product': 'खुश', 'sales': 200.0, 'growth': None},
])
# PUT a record. Check that the record was changed
self.check(method='put', data=json.dumps({
'city': ['HYD'],
'product': ['खुश'],
'sales': [300],
'growth': [0.3],
}), headers={'Count-Data': '1'}, **kwargs)
eq_(self.get(kwargs['url'], params={'city': 'HYD', 'product': 'खुश'}).json(), [
{'देश': 'भारत', 'city': 'HYD', 'product': 'खुश', 'sales': 300.0, 'growth': 0.3},
])
# DELETE 2 records one by one. Check that 2 records were deleted
self.check(method='delete', data=json.dumps({
'city': ['HYD'],
'product': ['खुश'],
}), headers={'Count-Data': '1'}, **kwargs)
self.check(method='delete', data=json.dumps({
'city': ['NJ'],
'product': ['खुश'],
}), headers={'Count-Data': '1'}, **kwargs)
eq_(self.get(kwargs['url'], params={'product': 'खुश'}).json(), [])
def test_chart(self):
r = self.get('/formhandler/chart', data={
'_format': 'svg',
'chart': 'barplot',
'x': 'देश',
'y': 'sales',
'dpi': 72,
'width': 500,
'height': 300,
})
tree = etree.fromstring(r.text.encode('utf-8'))
eq_(tree.get('viewBox'), '0 0 500 300')
# TODO: expand on test cases
# Check spec, data for vega, vega-lite, vegam formats
base = '/formhandler/chart?_format={}'
data = pd.DataFrame(self.get(base.format('json')).json())
for fmt in {'vega', 'vega-lite', 'vegam'}:
r = self.get(base.format(fmt))
var = json.loads(re.findall(r'}\)\((.*?)}\)', r.text)[-1] + '}')
var = var['spec']
if 'fromjson' in var:
df = var['fromjson'][0]['data']
var['fromjson'][0]['data'] = '__DATA__'
else:
df = var.pop('data')
df = (df[0] if isinstance(df, list) else df)['values']
yaml_path = os.path.join(folder, '{}.yaml'.format(fmt))
spec = gramex.cache.open(yaml_path, 'yaml')
afe(pd.DataFrame(df), data)
self.assertDictEqual(var, spec)
def test_headers(self):
self.check('/formhandler/headers', headers={
'X-JSON': 'ok', 'X-Base': 'ok', 'X-Root': 'ok'
})
def test_args(self):
# url: and sheet_name: accepts query formatting for files
url = '/formhandler/arg-url?path=sales&sheet=sales&ext=excel'
afe(pd.DataFrame(self.get(url).json()), self.sales, check_like=True)
url = '/formhandler/arg-url?path=sales&sheet=census'
census = gramex.cache.open(os.path.join(folder, 'sales.xlsx'), sheet_name='census')
afe(pd.DataFrame(self.get(url).json()), census, check_like=True)
# url: and table: accept query formatting for SQLAlchemy
url = '/formhandler/arg-table?db=formhandler&table=sales'
afe(pd.DataFrame(self.get(url).json()), self.sales, check_like=True)
# url: and table: accept query formatting for SQLAlchemy
# TODO: In Python 2, unicode keys don't work well on Tornado. So use safe keys
key, val = ('product', '芯片') if six.PY2 else ('देश', 'भारत')
url = '/formhandler/arg-query?db=formhandler&col=%s&val=%s' % (key, val)
actual = pd.DataFrame(self.get(url).json())
expected = self.sales[self.sales[key] == val]
expected.index = actual.index
afe(actual, expected, check_like=True)
# Files with ../ etc should be skipped
self.check('/formhandler/arg-url?path=../sales',
code=500, text='KeyError')
# Test that the ?skip= parameter is used to find the table.
self.check('/formhandler/arg-table?db=formhandler&table=sales&skip=ab',
code=500, text='NoSuchTableError')
# Spaces are ignored in SQLAlchemy query. So ?skip= will be a missing key
self.check('/formhandler/arg-table?db=formhandler&table=sales&skip=a b',
code=500, text='KeyError')
def test_path_arg(self):
url = '/formhandler/%s/formhandler/sales?group=product&col=city&val=Bangalore'
for sub_url in ['path_arg', 'path_kwarg']:
actual = pd.DataFrame(self.get(url % sub_url).json())
expected = self.sales[self.sales['city'] == 'Bangalore'].groupby('product')
expected = expected['sales'].sum().reset_index()
afe(actual, expected, check_like=True)
import matplotlib.pyplot as plt
# from sklearn import metrics
from sklearn.metrics import roc_curve, auc, confusion_matrix
from sklearn import preprocessing
import tensorflow as tf
import pandas as pd
import numpy as np
import pickle
def compute_rates(y_test, y_pred, pos_label=1):
fpr = dict()
tpr = dict()
roc_auc = dict()
n_classes = 1
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_pred[:, i], pos_label=pos_label)
roc_auc[i] = auc(fpr[i], tpr[i])
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_pred.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
return fpr, tpr, roc_auc
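# A minimal usage sketch (variable names are hypothetical): draw a ROC curve from
# the rates computed above, given binarized labels and model scores of shape (n, 1).
# fpr, tpr, roc_auc = compute_rates(y_test, y_pred)
# plt.plot(fpr[0], tpr[0], label="ROC (AUC = {:.3f})".format(roc_auc[0]))
# plt.plot([0, 1], [0, 1], "k--")
# plt.legend()
# plt.show()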
def get_data_28_svm():
file = "uscecchini28.csv"
df = pd.read_csv(file)
y = df['misstate']
y = preprocessing.label_binarize(y, classes=[0, 1])
n_classes = y.shape[1]
x = df.iloc[:, 9:37]#.values
x = x.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
x = pd.DataFrame(x_scaled)
print(x.shape, y.shape)
return x, y
def adjust_pred(y_pred):
# return np.sqrt((y_pred - np.mean(y_pred))**2)
return y_pred
fraud_data = pd.read_csv("fraud_acc.csv")
from datetime import datetime
from collections import Counter
from functools import partial
import pandas as pd
import mongoengine
import xlrd
import os
import re
def create_regex(s: str, initials: bool = True) -> str:
"""
Given a string representation of either a channel or marker, generate a standard
regex string to be used in a panel template
Parameters
----------
s: str
String value of channel or marker to generate regex term for
initials: bool, (default=True)
If True, account for use of initials to represent a marker/channel name
Returns
-------
str
Formatted regex string
"""
def has_numbers(inputString):
return any(char.isdigit() for char in inputString)
s = [i for ls in [_.split('-') for _ in s.split(' ')] for i in ls]
s = [i for ls in [_.split('.') for _ in s] for i in ls]
s = [i for ls in [_.split('/') for _ in s] for i in ls]
new_string = list()
for i in s:
if not has_numbers(i) and len(i) > 2 and initials:
new_string.append(f'{i[0]}({i[1:]})*')
else:
new_string.append(i)
new_string = r'[\s.-]+'.join(new_string)
new_string = r'<*\s*' + new_string + r'\s*>*'
return new_string
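# Worked examples traced from the rules above (illustration only):
#   create_regex('CD4')  -> '<*\s*CD4\s*>*'        (tokens containing digits are kept as-is)
#   create_regex('FITC') -> '<*\s*F(ITC)*\s*>*'    (alphabetic tokens get initials expansion)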
def create_template(channel_mappings: list, file_name: str,
case_sensitive: bool = False, initials: bool = True):
"""
Given a list of channel mappings from an fcs file, create an excel template for Panel creation.
Parameters
----------
channel_mappings: list
List of channel mappings (list of dictionaries)
file_name: str
File name for saving excel template
case_sensitive: bool, (default=False)
If True, search terms for channels/markers will be case sensitive
initials: bool, (default=True)
If True, search terms for channels/markers will account for the use of initials of channels/markers
Returns
-------
None
"""
try:
assert file_name.split('.')[1] == 'xlsx', 'Invalid file name, must be of format "NAME.xlsx"'
except IndexError:
raise Exception('Invalid file name, must be of format "NAME.xlsx"')
mappings = pd.DataFrame()
mappings['channel'] = [cm['channel'] for cm in channel_mappings]
mappings['marker'] = [cm['marker'] for cm in channel_mappings]
nomenclature = pd.DataFrame()
names = mappings['channel'].tolist() + mappings['marker'].tolist()
nomenclature['name'] = [n for n in names if n]
f = partial(create_regex, initials=initials)
nomenclature['regex'] = nomenclature['name'].apply(f)
nomenclature['case'] = case_sensitive
nomenclature['permutations'] = None
writer = pd.ExcelWriter(file_name, engine='xlsxwriter')
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime, timedelta
from scipy.special import gamma,gammainc,gammaincc
from scipy.stats import norm
from scipy.optimize import minimize,root_scalar
import networkx as nx
from operator import itemgetter
ep = 1e-80 #For preventing overflow errors in norm.cdf
tref = pd.to_datetime('2020-01-01') #Reference time for converting dates to numbers
################# FORMATTING ########################
def format_JH(url,drop_list,columns):
data = pd.read_csv(url)
if len(columns) == 2:
data[columns[1]] = data[columns[1]].fillna(value='NaN')
data = data.T.drop(drop_list).T.set_index(columns).T
data.index = pd.to_datetime(data.index,format='%m/%d/%y')
return data
def format_kaggle(folder,metric):
data_full = pd.read_csv(folder+'train.csv')
data = data_full.pivot_table(index='Date',columns=['Country_Region','Province_State'],values=metric)
data.index = pd.to_datetime(data.index,format='%Y-%m-%d')
return data
def format_predictions(path):
pred = pd.read_csv(path).fillna(value='NaN').set_index(['Country/Region','Province/State'])
for item in ['Nmax','Nmax_low','Nmax_high','sigma','sigma_low','sigma_high']:
pred[item] = pd.to_numeric(pred[item])
for item in ['th','th_low','th_high']:
pred[item] = pd.to_datetime(pred[item],format='%Y-%m-%d')
return pred
def load_sim(path):
data = pd.read_csv(path,index_col=0,header=[0,1])
data.index = pd.to_datetime(data.index,format='%Y-%m-%d')
for item in data.keys():
data[item] = pd.to_numeric(data[item])
return data
################# ESTIMATING PARAMETER VALUES ###############
def cbarr(t):
return 1/(np.sqrt(2*np.pi)*(1-norm.cdf(t)+ep))
def tmean(tf,params):
th,sigma = params
tau = (tf-th)/sigma
return -sigma*cbarr(-tau)*np.exp(-tau**2/2)+th
def tvar(tf,params):
th,sigma = params
tau = (tf-th)/sigma
return sigma**2*cbarr(-tau)*(np.sqrt(np.pi/2)*(1+np.sign(tau)*gammaincc(3/2,tau**2/2))-cbarr(-tau)*np.exp(-tau**2/2))
def cost_init(params,data,tf):
th,sigma = params
tmean_sample = (data.index.values*data.values).sum()/data.values.sum()
tvar_sample = (((data.index.values-tmean_sample)**2)*data.values).sum()/data.values.sum()
return (tmean_sample-tmean(tf,params))**2 + (tvar_sample-tvar(tf,params))**2
################### COST FUNCTIONs #################
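# The cost functions below all fit the same cumulative-growth model on a log scale:
#   N(t) ~ exp(logK) * norm.cdf((t - th) / sigma)
# i.e. an erf-shaped curve where th is the time of peak daily growth, sigma sets the
# width of the outbreak, and exp(logK) is the final size. An optional Gaussian prior
# on sigma (mean_sigma, var_sigma) enters as a quadratic penalty.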
def cost_p(params,data,prior):
th,logK,sigma = params
t = data.index.values
tau = (t-th)/sigma
if prior is not None:
mean_sigma, var_sigma = prior
penalty = (sigma-mean_sigma)**2/(2*var_sigma)
else:
penalty = 0
prediction = logK+np.log((norm.cdf(tau)+ep))
return ((np.log(data.values)-prediction)**2).sum()/2 + penalty
def jac_p(params,data,prior):
th,logK,sigma = params
t = data.index.values
tau = (t-th)/sigma
if prior is not None:
mean_sigma, var_sigma = prior
dpenalty = (sigma-mean_sigma)/var_sigma
else:
dpenalty = 0
prediction = logK+np.log((norm.cdf(tau)+ep))
err = np.log(data.values)-prediction
dlogNdt = np.exp(-tau**2/2)/(np.sqrt(2*np.pi)*sigma*(norm.cdf(tau)+ep))
return np.asarray([(dlogNdt*err).sum(),-err.sum(),(tau*dlogNdt*err).sum()])+np.asarray([0,0,dpenalty])
def hess_p(params,data,prior):
th,logK,sigma = params
t = data.index.values
tau = (t-th)/sigma
if prior is not None:
mean_sigma, var_sigma = prior
d2penalty = 1/var_sigma
else:
d2penalty = 0
prediction = logK+np.log((norm.cdf(tau)+ep))
err = np.log(data.values)-prediction
dlogNdt_s = np.exp(-tau**2/2)/(np.sqrt(2*np.pi)*(norm.cdf(tau)+ep))
dlogNdth = -dlogNdt_s/sigma
dlogNdlogK = np.ones(len(t))
dlogNdsig = -tau*dlogNdt_s/sigma
d2Ndth2_N = -tau*dlogNdt_s/sigma**2
d2Ndsig2_N = 2*tau*(1-tau**2/2)*dlogNdt_s/(sigma**2)
d2Ndsigdth_N = (1-2*tau**2/2)*dlogNdt_s/sigma**2
term1 = np.asarray([[((-d2Ndth2_N+dlogNdth**2)*err).sum(), 0, ((-d2Ndsigdth_N+dlogNdth*dlogNdsig)*err).sum()],
[0, 0, 0],
[((-d2Ndsigdth_N+dlogNdth*dlogNdsig)*err).sum(), 0, ((-d2Ndsig2_N+dlogNdsig**2)*err).sum()]])
term2 = np.asarray([[(dlogNdth**2).sum(), (dlogNdth*dlogNdlogK).sum(), (dlogNdth*dlogNdsig).sum()],
[(dlogNdth*dlogNdlogK).sum(), (dlogNdlogK**2).sum(), (dlogNdsig*dlogNdlogK).sum()],
[(dlogNdth*dlogNdsig).sum(), (dlogNdsig*dlogNdlogK).sum(), (dlogNdsig**2).sum()]])
term3 = np.zeros((3,3))
term3[2,2] = d2penalty
return term1 + term2+ term3
def th_err(th,data,sigma,tf):
tmean_sample = (data.index.values*data.values).sum()/data.values.sum()
tau = (tf-th)/sigma
tmean = -sigma*cbarr(-tau)*np.exp(-tau**2/2)+th
return tmean_sample-tmean
def cost_p_sig(params,data,sigma):
th,logK = params
t = data.index.values
tau = (t-th)/sigma
prediction = logK+np.log((norm.cdf(tau)+ep))
return 0.5*((np.log(data.values)-prediction)**2).sum()
def jac_p_sig(params,data,sigma):
th,logK = params
t = data.index.values
tau = (t-th)/sigma
prediction = logK+np.log((norm.cdf(tau)+ep))
err = np.log(data.values)-prediction
dlogNdt = np.exp(-tau**2/2)/(np.sqrt(np.pi*2)*sigma*(norm.cdf(tau)+ep))
return np.asarray([(dlogNdt*err).sum(),
-err.sum()])
################## FITTING #####################
def fit_erf_sig(data,p0=5e2,sigma=7):
#Get initial conditions
train = data.loc[data>0].diff().iloc[1:]
t = (train.index-tref)/timedelta(days=1)
train.index = t
train = pd.to_numeric(train)
th0 = (t.values*train.values).sum()/train.values.sum()
out = root_scalar(th_err,args=(train,sigma,t[-1]),x0=th0,x1=th0+10)
th0 = out.root
tau0 = (t[-1]-th0)/sigma
logK0 = np.log(data.iloc[-1]/(norm.cdf(tau0)+ep))
params = [th0,logK0,sigma]
#Train the model
train = data.loc[data>p0]
t = (train.index-tref)/timedelta(days=1)
train.index = t
train = pd.to_numeric(train)
out = minimize(cost_p_sig,[th0,logK0],args=(train,sigma),jac=jac_p_sig,method='BFGS')
params = list(out.x)+[sigma,2*out.fun/len(train)]
return params
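# A minimal usage sketch (the series name is hypothetical): fit the model to a
# cumulative case-count Series indexed by date, with the curve width fixed at 7 days.
# th, logK, sigma, err = fit_erf_sig(confirmed[('Italy', 'NaN')], p0=5e2, sigma=7)
# peak_date = tref + timedelta(days=th)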
def fit_erf(data,p0=5e2,verbose=False,prior=None):
#Get initial conditions
train = data.loc[data>0].diff().iloc[1:]
t = (train.index-tref)/timedelta(days=1)
train.index = t
train = pd.to_numeric(train)
th0 = (t.values*train.values).sum()/train.values.sum()
sig0 = np.sqrt(((t-th0).values**2*train.values).sum()/train.values.sum())
tf = t[-1]
if prior is not None:
mean_sigma, var_sigma = prior
lb = mean_sigma-2*np.sqrt(var_sigma)
ub = mean_sigma+2*np.sqrt(var_sigma)
else:
lb = None
ub = None
out = minimize(cost_init,[th0,sig0],args=(train,tf),bounds=((None,None),(lb,ub)))
th0,sig0 = out.x
tau0 = (tf-th0)/sig0
logK0 = np.log(data.iloc[-1]/(norm.cdf(tau0)+ep))
#Fit the curve
train = data.loc[data>p0]
t = (train.index-tref)/timedelta(days=1)
train.index = t
train = pd.to_numeric(train)
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
# Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
import numpy as np
import pandas as pd
from lifelines import KaplanMeierFitter, NelsonAalenFitter
from lifelines.plotting import add_at_risk_counts
def plot_kaplanmeier(outcomes, groups=None, plot_counts=False, **kwargs):
"""Plot a Kaplan-Meier Survival Estimator stratified by groups.
Parameters
----------
outcomes: pandas.DataFrame
a pandas dataframe containing the survival outcomes. The index of the
dataframe should be the same as the index of the features dataframe.
Should contain a column named 'time' that contains the survival time and
a column named 'event' that contains the censoring status.
\( \delta_i = 1 \) if the event is observed.
groups: pandas.Series
a pandas series containing the groups to stratify the Kaplan-Meier
estimates by.
plot_counts: bool
if True, plot the number of at risk and censored individuals in each group.
"""
if groups is None:
groups = np.array([1]*len(outcomes))
curves = {}
from matplotlib import pyplot as plt
ax = plt.subplot(111)
for group in sorted(set(groups)):
if pd.isna(group): continue
curves[group] = KaplanMeierFitter().fit(outcomes[groups==group]['time'],
outcomes[groups==group]['event'])
ax = curves[group].plot(label=group, ax=ax, **kwargs)
if plot_counts:
add_at_risk_counts(*[curves[group] for group in curves], ax=ax)
return ax
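# A minimal usage sketch (the grouping column is hypothetical): stratify by a binary
# treatment indicator and show at-risk counts under the plot.
# ax = plot_kaplanmeier(outcomes, groups=features['treatment'], plot_counts=True)
# ax.set_ylabel('Survival probability')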
def plot_nelsonaalen(outcomes, groups=None, **kwargs):
"""Plot a Nelson-Aalen Survival Estimator stratified by groups.
Parameters
----------
outcomes: pandas.DataFrame
a pandas dataframe containing the survival outcomes. The index of the
dataframe should be the same as the index of the features dataframe.
Should contain a column named 'time' that contains the survival time and
a column named 'event' that contains the censoring status.
\( \delta_i = 1 \) if the event is observed.
groups: pandas.Series
a pandas series containing the groups to stratify the Kaplan-Meier
estimates by.
"""
if groups is None:
groups = np.array([1]*len(outcomes))
for group in sorted(set(groups)):
if pd.isna(group): continue
import pandas as pd
import pandas_datareader as pdr
##### Fetch the KOSPI index from Naver Finance #####
kospi_total_url = 'https://finance.naver.com/sise/sise_index_day.nhn?code=KOSPI'
# Define a DataFrame to hold the daily index data
kospi_total_df = pd.DataFrame()
import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
cities=('chicago','new york city','washington')
months=('january','february','march','april','may','june', 'all')
days=('sunday','monday','tuesday','wednesday','thursday','friday','saturday','all')
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
# get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs
while True:
city=input('Enter a city from those cities (Chicago,New York City,Washington) \n').lower()
if city not in cities: #check if the input is valid
print('This input is invalid please enter a valid one')
continue
else:
break
# get user input for month (all, january, february, ... , june)
while True:
month=input('Enter a month from January to June or type all to view all months \n').lower()
if month not in months: #check if the input is valid
print('This input is invalid please enter a valid one')
continue
else:
break
#get user input for day of week (all, monday, tuesday, ... sunday)
while True:
day=input('Enter a day from Monday to Sunday or type all to view all weeks \n').lower()
if day not in days: #check if the input is valid
print('This input is invalid please enter a valid one')
continue
else:
break
print('-'*40)
return city, month, day
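# Example of how the helpers below are meant to be chained together:
# city, month, day = get_filters()
# df = load_data(city, month, day)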
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
#we will load the data into a dataframe
df=pd.read_csv(CITY_DATA[city])
#to convert the start time column to datetime
df['Start Time'] = pd.to_datetime(df['Start Time'])
# Copyright (C) 2022 National Center for Atmospheric Research and National Oceanic and Atmospheric Administration
# SPDX-License-Identifier: Apache-2.0
#
""" This is the overall control file. It will drive the entire analysis package"""
import monetio as mio
import monet as m
import os
import xarray as xr
import pandas as pd
import numpy as np
import datetime
# from util import write_ncf
__all__ = (
"pair",
"observation",
"model",
"analysis",
)
class pair:
"""The pair class.
The pair class pairs model data
directly with observational data along time and space.
"""
def __init__(self):
"""Initialize a :class:`pair` object.
Returns
-------
pair
"""
self.type = 'pt_sfc'
self.radius_of_influence = 1e6
self.obs = None
self.model = None
self.model_vars = None
self.obs_vars = None
self.filename = None
def __repr__(self):
return (
f"{type(self).__name__}(\n"
f" type={self.type!r},\n"
f" radius_of_influence={self.radius_of_influence!r},\n"
f" obs={self.obs!r},\n"
f" model={self.model!r},\n"
f" model_vars={self.model_vars!r},\n"
f" obs_vars={self.obs_vars!r},\n"
f" filename={self.filename!r},\n"
")"
)
def fix_paired_xarray(self, dset):
"""Reformat the paired dataset.
Parameters
----------
dset : xarray.Dataset
Returns
-------
xarray.Dataset
Reformatted paired dataset.
"""
# first convert to dataframe
df = dset.to_dataframe().reset_index(drop=True)
# now get just the single site index
dfpsite = df.rename({'siteid': 'x'}, axis=1).drop_duplicates(subset=['x'])
columns = dfpsite.columns # all columns
site_columns = [
'latitude',
'longitude',
'x',
'site',
'msa_code',
'cmsa_name',
'epa_region',
'state_name',
'msa_name',
'site',
'utcoffset',
] # only columns for single site identificaiton
# site only xarray obj (no time dependence)
dfps = dfpsite.loc[:, columns[columns.isin(site_columns)]].set_index(['x']).to_xarray() # single column index
# now pivot df and convert back to xarray using only non site_columns
site_columns.remove('x') # need to keep x to merge later
dfx = df.loc[:, df.columns[~df.columns.isin(site_columns)]].rename({'siteid': 'x'}, axis=1).set_index(['time', 'x']).to_xarray()
# merge the time dependent and time independent
out = xr.merge([dfx, dfps])
# reset x index and add siteid back to the xarray object
if ~pd.api.types.is_numeric_dtype(out.x):
siteid = out.x.values
out['x'] = range(len(siteid))
out['siteid'] = (('x'), siteid)
return out
class observation:
"""The observation class.
A class with information and data from an observational dataset.
"""
def __init__(self):
"""Initialize an :class:`observation` object.
Returns
-------
observation
"""
self.obs = None
self.label = None
self.file = None
self.obj = None
"""The data object (:class:`pandas.DataFrame` or :class:`xarray.Dataset`)."""
self.type = 'pt_src'
self.variable_dict = None
def __repr__(self):
return (
f"{type(self).__name__}(\n"
f" obs={self.obs!r},\n"
f" label={self.label!r},\n"
f" file={self.file!r},\n"
f" obj={repr(self.obj) if self.obj is None else '...'},\n"
f" type={self.type!r},\n"
f" variable_dict={self.variable_dict!r},\n"
")"
)
def open_obs(self):
"""Open the observational data, store data in observation pair,
and apply mask and scaling.
Returns
-------
None
"""
from glob import glob
from numpy import sort
from . import tutorial
if self.file.startswith("example:"):
example_id = ":".join(s.strip() for s in self.file.split(":")[1:])
files = [tutorial.fetch_example(example_id)]
else:
files = sort(glob(self.file))
assert len(files) >= 1, "need at least one"
_, extension = os.path.splitext(files[0])
try:
if extension in {'.nc', '.ncf', '.netcdf', '.nc4'}:
if len(files) > 1:
self.obj = xr.open_mfdataset(files)
else:
self.obj = xr.open_dataset(files[0])
elif extension in ['.ict', '.icarrt']:
assert len(files) == 1, "monetio.icarrt.add_data can only read one file"
self.obj = mio.icarrt.add_data(files[0])
else:
raise ValueError(f'extension {extension!r} currently unsupported')
except Exception as e:
print('something happened opening file:', e)
return
self.mask_and_scale() # mask and scale values from the control values
def mask_and_scale(self):
"""Mask and scale observations, including unit conversions and setting
detection limits.
Returns
-------
None
"""
vars = self.obj.data_vars
if self.variable_dict is not None:
for v in vars:
if v in self.variable_dict:
d = self.variable_dict[v]
# Apply removal of min, max, and nan on the units in the obs file first.
if 'obs_min' in d:
self.obj[v].data = self.obj[v].where(self.obj[v] >= d['obs_min'])
if 'obs_max' in d:
self.obj[v].data = self.obj[v].where(self.obj[v] <= d['obs_max'])
if 'nan_value' in d:
self.obj[v].data = self.obj[v].where(self.obj[v] != d['nan_value'])
# Then apply a correction if needed for the units.
if 'unit_scale' in d:
scale = d['unit_scale']
else:
scale = 1
if 'unit_scale_method' in d:
if d['unit_scale_method'] == '*':
self.obj[v].data *= scale
elif d['unit_scale_method'] == '/':
self.obj[v].data /= scale
elif d['unit_scale_method'] == '+':
self.obj[v].data += scale
elif d['unit_scale_method'] == '-':
self.obj[v].data += -1 * scale
def obs_to_df(self):
"""Convert and reformat observation object (:attr:`obj`) to dataframe.
Returns
-------
None
"""
self.obj = self.obj.to_dataframe().reset_index().drop(['x', 'y'], axis=1)
class model:
"""The model class.
A class with information and data from model results.
"""
def __init__(self):
"""Initialize a :class:`model` object.
Returns
-------
model
"""
self.model = None
self.radius_of_influence = None
self.mod_kwargs = {}
self.file_str = None
self.files = None
self.file_vert_str = None
self.files_vert = None
self.file_surf_str = None
self.files_surf = None
self.file_pm25_str = None
self.files_pm25 = None
self.label = None
self.obj = None
self.mapping = None
self.variable_dict = None
self.plot_kwargs = None
def __repr__(self):
return (
f"{type(self).__name__}(\n"
f" model={self.model!r},\n"
f" radius_of_influence={self.radius_of_influence!r},\n"
f" mod_kwargs={self.mod_kwargs!r},\n"
f" file_str={self.file_str!r},\n"
f" label={self.label!r},\n"
f" obj={repr(self.obj) if self.obj is None else '...'},\n"
f" mapping={self.mapping!r},\n"
f" label={self.label!r},\n"
" ...\n"
")"
)
def glob_files(self):
"""Convert the model file location string read in by the yaml file
into a list of files containing all model data.
Returns
-------
None
"""
from numpy import sort # TODO: maybe use `sorted` for this
from glob import glob
from . import tutorial
print(self.file_str)
if self.file_str.startswith("example:"):
example_id = ":".join(s.strip() for s in self.file_str.split(":")[1:])
self.files = [tutorial.fetch_example(example_id)]
else:
self.files = sort(glob(self.file_str))
if self.file_vert_str is not None:
self.files_vert = sort(glob(self.file_vert_str))
if self.file_surf_str is not None:
self.files_surf = sort(glob(self.file_surf_str))
if self.file_pm25_str is not None:
self.files_pm25 = sort(glob(self.file_pm25_str))
def open_model_files(self):
"""Open the model files, store data in :class:`model` instance attributes,
and apply mask and scaling.
Models supported are cmaq, wrfchem, rrfs, and gsdchem.
If a model is not supported, MELODIES-MONET will try to open
the model data using a generic reader. If you wish to include new
models, add the new model option to this module.
Returns
-------
None
"""
self.glob_files()
# Calculate species to input into MONET, so works for all mechanisms in wrfchem
        # I want to expand this for the other models too when adding aircraft data.
list_input_var = []
for obs_map in self.mapping:
list_input_var = list_input_var + list(set(self.mapping[obs_map].keys()) - set(list_input_var))
#Only certain models need this option for speeding up i/o.
if 'cmaq' in self.model.lower():
print('**** Reading CMAQ model output...')
self.mod_kwargs.update({'var_list' : list_input_var})
if self.files_vert is not None:
self.mod_kwargs.update({'fname_vert' : self.files_vert})
if self.files_surf is not None:
self.mod_kwargs.update({'fname_surf' : self.files_surf})
if len(self.files) > 1:
self.mod_kwargs.update({'concatenate_forecasts' : True})
self.obj = mio.models._cmaq_mm.open_mfdataset(self.files,**self.mod_kwargs)
elif 'wrfchem' in self.model.lower():
print('**** Reading WRF-Chem model output...')
self.mod_kwargs.update({'var_list' : list_input_var})
self.obj = mio.models._wrfchem_mm.open_mfdataset(self.files,**self.mod_kwargs)
elif 'rrfs' in self.model.lower():
print('**** Reading RRFS-CMAQ model output...')
if self.files_pm25 is not None:
self.mod_kwargs.update({'fname_pm25' : self.files_pm25})
self.mod_kwargs.update({'var_list' : list_input_var})
self.obj = mio.models._rrfs_cmaq_mm.open_mfdataset(self.files,**self.mod_kwargs)
elif 'gsdchem' in self.model.lower():
print('**** Reading GSD-Chem model output...')
if len(self.files) > 1:
self.obj = mio.fv3chem.open_mfdataset(self.files,**self.mod_kwargs)
else:
self.obj = mio.fv3chem.open_dataset(self.files,**self.mod_kwargs)
elif 'cesm_fv' in self.model.lower():
print('**** Reading CESM FV model output...')
self.mod_kwargs.update({'var_list' : list_input_var})
self.obj = mio.models._cesm_fv_mm.open_mfdataset(self.files,**self.mod_kwargs)
# CAM-chem-SE grid or MUSICAv0
elif 'cesm_se' in self.model.lower():
from .new_monetio import read_cesm_se
self.mod_kwargs.update({'var_list' : list_input_var})
self.mod_kwargs.update({'scrip_file' : self.scrip_file})
print('**** Reading CESM SE model output...')
self.obj = read_cesm_se.open_mfdataset(self.files,**self.mod_kwargs)
#self.obj, self.obj_scrip = read_cesm_se.open_mfdataset(self.files,**self.mod_kwargs)
#self.obj.monet.scrip = self.obj_scrip
else:
print('**** Reading Unspecified model output. Take Caution...')
if len(self.files) > 1:
self.obj = xr.open_mfdataset(self.files,**self.mod_kwargs)
else:
self.obj = xr.open_dataset(self.files[0],**self.mod_kwargs)
self.mask_and_scale()
def mask_and_scale(self):
"""Mask and scale observations including unit conversions and setting
detection limits.
Returns
-------
None
"""
vars = self.obj.data_vars
if self.variable_dict is not None:
for v in vars:
if v in self.variable_dict:
d = self.variable_dict[v]
if 'unit_scale' in d:
scale = d['unit_scale']
else:
scale = 1
if 'unit_scale_method' in d:
if d['unit_scale_method'] == '*':
self.obj[v].data *= scale
elif d['unit_scale_method'] == '/':
self.obj[v].data /= scale
elif d['unit_scale_method'] == '+':
self.obj[v].data += scale
elif d['unit_scale_method'] == '-':
self.obj[v].data += -1 * scale
class analysis:
"""The analysis class.
The analysis class is the highest
level class and stores all information about the analysis. It reads
and stores information from the input yaml file and defines
overarching analysis information like the start and end time, which
models and observations to pair, etc.
"""
def __init__(self):
"""Initialize the :class:`analysis` object.
Returns
-------
analysis
"""
self.control = 'control.yaml'
self.control_dict = None
self.models = {}
"""dict : Models, set by :meth:`open_models`."""
self.obs = {}
"""dict : Observations, set by :meth:`open_obs`."""
self.paired = {}
"""dict : Paired data, set by :meth:`pair_data`."""
self.start_time = None
self.end_time = None
self.download_maps = True # Default to True
self.output_dir = None
self.debug = False
def __repr__(self):
return (
f"{type(self).__name__}(\n"
f" control={self.control!r},\n"
f" control_dict={repr(self.control_dict) if self.control_dict is None else '...'},\n"
f" models={self.models!r},\n"
f" obs={self.obs!r},\n"
f" paired={self.paired!r},\n"
f" start_time={self.start_time!r},\n"
f" end_time={self.end_time!r},\n"
f" download_maps={self.download_maps!r},\n"
f" output_dir={self.output_dir!r},\n"
f" debug={self.debug!r},\n"
")"
)
def read_control(self, control=None):
"""Read the input yaml file,
updating various :class:`analysis` instance attributes.
Parameters
----------
control : str
Input yaml file path.
If provided, :attr:`control` will be set to this value.
Returns
-------
None
"""
import yaml
if control is not None:
self.control = control
with open(self.control, 'r') as stream:
self.control_dict = yaml.safe_load(stream)
# set analysis time
self.start_time = | pd.Timestamp(self.control_dict['analysis']['start_time']) | pandas.Timestamp |
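# Illustrative sketch (not from the original package): a minimal, self-contained view of the
# control-file parsing done in analysis.read_control above. Only the analysis start/end keys read
# in the excerpt are shown; the dates and everything else in a real control file are invented.
import yaml
import pandas as pd

_control_yaml = """
analysis:
  start_time: "2019-08-01 00:00:00"
  end_time: "2019-08-03 00:00:00"
"""
_control_dict = yaml.safe_load(_control_yaml)
_start = pd.Timestamp(_control_dict['analysis']['start_time'])
_end = pd.Timestamp(_control_dict['analysis']['end_time'])
print(_start, _end)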
import numpy as np
import pandas as pd
from scipy import sparse
import scanpy as sc
from sklearn.linear_model import LinearRegression
from scIB.utils import checkAdata, checkBatch
def pcr_comparison(
adata_pre,
adata_post,
covariate,
embed=None,
n_comps=50,
scale=True,
verbose=False
):
"""
Compare the effect before and after integration
Return either the difference of variance contribution before and after integration
    or a score between 0 and 1 (`scale=True`) with 0 if the variance contribution hasn't
changed. The larger the score, the more different the variance contributions are before
and after integration.
params:
adata_pre: uncorrected adata
adata_post: integrated adata
embed : if `embed=None`, use the full expression matrix (`adata.X`), otherwise
use the embedding provided in `adata_post.obsm[embed]`
scale: if True, return scaled score
return:
difference of R2Var value of PCR
"""
if embed == 'X_pca':
embed = None
pcr_before = pcr(adata_pre, covariate=covariate, recompute_pca=True,
n_comps=n_comps, verbose=verbose)
pcr_after = pcr(adata_post, covariate=covariate, embed=embed, recompute_pca=True,
n_comps=n_comps, verbose=verbose)
if scale:
score = (pcr_before - pcr_after) / pcr_before
if score < 0:
print("Variance contribution increased after integration!")
print("Setting PCR comparison score to 0.")
score = 0
return score
else:
return pcr_after - pcr_before
def pcr(
adata,
covariate,
embed=None,
n_comps=50,
recompute_pca=True,
verbose=False
):
"""
PCR for Adata object
Checks whether to
+ compute PCA on embedding or expression data (set `embed` to name of embedding matrix e.g. `embed='X_emb'`)
+ use existing PCA (only if PCA entry exists)
+ recompute PCA on expression matrix (default)
params:
adata: Anndata object
embed : if `embed=None`, use the full expression matrix (`adata.X`), otherwise
use the embedding provided in `adata_post.obsm[embed]`
n_comps: number of PCs if PCA should be computed
covariate: key for adata.obs column to regress against
return:
R2Var of PCR
"""
checkAdata(adata)
checkBatch(covariate, adata.obs)
if verbose:
print(f"covariate: {covariate}")
covariate_values = adata.obs[covariate]
# use embedding for PCA
if (embed is not None) and (embed in adata.obsm):
if verbose:
print(f"compute PCR on embedding n_comps: {n_comps}")
return pc_regression(adata.obsm[embed], covariate_values, n_comps=n_comps)
# use existing PCA computation
elif (recompute_pca == False) and ('X_pca' in adata.obsm) and ('pca' in adata.uns):
if verbose:
print("using existing PCA")
return pc_regression(adata.obsm['X_pca'], covariate_values, pca_var=adata.uns['pca']['variance'])
# recompute PCA
else:
if verbose:
print(f"compute PCA n_comps: {n_comps}")
return pc_regression(adata.X, covariate_values, n_comps=n_comps)
def pc_regression(
data,
covariate,
pca_var=None,
n_comps=50,
svd_solver='arpack',
verbose=False
):
"""
params:
        data: expression or PCA matrix. Will be assumed to contain PCA values, if pca_var is given
        covariate: series or list of batch assignments
        n_comps: number of PCA components for computing PCA, only used when pca_var is not given. If no pca_var is given and n_comps=None, compute PCA and don't reduce data
        pca_var: iterable of variances for `n_comps` components. If `pca_var` is not `None`, it is assumed that the matrix contains PCA values, else PCA is computed
            PCA is only computed, if the variance contribution is not given (pca_var).
"""
if isinstance(data, (np.ndarray, sparse.csr_matrix, sparse.csc_matrix)):
matrix = data
else:
raise TypeError(f'invalid type: {data.__class__} is not a numpy array or sparse matrix')
# perform PCA if no variance contributions are given
if pca_var is None:
if n_comps is None or n_comps > min(matrix.shape):
n_comps = min(matrix.shape)
if n_comps == min(matrix.shape):
svd_solver = 'full'
if verbose:
print("compute PCA")
pca = sc.tl.pca(matrix, n_comps=n_comps, use_highly_variable=False,
return_info=True, svd_solver=svd_solver, copy=True)
X_pca = pca[0].copy()
pca_var = pca[3].copy()
del pca
else:
X_pca = matrix
n_comps = matrix.shape[1]
## PC Regression
if verbose:
print("fit regression on PCs")
# handle categorical values
if | pd.api.types.is_numeric_dtype(covariate) | pandas.api.types.is_numeric_dtype |
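# Rough, self-contained sketch (not the exact scIB continuation): the variance-weighted R2 idea that
# pc_regression above builds towards, i.e. fit a linear regression of each PC on the covariate and
# weight the per-PC R2 values by the PC variances. The helper name and the toy inputs are invented.
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression

def _pc_regression_sketch(X_pca, covariate, pca_var):
    covariate = pd.Series(covariate)
    if pd.api.types.is_numeric_dtype(covariate):
        covs = covariate.to_numpy()[:, None]
    else:
        covs = pd.get_dummies(covariate).to_numpy()
    r2 = []
    for i in range(X_pca.shape[1]):
        lm = LinearRegression().fit(covs, X_pca[:, i])
        r2.append(lm.score(covs, X_pca[:, i]))
    return float(np.dot(np.asarray(r2), np.asarray(pca_var)) / np.sum(pca_var))

_rng = np.random.default_rng(0)
print(_pc_regression_sketch(_rng.normal(size=(50, 5)), ['a', 'b'] * 25, np.arange(5, 0, -1)))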
#!/usr/bin/env python
# coding: utf-8
# In[2]:
from pathlib import Path
import numpy as np
import pandas as pd
train = pd.read_csv("corpus/imdb/labeledTrainData.tsv", header=0,
delimiter="\t", quoting=3)
test = pd.read_csv("corpus/imdb/testData.tsv", header=0,
delimiter="\t", quoting=3)
train_texts = train["review"].tolist()
train_labels = train["sentiment"].tolist()
test_texts = test["review"].tolist()
from sklearn.model_selection import train_test_split
train_texts, val_texts, train_labels, val_labels = train_test_split(train_texts, train_labels, test_size=.2)
from transformers import DistilBertTokenizerFast
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
train_encodings = tokenizer(train_texts, truncation=True, padding=True)
val_encodings = tokenizer(val_texts, truncation=True, padding=True)
test_encodings = tokenizer(test_texts, truncation=True, padding=True)
import tensorflow as tf
train_dataset = tf.data.Dataset.from_tensor_slices((
dict(train_encodings),
train_labels
))
val_dataset = tf.data.Dataset.from_tensor_slices((
dict(val_encodings),
val_labels
))
# test_labels = [1]*len(test1)
test_dataset = tf.data.Dataset.from_tensor_slices((
dict(test_encodings)
))
from transformers import TFDistilBertForSequenceClassification
model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')
# In[3]:
optimizer = tf.keras.optimizers.Adam(learning_rate=5e-5)
model.compile(optimizer=optimizer, loss=model.compute_loss, metrics=['accuracy']) # can also use any keras loss fn; metrics needed for the accuracy plot below
# In[4]:
history = model.fit(train_dataset.batch(5), epochs=5)
# In[5]:
model.evaluate(val_dataset.batch(5))
# In[6]:
labels_pred = model.predict(test_dataset.batch(5))
# In[9]:
from matplotlib import pyplot as plt
plt.plot(history.history['accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train'], loc='upper left')
plt.show()
# Plot the training loss values
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train'], loc='upper left')
plt.show()
# In[10]:
y = labels_pred.logits
y_pred = np.argmax(y,axis = 1)
# In[15]:
y
# In[12]:
y_pred
# In[13]:
result_output = | pd.DataFrame(data={"id": test["id"], "sentiment": y_pred}) | pandas.DataFrame |
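# Illustrative follow-up (not from the original notebook): the Kaggle-style submission export that
# usually follows the cell above. The stand-in frame and the output file name are invented; only the
# quoting=3 convention is carried over from how the TSVs were read.
import pandas as pd
_submission = pd.DataFrame({"id": ['"12311_10"', '"8348_2"'], "sentiment": [1, 0]})
_submission.to_csv("distilbert_sentiment_submission.csv", index=False, quoting=3)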
import pandas as pd
import shutil
import os
import time
import re
import datetime
from functools import partial
def append_csvs_to_csv(csv_filepath_list, outpath=None):
"""
    Appends csvs into a single csv. It is memory efficient, keeping only the currently
    processed file in memory, while still tracking changing columns to ensure
    data is correctly aligned.
:param csv_filepath_list:
:param outpath:
:return:
"""
outpath, df_of_headers = _get_outpath_and_df_of_headers(outpath)
all_columns = [col for col in df_of_headers.columns]
for file in csv_filepath_list:
df_for_append = pd.read_csv(file) #load new data
df_of_headers, all_columns = _append_df_to_csv(df_for_append, df_of_headers, outpath, all_columns)
def append_csv_to_csv(inpath, outpath):
return append_csvs_to_csv([inpath], outpath)
def append_csvs_to_monthly_csv_of_first_date(csv_filepath_list, rootname):
[append_csv_to_monthly_csv_of_first_date(inpath, rootname) for inpath in csv_filepath_list]
def append_csv_to_monthly_csv_of_first_date(inpath, rootname):
df_for_append = | pd.read_csv(inpath) | pandas.read_csv |
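# Usage sketch (not from the original module): calling append_csvs_to_csv above on two small files
# with differing columns. It assumes the private helpers (_get_outpath_and_df_of_headers,
# _append_df_to_csv) defined elsewhere in this module are available and that the output file is
# created if it does not yet exist; all file names here are invented.
import pandas as pd

pd.DataFrame({'a': [1], 'b': [2]}).to_csv('part1.csv', index=False)
pd.DataFrame({'a': [3], 'c': [4]}).to_csv('part2.csv', index=False)  # note the changed columns
append_csvs_to_csv(['part1.csv', 'part2.csv'], outpath='combined.csv')
print(pd.read_csv('combined.csv'))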
from pdpbox.info_plots import target_plot
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from pandas.testing import assert_series_equal
def test_binary(titanic_data, titanic_target):
fig, axes, summary_df = target_plot(df=titanic_data,
feature='Sex',
feature_name='Sex',
target=titanic_target)
expected = pd.DataFrame(
{'x': {0: 0, 1: 1},
'display_column': {0: 'Sex_0', 1: 'Sex_1'},
'count': {0: 314, 1: 577},
'Survived': {0: 0.7420382165605095, 1: 0.18890814558058924}}
)
assert_frame_equal(expected, summary_df, check_like=True)
def test_onehot(titanic_data, titanic_target):
fig, axes, summary_df = target_plot(df=titanic_data,
feature=['Embarked_C', 'Embarked_Q',
'Embarked_S'],
feature_name='Embarked',
target=titanic_target)
expected = pd.DataFrame(
{'x': {0: 0, 1: 1, 2: 2},
'display_column': {0: 'Embarked_C', 1: 'Embarked_Q', 2: 'Embarked_S'},
'count': {0: 168, 1: 77, 2: 646},
'Survived': {0: 0.5535714285714286,
1: 0.38961038961038963,
2: 0.33900928792569657}}
)
assert_frame_equal(expected, summary_df, check_like=True)
def test_numeric(titanic_data, titanic_target):
fig, axes, summary_df = target_plot(df=titanic_data,
feature='Fare',
feature_name='Fare',
target=titanic_target)
expected = pd.DataFrame(
{'x': {0: 0, 4: 4, 7: 7},
'display_column': {0: '[0, 7.73)',
4: '[13, 16.7)',
7: '[35.11, 73.5)'},
'value_lower': {0: 0.0, 4: 13.0, 7: 35.111111111111086},
'value_upper': {0: 7.732844444444444, 4: 16.7, 7: 73.5},
'count': {0: 99, 4: 108, 7: 96},
'Survived': {0: 0.1414141414141414,
4: 0.37037037037037035,
7: 0.5104166666666666}}
)
assert_frame_equal(expected, summary_df.loc[[0, 4, 7], :], check_like=True)
assert len(summary_df) == 9
def test_endpoint(titanic_data, titanic_target):
"""
test endpoint==False (last point should't be included)
"""
fig, axes, summary_df = target_plot(df=titanic_data,
feature='Fare',
feature_name='Fare',
target=titanic_target,
endpoint=False)
expected = pd.DataFrame(
{'x': {0: 0, 8: 8, 9: 9},
'display_column': {0: '[0, 7.73)', 8: '[73.5, 512.33)',
9: '>= 512.33'},
'value_lower': {0: 0.0, 8: 73.5, 9: 512.3292},
'value_upper': {0: 7.732844444444444, 8: 512.3292, 9: np.nan},
'count': {0: 99, 8: 99, 9: 3},
'Survived': {0: 0.1414141414141414, 8: 0.7171717171717171, 9: 1.0}}
)
assert_frame_equal(expected, summary_df.loc[[0, 8, 9], :], check_like=True)
assert len(summary_df) == 10
def test_num_grid_points(titanic_data, titanic_target):
fig, axes, summary_df = target_plot(df=titanic_data,
feature='Fare',
feature_name='Fare',
target=titanic_target,
num_grid_points=20)
expected = pd.DataFrame(
{'x': {0: 0, 9: 9, 18: 18},
'display_column': {0: '[0, 7.22)',
9: '[13, 15.5)',
18: '[110.88, 512.33]'},
'value_lower': {0: 0.0, 9: 13.0, 18: 110.8833},
'value_upper': {0: 7.225, 9: 15.5, 18: 512.3292},
'count': {0: 43, 9: 80, 18: 49},
'Survived': {0: 0.06976744186046512, 9: 0.3375,
18: 0.7551020408163265}}
)
assert_frame_equal(expected, summary_df.loc[[0, 9, 18], :], check_like=True)
assert len(summary_df) == 19
def test_grid_type(titanic_data, titanic_target):
fig, axes, summary_df = target_plot(df=titanic_data,
feature='Fare',
feature_name='Fare',
target=titanic_target,
grid_type='equal')
expected = pd.DataFrame(
{'x': {1: 1, 6: 6, 8: 8},
'display_column': {1: '[56.93, 113.85)',
6: '[341.55, 398.48)',
8: '[455.4, 512.33]'},
'value_lower': {1: 56.925466666666665, 6: 341.5528,
8: 455.4037333333333},
'value_upper': {1: 113.85093333333333, 6: 398.4782666666666,
8: 512.3292},
'count': {1: 87.0, 6: 0.0, 8: 3.0},
'Survived': {1: 0.6551724137931034, 6: np.nan, 8: 1.0}}
)
assert_frame_equal(expected, summary_df.loc[[1, 6, 8], :], check_like=True)
assert len(summary_df) == 9
def test_grid_range(titanic_data, titanic_target):
"""
grid_range, need to set grid_type='equal'
"""
fig, axes, summary_df = target_plot(df=titanic_data,
feature='Fare',
feature_name='Fare',
target=titanic_target,
grid_type='equal',
grid_range=(5, 100))
expected = pd.DataFrame(
{'x': {0: 0, 4: 4, 8: 8},
'display_column': {0: '[5, 15.56)',
4: '[47.22, 57.78)',
8: '[89.44, 100]'},
'value_lower': {0: 5.0, 4: 47.22222222222222, 8: 89.44444444444444},
'value_upper': {0: 15.555555555555555, 4: 57.77777777777778, 8: 100.0},
'count': {0: 459, 4: 39, 8: 8},
'Survived': {0: 0.25925925925925924, 4: 0.6666666666666666, 8: 0.875}}
)
assert_frame_equal(expected, summary_df.loc[[0, 4, 8], :], check_like=True)
assert len(summary_df) == 9
def test_grid_range_outliers(titanic_data, titanic_target):
"""
show_outliers with grid_range defined
grid_range, need to set grid_type='equal'
"""
fig, axes, summary_df = target_plot(df=titanic_data,
feature='Fare',
feature_name='Fare',
target=titanic_target,
grid_range=(0, 100),
grid_type='equal',
show_outliers=True)
expected = pd.DataFrame(
{'x': {0: 0, 8: 8, 9: 9},
'display_column': {0: '[0, 11.11)', 8: '[88.89, 100]',
9: '> 100'},
'value_lower': {0: 0.0, 8: 88.88888888888889, 9: 100.0},
'value_upper': {0: 11.11111111111111, 8: 100.0, 9: np.nan},
'count': {0: 364, 8: 10, 9: 53},
'Survived': {0: 0.2087912087912088, 8: 0.9, 9: 0.7358490566037735}}
)
assert_frame_equal(expected, summary_df.loc[[0, 8, 9], :], check_like=True)
assert len(summary_df) == 10
def test_grid_range_outliers_endpoint(titanic_data, titanic_target):
"""
show_outliers with grid_range defined and endpoint==False
grid_range, need to set grid_type='equal'
"""
fig, axes, summary_df = target_plot(df=titanic_data,
feature='Fare',
feature_name='Fare',
target=titanic_target,
grid_range=(0, 100),
grid_type='equal',
show_outliers=True,
endpoint=False)
expected = pd.DataFrame(
{'x': {0: 0, 8: 8, 9: 9},
'display_column': {0: '[0, 11.11)', 8: '[88.89, 100)',
9: '>= 100'},
'value_lower': {0: 0.0, 8: 88.88888888888889, 9: 100.0},
'value_upper': {0: 11.11111111111111, 8: 100.0, 9: np.nan},
'count': {0: 364, 8: 10, 9: 53},
'Survived': {0: 0.2087912087912088, 8: 0.9, 9: 0.7358490566037735}}
)
assert_frame_equal(expected, summary_df.loc[[0, 8, 9], :], check_like=True)
assert len(summary_df) == 10
def test_cust_grid_points(titanic_data, titanic_target):
fig, axes, summary_df = target_plot(df=titanic_data,
feature='Fare',
feature_name='Fare',
target=titanic_target,
cust_grid_points=range(0, 100, 10))
expected = pd.DataFrame(
{'x': {0: 0, 4: 4, 8: 8},
'display_column': {0: '[0, 10)',
4: '[40, 50)',
8: '[80, 90]'},
'value_lower': {0: 0.0, 4: 40.0, 8: 80.0},
'value_upper': {0: 10.0, 4: 50.0, 8: 90.0},
'count': {0: 336, 4: 15, 8: 19},
'Survived': {0: 0.19940476190476192,
4: 0.26666666666666666,
8: 0.8421052631578947}}
)
| assert_frame_equal(expected, summary_df.loc[[0, 4, 8], :], check_like=True) | pandas.testing.assert_frame_equal |
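# Illustrative stand-in (not the actual pdpbox conftest): the titanic_data / titanic_target fixtures
# used by the tests above are normally provided by a conftest.py. The CSV path below is an
# assumption, and 'Survived' as the target column is inferred from the expected frames.
import pandas as pd
import pytest

@pytest.fixture(scope='module')
def titanic_data():
    return pd.read_csv('pdpbox/datasets/test_titanic/titanic_data.csv')

@pytest.fixture(scope='module')
def titanic_target():
    return 'Survived'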
"""
Zonal Statistics
Vector-Raster Analysis
Modified by <NAME> from 2013 <NAME> and AsgerPetersen:
usage: generate_twi_per_basin.py [-h] [--output flag [--buffer distance] [--nodata value] [-f FORMAT]
catchments twi_raster slope_raster outputfolder_twi
positional arguments:
namest HUC Number
catchments hydrofabrics catchment
time_to_stream_raster Time to stream in minutes - generated with workflow_hand_twi_giuh.sh
outputfolder_giuh Output folder
optional arguments:
-h, --help show this help message and exit
--output flag 0-Only generates the param file, 1- generates the cfe config file
--buffer distance Buffer geometry by this distance before calculation
--nodata value Use this nodata value instead of value from raster
--preload Preload entire raster into memory instead of a read
per vector feature
"""
from osgeo import gdal, ogr
from osgeo.gdalconst import *
import numpy as np
import sys
import argparse
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib
import os
import json
gdal.PushErrorHandler('CPLQuietErrorHandler')
ogr.UseExceptions()
def bbox_to_pixel_offsets(gt, bbox):
originX = gt[0]
originY = gt[3]
pixel_width = gt[1]
pixel_height = gt[5]
x1 = int((bbox[0] - originX) / pixel_width)
x2 = int((bbox[1] - originX) / pixel_width)
#x2 = int((bbox[1] - originX) / pixel_width) + 1
y1 = int((bbox[3] - originY) / pixel_height)
y2 = int((bbox[2] - originY) / pixel_height)
#y2 = int((bbox[2] - originY) / pixel_height) + 1
xsize = x2 - x1
ysize = y2 - y1
return (x1, y1, xsize, ysize)
# namest='020302'
# catchments='/home/west/Projects/hydrofabrics/20210511/catchments_wgs84.geojson'
# time_to_stream_raster='/home/west/Projects/IUH_TWI/HAND_10m//020302/020302dsave1_cr.tif'
# outputfolder_giuh='/home/west/Projects/hydrofabrics/20210511//GIUH_10m_1/'
# nodata_value = -999
# buffer_distance = 0.001
# output_flag = 1
# global_src_extent = 0
def generate_giuh_per_basin(namestr,catchments, time_to_stream_raster, outputfolder_giuh,
output_flag=1,
nodata_value=None,
global_src_extent=False,
buffer_distance=0.001):
outputfolder_giuh_param_file=outputfolder_giuh+"/width_function/"
if not os.path.exists(outputfolder_giuh_param_file): os.mkdir(outputfolder_giuh_param_file)
if(output_flag==1):
outputfolder_giuh_config_file=outputfolder_giuh+"/CFE_config_file/"
if not os.path.exists(outputfolder_giuh_config_file): os.mkdir(outputfolder_giuh_config_file)
rds = gdal.Open(time_to_stream_raster, GA_ReadOnly)
assert rds, "Could not open raster" +time_to_stream_raster
rb = rds.GetRasterBand(1)
rgt = rds.GetGeoTransform()
if nodata_value:
# Override with user specified nodata
nodata_value = float(nodata_value)
rb.SetNoDataValue(nodata_value)
else:
# Use nodata from band
nodata_value = float(rb.GetNoDataValue())
# Warn if nodata is NaN as this will not work with the mask (as NaN != NaN)
assert nodata_value == nodata_value, "Cannot handle NaN nodata value"
if buffer_distance:
buffer_distance = float(buffer_distance)
vds = ogr.Open(catchments, GA_ReadOnly)
assert(vds)
vlyr = vds.GetLayer(0)
# vdefn = vlyr.GetLayerDefn()
# Calculate (potentially buffered) vector layer extent
vlyr_extent = vlyr.GetExtent()
if buffer_distance:
expand_by = [-buffer_distance, buffer_distance, -buffer_distance, buffer_distance]
vlyr_extent = [a + b for a, b in zip(vlyr_extent, expand_by)]
# create an in-memory numpy array of the source raster data
# covering the whole extent of the vector layer
if global_src_extent:
# use global source extent
# useful only when disk IO or raster scanning inefficiencies are your limiting factor
# advantage: reads raster data in one pass
# disadvantage: large vector extents may have big memory requirements
src_offset = bbox_to_pixel_offsets(rgt, vlyr_extent)
#print (str(src_offset))
src_array = rb.ReadAsArray(*src_offset)
# calculate new geotransform of the layer subset
new_gt = (
(rgt[0] + (src_offset[0] * rgt[1])),
rgt[1],
0.0,
(rgt[3] + (src_offset[1] * rgt[5])),
0.0,
rgt[5]
)
mem_drv = ogr.GetDriverByName('Memory')
driver = gdal.GetDriverByName('MEM')
skippednulgeoms = False
total = vlyr.GetFeatureCount(force = 0)
vlyr.ResetReading()
count = 0
feat = vlyr.GetNextFeature()
while feat is not None:
cat = feat.GetField('ID')
count = count + 1
#print (str(cat))
if count % 100 == 0:
sys.stdout.write("\r{0} of {1}".format(count, total))
sys.stdout.flush()
if feat.GetGeometryRef() is None:
# Null geometry. Write to dst and continue
if not skippednulgeoms:
print ("\nWarning: Skipping nullgeoms\n")
skippednulgeoms = True
feat = vlyr.GetNextFeature()
continue
mem_feat = feat.Clone()
mem_type = mem_feat.GetGeometryRef().GetGeometryType()
if buffer_distance:
mem_type = ogr.wkbPolygon
mem_feat.SetGeometryDirectly( mem_feat.GetGeometryRef().Buffer(buffer_distance) )
if not global_src_extent:
# use local source extent
# fastest option when you have fast disks and well indexed raster (ie tiled Geotiff)
# advantage: each feature uses the smallest raster chunk
# disadvantage: lots of reads on the source raster
src_offset = bbox_to_pixel_offsets(rgt, mem_feat.geometry().GetEnvelope())
#print (str(src_offset))
src_array = rb.ReadAsArray(*src_offset)
# calculate new geotransform of the feature subset
new_gt = (
(rgt[0] + (src_offset[0] * rgt[1])),
rgt[1],
0.0,
(rgt[3] + (src_offset[1] * rgt[5])),
0.0,
rgt[5]
)
if not src_array is None:
#print ("src_array")
#print (src_array)
# Create a temporary vector layer in memory
mem_ds = mem_drv.CreateDataSource('out')
mem_layer = mem_ds.CreateLayer('mem_lyr', None, mem_type)
mem_layer.CreateFeature(mem_feat)
# Rasterize it
rvds = driver.Create('', src_offset[2], src_offset[3], 1, gdal.GDT_Byte)
rvds.SetGeoTransform(new_gt)
gdal.RasterizeLayer(rvds, [1], mem_layer, burn_values=[1])
rv_array = rvds.ReadAsArray()
# Mask basin area
masked_basin = np.ma.MaskedArray(
src_array,
mask=np.logical_not(rv_array)
)
all_values_in_basin=((masked_basin.mask==False)).sum() # False where the basin is, not sure why. So counting pixels in the basin
# Also remove missing data - keep only valid values
masked = np.ma.MaskedArray(
src_array,
mask=np.logical_or(
src_array == nodata_value,
np.logical_not(rv_array) # remove this since it was creating issues with 1
)
)
all_valid_values_in_basin=((masked.mask==False)).sum()
            Check=100*all_valid_values_in_basin/all_values_in_basin # Percentage of valid values in the polygon
if(Check>80):
                #Create a 1-d array - include nan for points outside of the polygon
maskedArray=np.ma.filled(masked.astype(float), np.nan).flatten()
                #remove all values outside of the polygon which are marked as nan
maskedArray2=maskedArray[(maskedArray!=nodata_value) & (~np.isnan(maskedArray)) & (maskedArray>=0)] # Values covered by the polygon
# Due to incompatibilities with hydrofabrics
sorted_array = np.sort(maskedArray2)
if(len(np.unique(sorted_array))>5):
Per5=np.percentile(sorted_array,10)
Per95=np.percentile(sorted_array,95)
sorted_array=sorted_array[(sorted_array>=Per5) & (sorted_array<=Per95)]
sorted_array=(sorted_array-min(sorted_array))
else:
sorted_array=np.zeros(20)+3600.
Per5=np.percentile(sorted_array,5)
Per95=np.percentile(sorted_array,95)
if(len(np.unique(sorted_array))>10): sorted_array=sorted_array[(sorted_array>=Per5) & (sorted_array<=Per95)]
sorted_array=(sorted_array-min(sorted_array))/60.
AllData = | pd.DataFrame(columns=['TravelTimeHour'], data=sorted_array) | pandas.DataFrame |
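# Quick self-contained check of bbox_to_pixel_offsets defined above (not from the original script):
# the geotransform and bbox values are invented (10 m pixels, north-up GDAL geotransform with a
# negative pixel height, bbox in ogr GetEnvelope() order: xmin, xmax, ymin, ymax).
_gt = (500000.0, 10.0, 0.0, 4500000.0, 0.0, -10.0)
_bbox = (500100.0, 500300.0, 4499800.0, 4499900.0)
print(bbox_to_pixel_offsets(_gt, _bbox))  # -> (10, 10, 20, 10): x offset, y offset, xsize, ysize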
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from _imports import *
os.system('cls')
remove_duplicates = ask_for_user_preference('Remove duplicate designs generated by the algorithms?')
verify_designs = ask_for_user_preference('Verify the properties of the best designs by simulation?')
# helper procedures
def show_geometry_preview(settings_sim, pattern, scale_geometries = 3):
courant_number = settings_sim['basic']['courant_number']
basic_element_dimensions = settings_sim['diffuser_geometry']['basic_element_dimensions']
fs = settings_sim['basic']['fs']
T_air_C = settings_sim['propagation_medium']['T_air_C']
p_air_hPa = settings_sim['propagation_medium']['p_air_hPa']
RH = settings_sim['propagation_medium']['RH']
c, Z_air = get_air_properties(T_air_C, p_air_hPa, RH)
T = 1/fs # [s]
X = c*T/courant_number # [m]
num_element_height_levels = settings_sim['diffuser_geometry']['num_element_height_levels']
diffuser_depth = settings_sim['diffuser_geometry']['diffuser_depth']
shape_skyline = generate_2D_Skyline_diffuser(
pattern,
element_seg_depth=cont2disc(diffuser_depth*scale_geometries/num_element_height_levels,X),
element_size=cont2disc(basic_element_dimensions*scale_geometries,X))
show_shape(shape_skyline)
def verify_scattering_properties(settings_sim, pattern, reference_data):
mean_coeff = evaluate_design(settings_sim, pattern, reference_data)
print('średnia dyfuzja: ', mean_coeff)
# print (mean_coeff)
# draw_subband_polar_response(settings_sim, imp_res_object[0])
# plt.title('xy')
# draw_subband_polar_response(settings_sim, imp_res_object[1])
# plt.title('yz')
def remove_duplicate_designs(patterns, diffusions):
filtered_patterns = []
filtered_diffusions = []
def pattern_in_list(pattern, list):
list_of_comparisons = []
for element in list:
list_of_comparisons.append(np.array_equal(pattern,element))
return np.any(list_of_comparisons)
already_existing_patterns = []
for pattern, diffusion in zip(patterns, diffusions):
if not pattern_in_list(pattern, already_existing_patterns):
filtered_patterns.append(pattern)
already_existing_patterns.append(pattern)
filtered_diffusions.append(diffusion)
return filtered_patterns, filtered_diffusions
# configuration of the AI-based procedures
CONFIG_PATH_AI = '_settings/ai_default.ini'
CONFIG_PATH_SIM = '_settings/sim_default.ini'
settings_ai = read_config(CONFIG_PATH_AI)
settings_sim = read_config(CONFIG_PATH_SIM)
algenet_outcomes_dir = '../_joint_algenet_results'
file_save_dir = settings_sim['basic']['file_save_dir']
reference_file_path = os.path.join(file_save_dir,'reference.npy')
# read the reference data for the diffuser measurement
try:
    print('computing reference data:')
reference_data = np.load(reference_file_path, allow_pickle=True).item()
except:
    print(f'reading the reference data file ({reference_file_path}) failed, the reference will be computed automatically')
imp_res_set_empty, imp_res_set_plate, _ = run_simulation_for_pattern(None,settings_sim, mode='reference_only')
reference_data = {
'plate':imp_res_set_plate,
'room':imp_res_set_empty,
'num_element_height_levels':settings_sim['diffuser_geometry']['num_element_height_levels'],
'diffuser_depth':settings_sim['diffuser_geometry']['diffuser_depth'],
'basic_element_dimensions':settings_sim['diffuser_geometry']['basic_element_dimensions'],
'fs':settings_sim['basic']['fs']}
    # Save the computation results to disk.
np.save(reference_file_path,reference_data)
# read the genetic algorithm progress
algenet_diffusions = []
algenet_patterns = []
algenet_gen_nums = []
if os.path.isdir(algenet_outcomes_dir):
for fname in os.listdir(algenet_outcomes_dir):
_, ext = os.path.splitext(fname)
if ext != '.npy': continue
fdata = np.load(os.path.join(algenet_outcomes_dir,fname), allow_pickle=True)
for item in fdata:
algenet_diffusions.append(item['diffusion'])
algenet_patterns.append(item['pattern'])
algenet_gen_nums.append(item['generation_number'])
best_dif_argmax = np.argmax(algenet_diffusions)
pattern = algenet_patterns[best_dif_argmax]
dif = algenet_diffusions[best_dif_argmax]
if remove_duplicates:
algenet_patterns, algenet_diffusions = remove_duplicate_designs(algenet_patterns, algenet_diffusions)
algenet_best_pattern_idx = np.argmax(algenet_diffusions)
# read the data for the random search
_, consolidated_data = obtain_replay_folder_contents(settings_ai)
random_diffusions = []
random_patterns = []
for entry in consolidated_data:
if 'input_pattern_generation' in list(entry.keys()):
if entry['input_pattern_generation'] != 'random':
continue
random_pattern = entry['replay_transitions'][0]['current_pattern']
random_diffusion = entry['episode_diffusions'][0] - entry['episode_rewards'][0]
random_diffusions.append(random_diffusion)
random_patterns.append(random_pattern)
if remove_duplicates:
random_patterns, random_diffusions = remove_duplicate_designs(random_patterns, random_diffusions)
random_diffusions = np.array(random_diffusions)
random_best_pattern_idx = np.argmax(random_diffusions)
# read the data for the deep policy gradient
agent_diffusions_rnd = []
agent_diffusions_bst = []
agent_patterns_rnd = []
agent_patterns_bst = []
for entry in consolidated_data:
episode_diffusions_argmax = np.argmax(entry['episode_diffusions'])
best_pattern = entry['replay_transitions'][episode_diffusions_argmax]['new_pattern']
if 'input_pattern_generation' in list(entry.keys()):
if entry['input_pattern_generation'] != 'random':
agent_diffusions_bst.append(np.max(entry['episode_diffusions']))
agent_patterns_bst.append(best_pattern)
continue
agent_diffusions_rnd.append(np.max(entry['episode_diffusions']))
agent_patterns_rnd.append(best_pattern)
if remove_duplicates:
agent_patterns_rnd, agent_diffusions_rnd = remove_duplicate_designs(agent_patterns_rnd, agent_diffusions_rnd)
agent_patterns_bst, agent_diffusions_bst = remove_duplicate_designs(agent_patterns_bst, agent_diffusions_bst)
dpg_best_pattern_bst_idx = np.argmax(agent_diffusions_bst)
dpg_best_pattern_rnd_idx = np.argmax(agent_diffusions_rnd)
print()
print(f'random - num designs: {len(random_diffusions)}')
print(f'genetic alg. - num designs: {len(algenet_diffusions)}')
print(f'deep policy gradient (random input) - num designs: {len(agent_diffusions_rnd)}')
print(f'deep policy gradient (best 10 input) - num designs: {len(agent_diffusions_bst)}')
print()
print()
print(f'best pattern random choice')
print(random_patterns[random_best_pattern_idx])
print(f'provided diffusion: {random_diffusions[random_best_pattern_idx]}')
if os.path.isdir(algenet_outcomes_dir):
print()
print(f'best pattern by genetic algorithm (generation no {algenet_gen_nums[algenet_best_pattern_idx]})')
print(algenet_patterns[algenet_best_pattern_idx])
print(f'provided diffusion: {algenet_diffusions[algenet_best_pattern_idx]}')
print()
print(f'best pattern by deep policy gradient (random input)')
print(agent_patterns_rnd[dpg_best_pattern_rnd_idx])
print(f'provided diffusion: {agent_diffusions_rnd[dpg_best_pattern_rnd_idx]}')
print()
print(f'best pattern by deep policy gradient (best 10 input)')
print(agent_patterns_bst[dpg_best_pattern_bst_idx])
print(f'provided diffusion: {agent_diffusions_bst[dpg_best_pattern_bst_idx]}')
print()
# Plot the probability density estimates
random_diffusions_df = pd.DataFrame()
random_diffusions_df = random_diffusions_df.assign(**{'mean diffusion coefficient':random_diffusions})
random_diffusions_df = random_diffusions_df.assign(**{'algorithm type':'random'})
agent_diffusions_rnd_df = pd.DataFrame()
agent_diffusions_rnd_df = agent_diffusions_rnd_df.assign(**{'mean diffusion coefficient':agent_diffusions_rnd})
agent_diffusions_rnd_df = agent_diffusions_rnd_df.assign(**{'algorithm type':'deep policy gradient (random input)'})
agent_diffusions_bst_df = pd.DataFrame()
agent_diffusions_bst_df = agent_diffusions_bst_df.assign(**{'mean diffusion coefficient':agent_diffusions_bst})
agent_diffusions_bst_df = agent_diffusions_bst_df.assign(**{'algorithm type':'deep policy gradient (best input)'})
algenet_diffusions_df = pd.DataFrame()
if os.path.isdir(algenet_outcomes_dir):
algenet_diffusions_df = algenet_diffusions_df.assign(**{'mean diffusion coefficient':algenet_diffusions})
algenet_diffusions_df = algenet_diffusions_df.assign(**{'algorithm type':'genetic algorithm'})
joint_df = | pd.concat([random_diffusions_df,agent_diffusions_rnd_df,agent_diffusions_bst_df,algenet_diffusions_df]) | pandas.concat |
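# One plausible way to visualise the joint_df assembled above (not part of the original script):
# kernel density estimates of the mean diffusion coefficient per algorithm type. The plotting
# choices (kdeplot, common_norm=False, figure size) are the editor's; seaborn >= 0.11 is assumed.
plt.figure(figsize=(8, 4))
sns.kdeplot(data=joint_df, x='mean diffusion coefficient', hue='algorithm type', common_norm=False)
plt.tight_layout()
plt.show()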
from calendar import monthrange
from datetime import datetime
import pandas as pd
from flask import Blueprint, jsonify, abort, g
from gatekeeping.api.budget import get_budget
from gatekeeping.api.position import get_positions
from gatekeeping.api.function import get_functions, get_function
from gatekeeping.api.user import get_user_function
def get_line_chart(function=None):
positions = get_positions(check_submitter=False)
budget = get_budget()
columns = [row.keys() for row in positions]
positions = pd.DataFrame(positions, columns=columns[0])
budget = pd.DataFrame(budget, columns=columns[0])
if function:
if function != 'All':
positions = positions.loc[positions['function'] == function]
budget = budget.loc[budget['function'] == function]
if g.user['type'] != 'ADMIN' and function == 'All':
functions = get_user_function(g.user['id'])
function_names = [get_function(function['function_id'])['name'] for function in functions]
positions = positions.loc[positions['function'].isin(function_names)]
budget = budget.loc[budget['function'].isin(function_names)]
positions['FTE'] = pd.to_numeric(positions['hours'], errors='coerce') / 40
budget['FTE'] = pd.to_numeric(budget['hours'], errors='coerce') / 40
positions['salary'] = pd.to_numeric(positions['salary'], errors='coerce')
positions['fringe_benefit'] = pd.to_numeric(positions['fringe_benefit'], errors='coerce')
positions['social_security_contribution'] = pd.to_numeric(positions['social_security_contribution'], errors='coerce')
budget['salary'] = pd.to_numeric(budget['salary'], errors='coerce')
budget['fringe_benefit'] = pd.to_numeric(budget['fringe_benefit'], errors='coerce')
budget['social_security_contribution'] = pd.to_numeric(budget['social_security_contribution'], errors='coerce')
positions['total_cost'] = positions['salary'].add(positions['fringe_benefit'], fill_value=0).add(positions['social_security_contribution'], fill_value=0)
budget['total_cost'] = budget['salary'].add(budget['fringe_benefit'], fill_value=0).add(budget['social_security_contribution'], fill_value=0)
positions['start_date'] = pd.to_datetime(positions['start_date'], errors='coerce')
positions['end_date'] = pd.to_datetime(positions['end_date'], errors='coerce')
budget['start_date'] = pd.to_datetime(budget['start_date'], errors='coerce')
budget['end_date'] = pd.to_datetime(budget['end_date'], errors='coerce')
year = datetime.now().year
months = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'}
index=['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
headcount_2018 = pd.Series(index=index)
budgeted_headcount_2018 = pd.Series(index=index)
headcount_cost_2018 = pd.Series(index=index)
budgeted_headcount_cost_2018 = pd.Series(index=index)
total_proposed_increase = 0
proposed_monthly_increase = budget.loc[budget['recruitment_status'].isin(['Proposed', 'Approved'])]['FTE'].sum()/12
for month in range(1,13):
total_proposed_increase += proposed_monthly_increase
hc = budget.loc[budget['recruitment_status'].isin(['On-Board', 'Contracted'])]['FTE'].sum()
budgeted_headcount_2018[months[month]] = pd.Series(hc + total_proposed_increase)
for month in range(1,13):
total_proposed_increase += proposed_monthly_increase
hc = budget.loc[budget['recruitment_status'].isin(['On-Board', 'Contracted'])]['total_cost'].sum()
budgeted_headcount_cost_2018[months[month]] = | pd.Series(hc + total_proposed_increase) | pandas.Series |
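# Self-contained illustration (not from the original app) of the FTE / total_cost arithmetic used in
# get_line_chart above: FTE = hours / 40, and total_cost = salary + fringe benefit + social security
# contribution, with missing values treated as zero via fill_value. The numbers are made up.
import pandas as pd
_positions = pd.DataFrame({
    'hours': ['40', '20'],
    'salary': [60000, None],
    'fringe_benefit': [5000, 1000],
    'social_security_contribution': [3000, 800],
})
_positions['FTE'] = pd.to_numeric(_positions['hours'], errors='coerce') / 40
_positions['total_cost'] = (_positions['salary']
                            .add(_positions['fringe_benefit'], fill_value=0)
                            .add(_positions['social_security_contribution'], fill_value=0))
print(_positions[['FTE', 'total_cost']])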
# -*- coding: utf-8 -*-
#
# License: This module is released under the terms of the LICENSE file
# contained within this applications INSTALL directory
"""
Utility functions for model generation
"""
# -- Coding Conventions
# http://www.python.org/dev/peps/pep-0008/ - Use the Python style guide
# http://sphinx.pocoo.org/rest.html - Use Restructured Text for
# docstrings
# -- Public Imports
import logging
import math
import numpy as np
import pandas as pd
from datetime import datetime
# -- Private Imports
# -- Globals
logger = logging.getLogger(__name__)
dict_wday_name = {
0: 'W-MON',
1: 'W-TUE',
2: 'W-WED',
3: 'W-THU',
4: 'W-FRI',
5: 'W-SAT',
6: 'W-SUN',
}
# -- Exception classes
# -- Functions
def logger_info(msg, data):
# Convenience function for easier log typing
logger.info(msg + '\n%s', data)
def array_transpose(a):
"""
Transpose a 1-D numpy array
:param a: An array with shape (n,)
:type a: numpy.Array
:return: The original array, with shape (n,1)
:rtype: numpy.Array
"""
return a[np.newaxis, :].T
# TODO: rework to support model composition
def model_requires_scaling(model):
"""
Given a :py:class:`anticipy.forecast_models.ForecastModel`
return True if the function requires scaling a_x
:param model: A get_model_<modeltype> function from
:py:mod:`anticipy.model.periodic_models` or
:py:mod:`anticipy.model.aperiodic_models`
:type model: function
:return: True if function is logistic or sigmoidal
:rtype: bool
"""
requires_scaling = model is not None and model.name in [
'logistic',
'sigmoid'
]
return requires_scaling
def apply_a_x_scaling(a_x, model=None, scaling_factor=100.0):
"""
Modify a_x for forecast_models that require it
:param a_x: x axis of time series
:type a_x: numpy array
:param model: a :py:class:`anticipy.forecast_models.ForecastModel`
:type model: function or None
:param scaling_factor: Value used for scaling t_values for logistic models
:type scaling_factor: float
:return: a_x with scaling applied, if required
:rtype: numpy array
"""
if model_requires_scaling(model): # todo: check that this is still useful
a_x = a_x / scaling_factor
return a_x
dict_freq_units_per_year = dict(
A=1.0,
Y=1.0,
D=365.0,
W=52.0,
M=12,
Q=4,
H=24 * 365.0
)
dict_dateoffset_input = dict(
Y='years',
A='years',
M='months',
W='weeks',
D='days',
H='hours'
)
def get_normalized_x_from_date(s_date):
"""Get column of days since Monday of first date"""
date_start = s_date.iloc[0]
# Convert to Monday
date_start = date_start - pd.to_timedelta(date_start.weekday(), unit='D')
s_x = (s_date - date_start).dt.days
return s_x
def get_s_x_extrapolate(
date_start_actuals,
date_end_actuals,
model=None,
freq=None,
extrapolate_years=2.5,
scaling_factor=100.0,
x_start_actuals=0.):
"""
Return a_x series with DateTimeIndex, covering the date range for the
actuals, plus a forecast period.
:param date_start_actuals: date or numeric index for first actuals sample
:type date_start_actuals: str, datetime, int or float
:param date_end_actuals: date or numeric index for last actuals sample
:type date_end_actuals: str, datetime, int or float
:param extrapolate_years:
:type extrapolate_years: float
:param model:
:type model: function
:param freq: Time unit between samples. Supported units are 'W' for weekly
samples, or 'D' for daily samples. (untested) Any date unit or time
unit accepted by numpy should also work, see
https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.datetime.html#arrays-dtypes-dateunits # noqa
:type freq: basestring
:param scaling_factor: Value used for scaling a_x for certain model
functions
:type scaling_factor: float
:param x_start_actuals: numeric index for the first actuals sample
:type x_start_actuals: int
:return: Series of floats with DateTimeIndex. To be used as (a_date, a_x)
input for a model function.
:rtype: pandas.Series
The returned series covers the actuals time domain plus a forecast period
lasting extrapolate_years, in years.
The number of additional samples for the forecast period is
time_resolution * extrapolate_years, rounded down
"""
if isinstance(date_start_actuals, str) or \
isinstance(date_start_actuals, datetime): # Use dates if available
date_start_actuals = pd.to_datetime(date_start_actuals)
date_end_actuals = pd.to_datetime(date_end_actuals)
weekday_adjustment = date_start_actuals.weekday()
expected_freq = dict_wday_name.get(weekday_adjustment)
if freq is None: # Default frequency
freq = expected_freq
else:
if freq.startswith('W'):
assert expected_freq == freq, \
'Error: with weekly frequency, freq ' \
'parameter must match weekday of date_start_actuals:' \
' {} - {} , {}' \
.format(freq, expected_freq, date_start_actuals)
freq_short = freq[0:1] # Changes e.g. W-MON to W
# freq_units_per_year = 52.0 if freq_short=='W' else 365.0
# Todo: change to dict to support more frequencies
freq_units_per_year = dict_freq_units_per_year.get(freq_short, 365.0)
extrapolate_units = extrapolate_years * freq_units_per_year
offset_input = {dict_dateoffset_input.get(freq_short):
extrapolate_units}
date_end_forecast = date_end_actuals + \
pd.DateOffset(**offset_input)
i_date = pd.date_range(
date_start_actuals,
date_end_forecast,
freq=freq,
name='date')
s_date = pd.Series(i_date)
# Get days passed since date_start, then add x_start_actuals
s_x = (s_date - date_start_actuals).dt.days + x_start_actuals
s_x.index = i_date
else:
# Otherwise, use numeric index
# we extrapolate future samples equal to 100*extrapolate_years
index = pd.Index(
np.arange(
date_start_actuals,
date_end_actuals +
100 *
extrapolate_years))
s_x = pd.Series(
index=index,
data=np.arange(
x_start_actuals,
x_start_actuals + index.size)) + x_start_actuals
if model_requires_scaling(model):
s_x = s_x / scaling_factor
return s_x
# Forecast Selection Functions
def get_aic_c(fit_error, n, n_params):
"""
This function implements the corrected Akaike Information Criterion (AICc)
taking as input a given fit error and data/model degrees of freedom.
We assume that the residuals of the candidate model are distributed
according to independent identical normal distributions with zero mean.
Hence, we can use define the AICc as
.. math::
AICc = AIC + \\frac{2k(k+1)}{n-k-1} =
2k + n \\log\\left(\\frac{E}{n}\\right) + \\frac{2k(k+1)}{n-k-1},
where :math:`k` and :math:`n` denotes the model and data degrees of
freedom respectively, and :math:`E`
denotes the residual error of the fit.
:param fit_error: Residual error of the fit
:type fit_error: float
:param n: Data degrees of freedom
:type n: int
:param n_params: Model degrees of freedom
:type n_params: int
:return: Corrected Akaike Information Criterion (AICc)
:rtype: float
Note:
- see AIC in `Wikipedia article on the AIC
<https://en.wikipedia.org/wiki/Akaike_information_criterion>`_.
"""
# First, deal with corner cases that can blow things up with division by
# zero
if (n <= n_params + 1) or (n == 0):
aux = n - n_params - 1
raise ValueError(
'ERROR: Time series too short for AIC_C: (n = ' +
str(n) +
', n - n_params - 1 = ' +
str(aux) +
')')
elif fit_error == 0.0:
if n_params == 1:
aicc = -float("inf")
else:
# This can lead to suboptimal model selection when we have
# multiple perfect fits - we use a patch instead
# aicc = -float("inf")
fit_error = 10 ** -320
aicc = n * math.log(fit_error / n) + 2 * n_params + \
(2 * n_params * (n_params + 1) / (n - n_params - 1))
else:
# Actual calculation of the AICc
aicc = n * math.log(fit_error / n) + 2 * n_params + \
(2 * n_params * (n_params + 1) / (n - n_params - 1))
return aicc
def get_s_aic_c_best_result_key(s_aic_c):
# Required because aic_c can be -inf, that value is not compatible with
# pd.Series.argmin()
if s_aic_c.empty or s_aic_c.isnull().all():
return None
if (s_aic_c.values == -np.inf).any():
(key_best_result,) = (s_aic_c == -np.inf).to_numpy().nonzero()[0]
key_best_result = s_aic_c.index[key_best_result.min()]
else:
key_best_result = s_aic_c.argmin()
return key_best_result
def detect_freq(a_date):
if isinstance(a_date, pd.DataFrame):
if 'date' not in a_date.columns:
return None
else:
a_date = a_date.date
s_date = pd.Series(a_date).sort_values().drop_duplicates()
min_date_delta = s_date.diff().min()
if pd.isnull(min_date_delta):
return None
elif min_date_delta == pd.Timedelta(1, unit='h'):
return 'H'
elif min_date_delta == pd.Timedelta(7, unit='D'):
# Weekly seasonality - need to determine day of week
min_date_wday = s_date.min().weekday()
return dict_wday_name.get(min_date_wday, 'W')
elif min_date_delta >= pd.Timedelta(28, unit='d') and \
min_date_delta <= pd.Timedelta(31, unit='d'):
# MS is month start, M is month end. We use MS if all dates match first
# of month
if s_date.dt.day.max() == 1:
return 'MS'
else:
return 'M'
elif min_date_delta >= pd.Timedelta(89, unit='d') and \
min_date_delta <= pd.Timedelta(92, unit='d'):
return 'Q'
elif min_date_delta >= pd.Timedelta(365, unit='d') and \
min_date_delta <= | pd.Timedelta(366, unit='d') | pandas.Timedelta |
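# Quick usage check of detect_freq above on synthetic weekly and month-start date ranges (assuming
# the complete anticipy implementation of the function); the dates are arbitrary illustration values.
import pandas as pd
print(detect_freq(pd.Series(pd.date_range('2020-01-06', periods=10, freq='W-MON'))))  # -> 'W-MON'
print(detect_freq(pd.Series(pd.date_range('2020-01-01', periods=6, freq='MS'))))      # -> 'MS'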
import numpy as np
import pandas as pd
from .base_test_class import DartsBaseTestClass
from ..models.kalman_filter import KalmanFilter
from ..models.filtering_model import MovingAverage
from ..timeseries import TimeSeries
from ..utils import timeseries_generation as tg
class KalmanFilterTestCase(DartsBaseTestClass):
def test_kalman(self):
""" KalmanFilter test.
        Creates an increasing sequence of numbers, adds noise, and checks that the
        Kalman filter produces values closer to the true underlying signal.
"""
testing_signal = np.arange(1, 5, 0.1)
noise = np.random.normal(0, 0.7, testing_signal.shape)
testing_signal_with_noise = testing_signal + noise
df = | pd.DataFrame(data=testing_signal_with_noise, columns=['signal']) | pandas.DataFrame |
# coding: utf-8
# In[1]:
#IMPORT REQUISTITE LIBRARIES
from datadownloader.MeetupClients import MeetUpClients
import json
import pandas as pd
from datadownloader.Utils.Logging import LoggingUtil
from datetime import datetime
import multiprocessing as mp
from functools import partial
import numpy as np
import sys
import timeit
import pickle
cores=mp.cpu_count()-1
#opfolder='/media/oldmonk/SAMSUNG/DATA_ABHISHEK/MEETUP_PROJECT/techall/'
opfolder='deep-learning/'
print(str(datetime.now()))
logfile=LoggingUtil(opfolder+'Logs/','CHECK_FILTER'+str(datetime.now())+'.txt')
# In[2]:
#FILTE GROUPS ON SO TAGS
#group_pd=pd.read_csv('Data/Groups/Tech_Groups.csv')
sotags=pd.read_csv(opfolder+'Data/SOTags/ALL.csv')
filterlist=['software development']
cattofind="34"
#topic='None'
topic='deep-learning'
group_pd=pd.read_csv(opfolder+"Data/Groups/"+cattofind+'_'+topic+'_Groups.csv')
#filterlist=['open-source']
#filterlist=['software development']
filterlist=[str(tg).lower() for tg in sotags['TagName'].tolist()]
logstr="Loaded stackoveflow tags :::" + str(len(filterlist))
logfile.Log(logstr)
print(logstr)
topicsmeetup=[]
topicsmeetup_intersect=[]
'''
def CheckSOTags(topics):
if(type(topics)==str):
topics=eval(topics)
if(len(topics)==0):
return False
else:
try:
for topic_str in topics:
#SAVE ORG TOPIC NAME IN MEETUP
topicsmeetup.append(topic_str['name'])
#CHECK IF ORG TOPIC STRING IN SO TAGS WHEN CONVERTED TO LOWER CASE
if(topic_str['name'].lower() in filterlist):
if(topic_str['name'].lower() not in topicsmeetup_intersect):
topicsmeetup_intersect.append(topic_str['name'].lower())
return True
#Check if Original Topic when converrted to SOFormat in SO Tags
tpcs=topic_str['name'].split()
tpcs_so_format=("-".join(tpcs)).lower()
if(tpcs_so_format in filterlist):
if(tpcs_so_format not in topicsmeetup_intersect):
topicsmeetup_intersect.append(tpcs_so_format)
return True
##heck if Original Topic when words in Topics Considered to SOFormat in SO Tags
for t in tpcs:
if(t.lower() in filterlist):
if(t.lower() not in topicsmeetup_intersect):
topicsmeetup_intersect.append(t.lower())
return True
return False
except :
print(" Exception Occured " + str(sys.exc_info()[0]))
return False
'''
def CheckSOTags(topics):
if(type(topics)==str):
topics=eval(topics)
if(len(topics)==0):
return (False,"No topics attached to this group ",None)
else:
try:
for topic_str in topics:
#SAVE ORG TOPIC NAME IN MEETUP
#topicsmeetup.append(topic_str['name'])
#CHECK IF ORG TOPIC STRING IN SO TAGS WHEN CONVERTED TO LOWER CASE
if(topic_str['name'].lower() in filterlist):
#if(topic_str['name'].lower() not in topicsmeetup_intersect):
# topicsmeetup_intersect.append(topic_str['name'].lower())
return (True,topic_str['name'],topic_str['name'].lower())
                #Check if the original topic, converted to SO format, is in the SO tags
tpcs=topic_str['name'].split()
tpcs_so_format=("-".join(tpcs)).lower()
if(tpcs_so_format in filterlist):
#if(tpcs_so_format not in topicsmeetup_intersect):
# topicsmeetup_intersect.append(tpcs_so_format)
return (True,topic_str['name'],tpcs_so_format)
                #Check if any individual word of the original topic is in the SO tags
for t in tpcs:
if(t.lower() in filterlist):
#if(t.lower() not in topicsmeetup_intersect):
# topicsmeetup_intersect.append(t.lower())
return (True,topic_str['name'],t.lower())
return (False,topic_str['name'],None)
except :
print(" Exception Occured " + str(sys.exc_info()[0]))
return (False,topic_str['name'],None)
def ProcessChunk(df):
df_so=df['topics'].apply(CheckSOTags)
return df_so
def SWTag(topics,filterlist):
if(type(topics)==str):
topics=eval(topics)
if(len(topics)==0):
return (False,"No topics attached to this group ",None)
else:
try:
for topic_str in topics:
if(topic_str['name'].lower() in filterlist):
return (True,topic_str['name'],topic_str['name'].lower())
return (False,topic_str['name'],None)
except :
print(" Exception Occured " + str(sys.exc_info()[0]))
return (False,topic_str['name'],None)
def ProcessChunkSW(df):
    df_so=df['topics'].apply(SWTag, args=(['software development'],))
return df_so
#group_pd_software=group_pd['topics'].apply(CheckSOTags)
#group_pd['topics']
# In[3]:
#Filter Groups By Presence of Software Development
cores=mp.cpu_count()-1
def ParallelApply(df,func):
chunks=np.array_split(df,cores)
parpool=mp.Pool(cores)
changed_df=pd.concat(parpool.map(ProcessChunk,chunks))
parpool.close()
parpool.join()
return changed_df
def ParallelApplySW(df,func):
chunks=np.array_split(df,cores)
parpool=mp.Pool(cores)
changed_df=pd.concat(parpool.map(ProcessChunkSW,chunks))
parpool.close()
parpool.join()
return changed_df
'''
logstr= " filtering groups which contain Software Development Tag "
logfile.Log(logstr)
print(logstr)
softwar_filter=ParallelApplySW(group_pd,ParallelApplySW)
group_pd_software=group_pd[pd.Series([i[0] for i in softwar_filter])]
group_pd_software.to_csv(opfolder+"Data/Groups/"+cattofind+'_Groups_FilteredSSW.csv',index=False)
logstr= str(group_pd_software.shape[0]) +" groups found which contain Software Development Tag"
logfile.Log(logstr)
print(logstr)
so_software_filter=ParallelApply(group_pd_software,ParallelApply)
topicsmeetup=set(i[1] for i in softwar_filter)
topicsmeetup_intersect=set([i[2] for i in list(softwar_filter) if i[2] is not None])
group_pd_so_software=group_pd_software[pd.Series([i[0] for i in so_software_filter])]
group_pd_so_software.to_csv(opfolder+"Data/Groups/"+cattofind+'_Groups_FilteredSO_SW.csv',index=False)
logstr= str(group_pd_software.shape[0]) +" groups found which contain Stack Overflow Tag and Softwar Development Tag"
logfile.Log(logstr)
print(logstr)
'''
logstr= " filtering groups which contain Stack OVerflow Tag "
logfile.Log(logstr)
print(logstr)
softwar_filter=ParallelApply(group_pd,ParallelApply)
topicsmeetup=set(i[1] for i in softwar_filter)
topicsmeetup_intersect=set([i[2] for i in list(softwar_filter) if i[2] is not None])
group_pd_software=group_pd[pd.Series([i[0] for i in softwar_filter])]
#group_pd_software.shape
group_pd_software.to_csv(opfolder+"Data/Groups/"+cattofind+'_Groups_FilteredSO.csv',index=False)
logstr= str(group_pd_software.shape[0]) +" groups found which contain Stack Overflow Tag"
logfile.Log(logstr)
print(logstr)
pickle.dump(group_pd_software,open(opfolder+'Data/Pickle/group_pd_software.p','wb'))
# In[4]:
#FILTER GROUPS ON MEMBERS
#group_pd_software=pd.read_csv('Data/Groups/Tech_Groups_FilteredSO.csv')
#group_pd=pd.read_csv('Data/Groups/Tech_Groups.csv')
group_pd_grtr_10=group_pd_software[group_pd_software['members']>=10]
logstr="Groups which have at least 10 members :::" + str(group_pd_grtr_10.shape[0])
logfile.Log(logstr)
print(logstr)
# In[5]:
import pandas as pd
group_event_counts=pd.read_csv(opfolder+'Data/events.csv',names=['groupid','EventCount'])
event_count_Final=pd.DataFrame()
try:
event_count_failed_rep=pd.read_csv(opfolder+'Data/events_count_failed.csv',names=['groupid','EventCount'])
event_count_Final=group_event_counts[group_event_counts['EventCount']!=-1]['groupid'].tolist()+event_count_failed_rep['groupid'].tolist()
except:
event_count_Final=group_event_counts[group_event_counts['EventCount']!=-1]
#event_count_Final_df=group_event_counts[group_event_counts['EventCount']!=-1]+event_count_failed_rep
groups_filtered_events_rej = pd.concat([group_event_counts[group_event_counts['EventCount']!=-1], event_count_failed_rep])
#!/usr/bin/env python
# coding: utf-8
# # Analysis of Cryptocurrency Investments
#
# In this analysis report, I will perform exploratory data analysis and build machine learning models to predict market prices over the next 30 days for the selected cryptocurrencies (BTC, RLC, GNT, SNM).
# [1. Prepare Data Set](#1)
# - [Load Python Packages](#1-1)
# - [Load and Prepare Data Set](#1-2)
#
# [2. Data Quality Assessment](#2)
# - [Check Missing Values](#2-1)
# - [Check Duplicated Values](#2-2)
#
# [3. Exploratory Data Analysis and Feature Engineering](#3)
# - [1.Market Capitalization and Transaction Volume](#3-1)
# - [2.Price Fluctuation of Cryptocurrencies](#3-2)
# - [3.Moving Averages and Price Trend](#3-3)
# - [4.Market Prices of Cryptocurrencies](#3-4)
# - [5.Return Ratio](#3-5)
# - [6.Candlestick Charts Using Plotly (BitCoin)](#3-6)
#
# [4. Building Models - Predicting Price for Cryptocurrencies](#4)
# - [Prepare Data for Models](#4-1)
# - [Applying Machine Learning Models](#4-2)
# - [Prices Prediction](#4-3)
#
# [5. Conclusion - Investment Suggestion](#5)
#
# [6. Future Work](#6)
#
# [7. Reference](#7)
#
# ***
# ## 1. Prepare Data Set<a id="1"></a>
# ### Load Python Packages<a id="1-1"></a>
import numpy as np
import pandas as pd
import datetime as dt
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller
import matplotlib.pyplot as plt
import seaborn as sns; sns.set_style("whitegrid")
from plotly import tools
import plotly.offline as py
import plotly.io as io
#py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor
from sklearn.linear_model import BayesianRidge, ElasticNetCV
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
import sys
dir=sys.argv[1];
datafile=sys.argv[2];
# ### Load and Prepare Data Set<a id="1-2"></a>
# load data set
data = pd.read_csv(datafile, parse_dates=['date'],index_col='date')
#
current_date = data.index[len(data)-1];
print("current date : " + str(current_date))
title_tstamp = ( " \n[ updated on " + str(current_date) + " ]");
# display appointment data set
data.head();
data = data[data.symbol.isin(['BTC','SNM', 'GNT', 'RLC'])]
# display total number of records for the 7 cryptocurrencies
data.name.value_counts()
# display data volumn and types
data.info()
# ## 2. Data Quality Assessment<a id="2"></a>
# ### Check Missing Values<a id="2-1"></a>
# check if data set contains missing values
print(data.isnull().sum())
# assert that there are no missing values
assert data.notnull().all().all()
# No missing value exist in this data set.
# ### Check Duplicated Values<a id="2-2"></a>
# check if data set contains duplicated records
print(data.duplicated().sum())
# There is no duplicated appointments record in this data set. The data is clean.
# ## 3. Exploratory Data Analysis and Feature Engineering<a id="3"></a>
# ### Market Capitalization and Transaction Volume<a id="3-1"></a>
# Check market capitalization and transaction volume for each cryptocurrency we choose.
# plot market capitalization
rlc = data[data['symbol']=='RLC']
gnt = data[data['symbol']=='GNT']
snm = data[data['symbol']=='SNM']
plt.figure(figsize=(15,8))
#(bitcoin['market']/1000000).plot(color='darkorange', label='Bitcoin')
(rlc['market']/1000000).plot(color='darkorange', label='iExec')
(gnt['market']/1000000).plot(color='grey', label='GOLEM')
(snm['market']/1000000).plot(color='blue', label='sonM')
plt.legend()
plt.xlabel('date')
plt.ylabel('Market cap in Million US Dollar')
plt.title('Cloud Computing Cryptocurrency Market Cap' + title_tstamp)
plt.savefig(dir + "Cryptocurrency_Market_Cap")
## volume 2019
data_m = data['2019-01-01':'2019-04-15']
rlc = data_m[data_m['symbol']=='RLC']
gnt = data_m[data_m['symbol']=='GNT']
snm = data_m[data_m['symbol']=='SNM']
plt.figure(figsize=(15,8))
(rlc['volume']/1000000).plot(color='darkorange', label='iExec')
(gnt['volume']/1000000).plot(color='grey', label='GOLEM')
(snm['volume']/1000000).plot(color='blue', label='sonM')
#plt.axvline(dt.datetime(2019, 12, 12),color='black')
plt.legend()
plt.xlabel('time')
plt.title('Cryptocurrency Transaction Volume (Million) in 2019' + title_tstamp)
#plt.show()
plt.savefig(dir + 'Cryptocurrency_Transaction_Volume_in_2019')
data = data['2019-01-01':]
# Check opening, closing, highest, and lowest price for each of the cryptocurrency.
data['oc_diff']=data['close']-data['open']
data.head()
# Difference between opening and closing price
data['oc_diff']=data['close']-data['open']
rlc = data[data['symbol']=='RLC']
gnt = data[data['symbol']=='GNT']
snm = data[data['symbol']=='SNM']
plt.figure(figsize=(15,8))
(rlc['oc_diff']).plot(color='darkorange', label='RLC')
(gnt['oc_diff']).plot(color='grey', label='GNT')
(snm['oc_diff']).plot(color='blue', label='SNM')
plt.text(20,0.05,'test',verticalalignment='bottom', horizontalalignment='center', fontsize=15)
plt.xlabel('time')
plt.ylabel('price in USD')
plt.title('Historical difference between daily opening price and daily closing price of the 3 cloud Cryptocurrencies since 2019' + title_tstamp)
plt.legend()
#plt.show()
plt.savefig(dir + 'Historical_difference_between_opening_price_and_closing_price_of_cloud_Crypto_since_2019')
# Average difference for each cryptocurrency
ave_diff={'rlc':[(rlc['oc_diff']).mean()],
'gnt':[(gnt['oc_diff']).mean()],
'snm':[(snm['oc_diff']).mean()]}
pd.DataFrame(ave_diff, index=['avg.diff'])
# To get a better understanding of difference between daily opening and closing price,
# we calculated the average difference as well as daily price spread for each cryptocurrency.
# Differences of daily highest price and lowest price
plt.figure(figsize=(15,8))
(rlc['spread']).plot(color='grey', label='RLC')
(gnt['spread']).plot(color='blue', label='GNT')
(snm['spread']).plot(color='green', label='SNM')
plt.xlabel('time')
plt.ylabel('price in USD')
plt.title('Historical daily price spread of cloud Crypto,\n indicator of value variation' + title_tstamp)
plt.legend()
#plt.show()
plt.savefig(dir + 'Historical_price_spread_of_cloud_Crypto')
# By plotting the spread (difference) between daily highest and lowest price, we found that:
# Average spread for each cryptocurrency
ave_spread={'rlc':[(rlc['spread']).mean()],
'gnt':[(gnt['spread']).mean()],
'snm':[(snm['spread']).mean()]}
pd.DataFrame(ave_spread, index=['avg.spread'])
# calculate 5-day moving averages
_rlc = rlc[['close']]
_rlc.columns = ['RLC']
rlc_ma=_rlc.rolling(window=5).mean()
_gnt = gnt[['close']]
_gnt.columns = ['GNT']
gnt_ma=_gnt.rolling(window=5).mean()
_snm = snm[['close']]
_snm.columns = ['SNM']
snm_ma=_snm.rolling(window=5).mean()
# create matrix of close price only for later use
close = pd.concat([_rlc,_gnt,_snm], axis=1)
close_ma = pd.concat([rlc_ma,gnt_ma,snm_ma], axis=1)
close_ma.tail()
# plot moving average for closing price for cryptocurrencies
close_ma.plot(figsize=(15,8))
plt.title('5-Day Moving Average on Daily Closing Price, fluctuation indicator' + title_tstamp)
plt.xlabel('time')
plt.ylabel('price in USD')
plt.savefig(dir + '5-Day_Moving_Average_on_Daily_Closing_Price')
# calculate daily average price
data['daily_avg'] = (data['open'] + data['high'] + data['low'] + data['close']) / 4
bitcoin = data[data['symbol']=='BTC']
rlc = data[data['symbol']=='RLC']
gnt = data[data['symbol']=='GNT']
snm = data[data['symbol']=='SNM']
plt.figure(figsize=(15,8))
#(bitcoin['daily_avg']).plot(color='brown', label='btc')
(rlc['daily_avg']).plot(color='grey', label='rlc')
(gnt['daily_avg']).plot(color='blue', label='gnt')
(snm['daily_avg']).plot(color='yellow', label='snm')
plt.xlabel('time')
plt.ylabel('price in USD')
plt.title('Historical daily average price of cloud Crypto,\n (open+high+low+close)/4' + title_tstamp)
plt.legend()
#plt.show()
plt.savefig(dir + 'Historical_daily_average_price_of_cloud_Crypto_since_2019')
# #### Plot individual daily open, high, low, close prices
plt.figure(figsize=(15, 12))
plt.subplot(4,1,1)
plt.plot(bitcoin[['open','high','low','close']])
plt.ylabel('price in USD')
plt.title('Historical daily open, high, low and close prices of BitCoin since 2019'+ title_tstamp)
plt.legend(['open','high','low','close'])
plt.subplot(4,1,2)
plt.plot(rlc[['open','high','low','close']])
plt.ylabel('price in USD')
plt.title('Historical daily open, high, low and close prices of RLC since 2019'+ title_tstamp)
plt.legend(['open','high','low','close'])
plt.subplot(4,1,3)
plt.plot(gnt[['open','high','low','close']])
plt.ylabel('price in USD')
plt.title('Historical daily open, high, low and close prices of GNT since 2019'+ title_tstamp)
plt.legend(['open','high','low','close'])
plt.subplot(4,1,4)
plt.plot(snm[['open','high','low','close']])
plt.ylabel('price in USD')
plt.title('Historical daily open, high, low and close prices of SNM since 2019'+ title_tstamp)
plt.legend(['open','high','low','close'])
#plt.show()
plt.savefig(dir + "Plot_individual_daily_open_high_low_close_prices")
# #### Check Pearson correlation coefficient to prove if BitCoin price influences price of other cryptocurrencies
plt.figure(figsize=(12,6))
sns.heatmap(close.corr(),vmin=0, vmax=1, cmap='coolwarm', annot=True)
plt.title('Correlation Heatmap between RLC, SNM, GNT'+ title_tstamp)
#plt.show()
plt.savefig(dir + 'Correlation_Heatmap')
# return ratio = current value of the cryptocurrency / initial value of the cryptocurrency
returns = close.apply(lambda x: x/x[0])
returns.plot(figsize=(12,6))
plt.ylabel('Return ratio')
plt.xlabel('time')
plt.title('Return of each Cryptocurrency\n return ratio = current value of the cryptocurrency / initial value of the cryptocurrency' + title_tstamp)
plt.savefig(dir + 'Return_of_each_Cryptocurrencies')
# ### Candlestick Charts Using Plotly (BitCoin)<a id="3-6"></a>
# A candlestick chart (also called Japanese candlestick chart) is a style of financial chart used
#to describe price movements of a security, derivative, or currency.
# Each "candlestick" typically shows one day; so for example a one-month chart may show the 20 trading days as 20 "candlesticks".
# We choose to plot a candlestick chart for BitCoin
# since everyone's super curious on whether BitCoin is going to be an economic bubble or it's still something worth investing in.
increasing_color = '#17BECF'
decreasing_color = '#7F7F7F'
data_plotly = []
layout = {
'xaxis': {
'rangeselector': {
'visible': True
}
},
# Adding a volume bar chart for candlesticks is a good practice usually
'yaxis': {
'domain': [0, 0.2],
'showticklabels': False
},
'yaxis2': {
'domain': [0.2, 0.8]
},
'legend': {
'orientation': 'h',
'y': 0.9,
'yanchor': 'bottom'
},
'margin': {
't': 40,
'b': 40,
'r': 40,
'l': 40
}
}
# Defining main chart
trace0 = go.Candlestick(
x=rlc.index, open=rlc['open'], high=rlc['high'],
low=rlc['low'], close=rlc['close'],
yaxis='y2', name='rlc',
increasing=dict(line=dict(color=increasing_color)),
decreasing=dict(line=dict(color=decreasing_color)),
)
data_plotly.append(trace0)
# Adding some range buttons to interact
rangeselector = {
'visible': True,
'x': 0,
'y': 0.8,
'buttons': [
{'count': 1, 'label': 'reset', 'step': 'all'},
{'count': 6, 'label': '6 mo', 'step': 'month', 'stepmode': 'backward'},
{'count': 3, 'label': '3 mo', 'step': 'month', 'stepmode': 'backward'},
{'count': 1, 'label': '1 mo', 'step': 'month', 'stepmode': 'backward'},
]
}
layout['xaxis'].update(rangeselector=rangeselector)
# Setting volume bar chart colors
colors = []
for i, _ in enumerate(rlc.index):
if i != 0:
if rlc['close'].iloc[i] > rlc['close'].iloc[i-1]:
colors.append(increasing_color)
else:
colors.append(decreasing_color)
else:
colors.append(decreasing_color)
trace1 = go.Bar(
x=rlc.index, y=rlc['volume'],
marker=dict(color=colors),
yaxis='y', name='Volume'
)
data_plotly.append(trace1)
# Adding Moving Average
def moving_average(interval, window_size=10):
window = np.ones(int(window_size)) / float(window_size)
return np.convolve(interval, window, 'same')
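# Note: np.convolve(..., 'same') pads at the edges, so the first/last few points of this
# moving average are built from fewer than `window_size` real samples, which is why the
# trace below trims 5 points from each end. A hedged rolling-mean sketch of the same idea:
# rlc['close'].rolling(window=10, center=True).mean()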
trace2 = go.Scatter(
x=rlc.index[5:-5], y=moving_average(rlc['close'])[5:-5],
yaxis='y2', name='Moving Average',
line=dict(width=1)
)
data_plotly.append(trace2)
# Adding boilinger bands
def bollinger_bands(price, window_size=10, num_of_std=5):
    rolling_mean = price.rolling(window_size).mean()
    rolling_std = price.rolling(window_size).std()
    upper_band = rolling_mean + (rolling_std * num_of_std)
    lower_band = rolling_mean - (rolling_std * num_of_std)
return upper_band, lower_band
bb_upper, bb_lower = bollinger_bands(rlc['close'])
trace3 = go.Scatter(
x=rlc.index, y=bb_upper,
yaxis='y2', line=dict(width=1),
marker=dict(color='#ccc'), hoverinfo='none',
name='Bollinger Bands',
legendgroup='Bollinger Bands'
)
data_plotly.append(trace3)
trace4 = go.Scatter(
x=rlc.index, y=bb_lower,
yaxis='y2', line=dict(width=1),
marker=dict(color='#ccc'), hoverinfo='none',
name='Bollinger Bands', showlegend=False,
legendgroup='Bollinger Bands'
)
data_plotly.append(trace4)
fig = go.Figure(data=data_plotly, layout=layout)
py.iplot(fig, filename='rlc-candlestick')
io.orca.config
io.orca.status
io.write_image(fig, dir + 'cancdlestick_chart_for_rlc.png')
# How to read the candlestick chart:
# <img src='http://www.greenboxmarkets.com/wp-content/uploads/2016/08/japanese-candlesticks-chart-introduction-1.jpg'>
# 1. A blue candlestick means the price of the coin increased that day compared to the previous day, while a grey candlestick means the price decreased.
# 2. The red trend line indicates the 10-day moving average.
# 3. Bollinger Bands consist of an N-period moving average (MA), an upper band at K times an N-period standard deviation above the moving average (MA + Kσ), and a lower band at K times an N-period standard deviation below the moving average (MA − Kσ).
# 4. The bottom chart is the time range.
# 5. Each candlestick gives 4 daily values: open, high, low, and close. (If fewer values are visible, two or more of them are equal.)
# 6. Based on the chart, and in particular the last candlestick, we can infer from its large body but short wick that the price might keep dropping after February 22nd.
# ## 4. Building Models - Predicting Price for Cryptocurrencies<a id="4"></a>
# ### Prepare Data for Models<a id="4-1"></a>
# I will separate the data set and build model on each cryptocurrencie type.
#
# For data preparation, I will follow below steps:
# 1. remove 'slug', 'name', 'symbol' and 'ranknow' from data set
# 2. shift all 'daily_avg' values 30 rows up (a one-month lag) and define a new column, 'daily_avg_After_Month'
# 3. choose 'daily_avg_After_Month' as target and all other variables as predictors
# 4. create train and test data by splitting the data set to 80-20
# 5. create 'X_forecast' using all predictors with NA 'daily_avg_After_Month' (to predict price in next 30 days)
# In[29]:
# dropping 'slug' and 'name' as we can just use 'symbol', and dropping 'ranknow'.
data=data.drop(['slug', 'name', 'ranknow'], axis=1)
# Bitcoin (BTC)
BTC = data[data.symbol == 'BTC'].copy()
BTC['daily_avg_After_Month']=BTC['daily_avg'].shift(-30)
X_BTC = BTC.dropna().drop(['daily_avg_After_Month','symbol','daily_avg'], axis=1)
y_BTC = BTC.dropna()['daily_avg_After_Month']
X_train_BTC, X_test_BTC, y_train_BTC, y_test_BTC = train_test_split(X_BTC, y_BTC, test_size=0.2, random_state=43)
X_forecast_BTC = BTC.tail(30).drop(['daily_avg_After_Month','symbol','daily_avg'], axis=1)
# rlc
RLC = data[data.symbol == 'RLC'].copy()
RLC['daily_avg_After_Month']=RLC['daily_avg'].shift(-30)
X_RLC = RLC.dropna().drop(['daily_avg_After_Month','symbol','daily_avg'], axis=1)
y_RLC = RLC.dropna()['daily_avg_After_Month']
X_train_RLC, X_test_RLC, y_train_RLC, y_test_RLC = train_test_split(X_RLC, y_RLC, test_size=0.2, random_state=43)
X_forecast_RLC = RLC.tail(30).drop(['daily_avg_After_Month','symbol','daily_avg'], axis=1)
# gnt
GNT = data[data.symbol == 'GNT'].copy()
GNT['daily_avg_After_Month']=GNT['daily_avg'].shift(-30)
X_GNT = GNT.dropna().drop(['daily_avg_After_Month','symbol','daily_avg'], axis=1)
y_GNT = GNT.dropna()['daily_avg_After_Month']
X_train_GNT, X_test_GNT, y_train_GNT, y_test_GNT = train_test_split(X_GNT, y_GNT, test_size=0.2, random_state=43)
X_forecast_GNT = GNT.tail(30).drop(['daily_avg_After_Month','symbol','daily_avg'], axis=1)
# snm
SNM = data[data.symbol == 'SNM'].copy()
SNM['daily_avg_After_Month']=SNM['daily_avg'].shift(-30)
X_SNM = SNM.dropna().drop(['daily_avg_After_Month','symbol','daily_avg'], axis=1)
y_SNM = SNM.dropna()['daily_avg_After_Month']
X_train_SNM, X_test_SNM, y_train_SNM, y_test_SNM = train_test_split(X_SNM, y_SNM, test_size=0.2, random_state=43)
X_forecast_SNM = SNM.tail(30).drop(['daily_avg_After_Month','symbol','daily_avg'], axis=1)
# ### Applying Machine Learning Models<a id="4-2"></a>
# define regression function
def regression(X_train, X_test, y_train, y_test):
Regressor = {
'Random Forest Regressor': RandomForestRegressor(n_estimators=200),
'Gradient Boosting Regressor': GradientBoostingRegressor(n_estimators=500),
'ExtraTrees Regressor': ExtraTreesRegressor(n_estimators=500, min_samples_split=5),
'Bayesian Ridge': BayesianRidge(),
'Elastic Net CV': ElasticNetCV()
}
for name, clf in Regressor.items():
print(name)
clf.fit(X_train, y_train)
print('R2: {' + str(r2_score(y_test, clf.predict(X_test))) + '}')
print('MAE: {' + str(mean_absolute_error(y_test, clf.predict(X_test))) + '}')
print('MSE: {' + str(mean_squared_error(y_test, clf.predict(X_test))) + '}')
# Bitcoin (BTC)
print('Bitcoin (BTC):')
regression(X_train_BTC, X_test_BTC, y_train_BTC, y_test_BTC)
# Bitcoin (BTC)
print('iExec (RLC):')
regression(X_train_RLC, X_test_RLC, y_train_RLC, y_test_RLC)
# GNT (GNT)
print('GNT (GNT):')
regression(X_train_GNT, X_test_GNT, y_train_GNT, y_test_GNT)
# SNM
print('sonm (SNM):')
regression(X_train_SNM, X_test_SNM, y_train_SNM, y_test_SNM)
# As for all the five algorithms, I will end up taking Extra Trees Regressor for all cryptos
# but XRP and LTC, since this algorithm perform better than all other algorithms in all indicators(R2, MAE and MSE).
# For XRP and LTC, the Random Forest Regressor perform distinctly better than all other algorithms.
# ### Prices Prediction<a id="4-3"></a>
# define prediction function
def prediction(name, X, y, X_forecast):
if name in ['XRP', 'LTC']:
model = RandomForestRegressor(n_estimators=200)
else:
model = ExtraTreesRegressor(n_estimators=500, min_samples_split=5)
model.fit(X, y)
target = model.predict(X_forecast)
return target
# calculate forecasted prices for next 30 days
forecasted_BTC = prediction('BTC', X_BTC, y_BTC, X_forecast_BTC)
forecasted_RLC = prediction('RLC', X_RLC, y_RLC, X_forecast_RLC)
forecasted_GNT = prediction('GNT', X_GNT, y_GNT, X_forecast_GNT)
forecasted_SNM = prediction('SNM', X_SNM, y_SNM, X_forecast_SNM)
# define index for next 30 days
last_date=data.iloc[-1].name
modified_date = last_date + dt.timedelta(days=1)
new_date = pd.date_range(modified_date,periods=30,freq='D')
# assign prediction to newly defined index
forecasted_BTC = pd.DataFrame(forecasted_BTC, columns=['daily_avg'], index=new_date)
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import operator
import functools
from contextlib import contextmanager
from numbers import Integral
import numpy as np
import pandas as pd
from pandas.api.types import is_string_dtype
from pandas.core.dtypes.cast import find_common_type
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pass
from ..core import Entity, ExecutableTuple
from ..lib.mmh3 import hash as mmh_hash
from ..tensor.utils import dictify_chunk_size, normalize_chunk_sizes
from ..utils import tokenize, sbytes
def hash_index(index, size):
def func(x, size):
return mmh_hash(sbytes(x)) % size
f = functools.partial(func, size=size)
idx_to_grouped = index.groupby(index.map(f))
return [idx_to_grouped.get(i, list()) for i in range(size)]
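# Sketch of the bucketing above (hypothetical labels; the actual assignment depends on the
# mmh3 hash of each label, so concrete bucket contents are not shown):
# idx = pd.Index(['a', 'b', 'c', 'd'])
# hash_index(idx, 2)   # -> [labels whose mmh3 hash % 2 == 0, labels whose hash % 2 == 1]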
def hash_dataframe_on(df, on, size, level=None):
if on is None:
idx = df.index
if level is not None:
idx = idx.to_frame(False)[level]
hashed_label = pd.util.hash_pandas_object(idx, categorize=False)
elif callable(on):
# todo optimization can be added, if ``on`` is a numpy ufunc or sth can be vectorized
hashed_label = pd.util.hash_pandas_object(df.index.map(on), categorize=False)
else:
if isinstance(on, list):
to_concat = []
for v in on:
if isinstance(v, pd.Series):
to_concat.append(v)
else:
to_concat.append(df[v])
data = pd.concat(to_concat, axis=1)
else:
data = df[on]
hashed_label = pd.util.hash_pandas_object(data, index=False, categorize=False)
idx_to_grouped = df.index.groupby(hashed_label % size)
return [idx_to_grouped.get(i, pd.Index([])).unique() for i in range(size)]
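# Hedged usage sketch (hypothetical frame): split a DataFrame's index into 3 buckets keyed
# on column 'key'; the result is a list of 3 pandas Index objects that together cover
# df.index, with membership decided by pandas' hash of the key values.
# df = pd.DataFrame({'key': list('abcabc'), 'val': range(6)})
# buckets = hash_dataframe_on(df, 'key', 3)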
def hash_dtypes(dtypes, size):
hashed_indexes = hash_index(dtypes.index, size)
return [dtypes[index] for index in hashed_indexes]
def sort_dataframe_inplace(df, *axis):
for ax in axis:
df.sort_index(axis=ax, inplace=True)
return df
def _get_range_index_start(pd_range_index):
try:
return pd_range_index.start
except AttributeError: # pragma: no cover
return pd_range_index._start
def _get_range_index_stop(pd_range_index):
try:
return pd_range_index.stop
except AttributeError: # pragma: no cover
return pd_range_index._stop
def _get_range_index_step(pd_range_index):
try:
return pd_range_index.step
except AttributeError: # pragma: no cover
return pd_range_index._step
def is_pd_range_empty(pd_range_index):
start, stop, step = _get_range_index_start(pd_range_index), \
_get_range_index_stop(pd_range_index), \
_get_range_index_step(pd_range_index)
return (start >= stop and step >= 0) or (start <= stop and step < 0)
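# A couple of concrete cases for reference (hypothetical ranges):
# >>> is_pd_range_empty(pd.RangeIndex(0, 0))   # start >= stop with non-negative step
# True
# >>> is_pd_range_empty(pd.RangeIndex(0, 5))
# False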
def decide_dataframe_chunk_sizes(shape, chunk_size, memory_usage):
"""
Decide how a given DataFrame can be split into chunk.
:param shape: DataFrame's shape
    :param chunk_size: if a dict is provided, it maps dimension id to chunk size;
                       if a scalar or tuple is provided, it's the chunk size for each dimension.
:param memory_usage: pandas Series in which each column's memory usage
:type memory_usage: pandas.Series
:return: the calculated chunk size for each dimension
:rtype: tuple
"""
from ..config import options
chunk_size = dictify_chunk_size(shape, chunk_size)
average_memory_usage = memory_usage / shape[0]
nleft = len(shape) - len(chunk_size)
if nleft < 0:
raise ValueError("chunks have more than two dimensions")
if nleft == 0:
return normalize_chunk_sizes(shape, tuple(chunk_size[j] for j in range(len(shape))))
max_chunk_size = options.chunk_store_limit
# for the row side, along axis 0
if 0 not in chunk_size:
row_chunk_size = []
row_left_size = shape[0]
else:
row_chunk_size = normalize_chunk_sizes((shape[0],), (chunk_size[0],))[0]
row_left_size = -1
# for the column side, along axis 1
if 1 not in chunk_size:
col_chunk_size = []
col_chunk_store = []
col_left_size = shape[1]
else:
col_chunk_size = normalize_chunk_sizes((shape[1],), (chunk_size[1],))[0]
acc = [0] + np.cumsum(col_chunk_size).tolist()
col_chunk_store = [average_memory_usage[acc[i]: acc[i + 1]].sum()
for i in range(len(col_chunk_size))]
col_left_size = -1
while True:
nbytes_occupied = np.prod([max(it) for it in (row_chunk_size, col_chunk_store) if it])
dim_size = np.maximum(int(np.power(max_chunk_size / nbytes_occupied, 1 / float(nleft))), 1)
if col_left_size == 0:
col_chunk_size.append(0)
if row_left_size == 0:
row_chunk_size.append(0)
# check col first
if col_left_size > 0:
cs = min(col_left_size, dim_size)
col_chunk_size.append(cs)
start = int(np.sum(col_chunk_size[:-1]))
col_chunk_store.append(average_memory_usage.iloc[start: start + cs].sum())
col_left_size -= cs
if row_left_size > 0:
max_col_chunk_store = max(col_chunk_store)
cs = min(row_left_size, int(max_chunk_size / max_col_chunk_store))
row_chunk_size.append(cs)
row_left_size -= cs
if col_left_size <= 0 and row_left_size <= 0:
break
return tuple(row_chunk_size), tuple(col_chunk_size)
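# Hedged usage sketch (hypothetical numbers; the row split also depends on
# options.chunk_store_limit, so no exact output is asserted here):
# mem = pd.Series([8000, 8000, 4000], index=['a', 'b', 'c'])   # per-column memory usage
# decide_dataframe_chunk_sizes((1000, 3), {1: 2}, mem)
# -> rows are cut so each chunk fits the store limit, columns are split as (2, 1)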
def decide_series_chunk_size(shape, chunk_size, memory_usage):
from ..config import options
chunk_size = dictify_chunk_size(shape, chunk_size)
average_memory_usage = memory_usage / shape[0] if shape[0] != 0 else memory_usage
if len(chunk_size) == len(shape):
return normalize_chunk_sizes(shape, chunk_size[0])
max_chunk_size = options.chunk_store_limit
series_chunk_size = max_chunk_size / average_memory_usage
return normalize_chunk_sizes(shape, int(series_chunk_size))
def parse_index(index_value, *args, store_data=False, key=None):
from .core import IndexValue
def _extract_property(index, tp, ret_data):
kw = {
'_min_val': _get_index_min(index),
'_max_val': _get_index_max(index),
'_min_val_close': True,
'_max_val_close': True,
'_key': key or _tokenize_index(index, *args),
}
if ret_data:
kw['_data'] = index.values
for field in tp._FIELDS:
if field in kw or field == '_data':
continue
val = getattr(index, field.lstrip('_'), None)
if val is not None:
kw[field] = val
return kw
def _tokenize_index(index, *token_objects):
if not index.empty:
return tokenize(index)
else:
return tokenize(index, *token_objects)
def _get_index_min(index):
try:
return index.min()
except ValueError:
if isinstance(index, pd.IntervalIndex):
return None
raise
except TypeError:
return None
def _get_index_max(index):
try:
return index.max()
except ValueError:
if isinstance(index, pd.IntervalIndex):
return None
raise
except TypeError:
return None
def _serialize_index(index):
tp = getattr(IndexValue, type(index).__name__)
properties = _extract_property(index, tp, store_data)
return tp(**properties)
def _serialize_range_index(index):
if is_pd_range_empty(index):
properties = {
'_is_monotonic_increasing': True,
'_is_monotonic_decreasing': False,
'_is_unique': True,
'_min_val': _get_index_min(index),
'_max_val': _get_index_max(index),
'_min_val_close': True,
'_max_val_close': False,
'_key': key or _tokenize_index(index, *args),
'_name': index.name,
'_dtype': index.dtype,
}
else:
properties = _extract_property(index, IndexValue.RangeIndex, False)
return IndexValue.RangeIndex(_slice=slice(_get_range_index_start(index),
_get_range_index_stop(index),
_get_range_index_step(index)),
**properties)
def _serialize_multi_index(index):
kw = _extract_property(index, IndexValue.MultiIndex, store_data)
kw['_sortorder'] = index.sortorder
kw['_dtypes'] = [lev.dtype for lev in index.levels]
return IndexValue.MultiIndex(**kw)
if index_value is None:
return IndexValue(_index_value=IndexValue.Index(
_is_monotonic_increasing=False,
_is_monotonic_decreasing=False,
_is_unique=False,
_min_val=None,
_max_val=None,
_min_val_close=True,
_max_val_close=True,
_key=key or tokenize(*args),
))
if isinstance(index_value, pd.RangeIndex):
return IndexValue(_index_value=_serialize_range_index(index_value))
elif isinstance(index_value, pd.MultiIndex):
return IndexValue(_index_value=_serialize_multi_index(index_value))
else:
return IndexValue(_index_value=_serialize_index(index_value))
def gen_unknown_index_value(index_value, *args):
pd_index = index_value.to_pandas()
if isinstance(pd_index, pd.RangeIndex):
return parse_index(pd.RangeIndex(-1), *args)
elif not isinstance(pd_index, pd.MultiIndex):
return parse_index(pd.Index([], dtype=pd_index.dtype), *args)
else:
i = pd.MultiIndex.from_arrays([c[:0] for c in pd_index.levels],
names=pd_index.names)
return parse_index(i, *args)
def split_monotonic_index_min_max(left_min_max, left_increase, right_min_max, right_increase):
"""
Split the original two min_max into new min_max. Each min_max should be a list
in which each item should be a 4-tuple indicates that this chunk's min value,
whether the min value is close, the max value, and whether the max value is close.
The return value would be a nested list, each item is a list
indicates that how this chunk should be split into.
:param left_min_max: the left min_max
:param left_increase: if the original data of left is increased
:param right_min_max: the right min_max
:param right_increase: if the original data of right is increased
:return: nested list in which each item indicates how min_max is split
>>> left_min_max = [(0, True, 3, True), (4, True, 8, True), (12, True, 18, True),
... (20, True, 22, True)]
>>> right_min_max = [(2, True, 6, True), (7, True, 9, True), (10, True, 14, True),
... (18, True, 19, True)]
>>> l, r = split_monotonic_index_min_max(left_min_max, True, right_min_max, True)
>>> l
[[(0, True, 2, False), (2, True, 3, True)], [(3, False, 4, False), (4, True, 6, True), (6, False, 7, False),
(7, True, 8, True)], [(8, False, 9, True), (10, True, 12, False), (12, True, 14, True), (14, False, 18, False),
(18, True, 18, True)], [(18, False, 19, True), [20, True, 22, True]]]
>>> r
[[(0, True, 2, False), (2, True, 3, True), (3, False, 4, False), (4, True, 6, True)],
[(6, False, 7, False), (7, True, 8, True), (8, False, 9, True)], [(10, True, 12, False), (12, True, 14, True)],
[(14, False, 18, False), (18, True, 18, True), (18, False, 19, True), [20, True, 22, True]]]
"""
left_idx_to_min_max = [[] for _ in left_min_max]
right_idx_to_min_max = [[] for _ in right_min_max]
left_curr_min_max = list(left_min_max[0])
right_curr_min_max = list(right_min_max[0])
left_curr_idx = right_curr_idx = 0
left_terminate = right_terminate = False
while not left_terminate or not right_terminate:
if left_terminate:
left_idx_to_min_max[left_curr_idx].append(tuple(right_curr_min_max))
right_idx_to_min_max[right_curr_idx].append(tuple(right_curr_min_max))
if right_curr_idx + 1 >= len(right_min_max):
right_terminate = True
else:
right_curr_idx += 1
right_curr_min_max = list(right_min_max[right_curr_idx])
elif right_terminate:
right_idx_to_min_max[right_curr_idx].append(tuple(left_curr_min_max))
left_idx_to_min_max[left_curr_idx].append(tuple(left_curr_min_max))
if left_curr_idx + 1 >= len(left_min_max):
left_terminate = True
else:
left_curr_idx += 1
left_curr_min_max = list(left_min_max[left_curr_idx])
elif left_curr_min_max[0] < right_curr_min_max[0]:
# left min < right min
right_min = [right_curr_min_max[0], not right_curr_min_max[1]]
max_val = min(left_curr_min_max[2:], right_min)
assert len(max_val) == 2
min_max = (left_curr_min_max[0], left_curr_min_max[1],
max_val[0], max_val[1])
left_idx_to_min_max[left_curr_idx].append(min_max)
right_idx_to_min_max[right_curr_idx].append(min_max)
if left_curr_min_max[2:] == max_val:
# left max < right min
if left_curr_idx + 1 >= len(left_min_max):
left_terminate = True
else:
left_curr_idx += 1
left_curr_min_max = list(left_min_max[left_curr_idx])
else:
# from left min(left min close) to right min(exclude right min close)
left_curr_min_max[:2] = right_curr_min_max[:2]
elif left_curr_min_max[0] > right_curr_min_max[0]:
# left min > right min
left_min = [left_curr_min_max[0], not left_curr_min_max[1]]
max_val = min(right_curr_min_max[2:], left_min)
min_max = (right_curr_min_max[0], right_curr_min_max[1],
max_val[0], max_val[1])
left_idx_to_min_max[left_curr_idx].append(min_max)
right_idx_to_min_max[right_curr_idx].append(min_max)
if right_curr_min_max[2:] == max_val:
# right max < left min
if right_curr_idx + 1 >= len(right_min_max):
right_terminate = True
else:
right_curr_idx += 1
right_curr_min_max = list(right_min_max[right_curr_idx])
else:
# from left min(left min close) to right min(exclude right min close)
right_curr_min_max[:2] = left_curr_min_max[:2]
else:
# left min == right min
max_val = min(left_curr_min_max[2:], right_curr_min_max[2:])
assert len(max_val) == 2
min_max = (left_curr_min_max[0], left_curr_min_max[1], max_val[0], max_val[1])
left_idx_to_min_max[left_curr_idx].append(min_max)
right_idx_to_min_max[right_curr_idx].append(min_max)
if max_val == left_curr_min_max[2:]:
if left_curr_idx + 1 >= len(left_min_max):
left_terminate = True
else:
left_curr_idx += 1
left_curr_min_max = list(left_min_max[left_curr_idx])
else:
left_curr_min_max[:2] = max_val[0], not max_val[1]
if max_val == right_curr_min_max[2:]:
if right_curr_idx + 1 >= len(right_min_max):
right_terminate = True
else:
right_curr_idx += 1
right_curr_min_max = list(right_min_max[right_curr_idx])
else:
right_curr_min_max[:2] = max_val[0], not max_val[1]
if left_increase is False:
left_idx_to_min_max = list(reversed(left_idx_to_min_max))
if right_increase is False:
right_idx_to_min_max = list(reversed(right_idx_to_min_max))
return left_idx_to_min_max, right_idx_to_min_max
def build_split_idx_to_origin_idx(splits, increase=True):
# splits' len is equal to the original chunk size on a specified axis,
# splits is sth like [[(0, True, 2, True), (2, False, 3, True)]]
# which means there is one input chunk, and will be split into 2 out chunks
# in this function, we want to build a new dict from the out chunk index to
# the original chunk index and the inner position, like {0: (0, 0), 1: (0, 1)}
if increase is False:
splits = list(reversed(splits))
out_idx = itertools.count(0)
res = dict()
for origin_idx, _ in enumerate(splits):
for pos in range(len(splits[origin_idx])):
if increase is False:
o_idx = len(splits) - origin_idx - 1
else:
o_idx = origin_idx
res[next(out_idx)] = o_idx, pos
return res
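# Illustrative example of the mapping described above (hypothetical splits): a single input
# chunk split into two output chunks maps each output index back to
# (original_chunk_index, position_within_that_chunk).
# >>> build_split_idx_to_origin_idx([[(0, True, 2, True), (2, False, 3, True)]])
# {0: (0, 0), 1: (0, 1)}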
def _generate_value(dtype, fill_value):
# special handle for datetime64 and timedelta64
dispatch = {
np.datetime64: pd.Timestamp,
np.timedelta64: pd.Timedelta,
pd.CategoricalDtype.type: lambda x: pd.CategoricalDtype([x]),
# for object, we do not know the actual dtype,
# just convert to str for common usage
np.object_: lambda x: str(fill_value),
}
# otherwise, just use dtype.type itself to convert
convert = dispatch.get(dtype.type, dtype.type)
return convert(fill_value)
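# Quick sketch of the dispatch above (values follow from the conversions used):
# >>> _generate_value(np.dtype('datetime64[ns]'), 1)   # -> pd.Timestamp(1)
# Timestamp('1970-01-01 00:00:00.000000001')
# >>> _generate_value(np.dtype('float64'), 1)
# 1.0
# >>> _generate_value(np.dtype('O'), 1)                # object dtype falls back to str
# '1'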
def build_empty_df(dtypes, index=None):
columns = dtypes.index
# duplicate column may exist,
# so use RangeIndex first
df = pd.DataFrame(columns=pd.RangeIndex(len(columns)), index=index)
length = len(index) if index is not None else 0
for i, d in enumerate(dtypes):
df[i] = pd.Series([_generate_value(d, 1) for _ in range(length)],
dtype=d, index=index)
df.columns = columns
return df
def build_df(df_obj, fill_value=1, size=1):
empty_df = build_empty_df(df_obj.dtypes, index=df_obj.index_value.to_pandas()[:0])
dtypes = empty_df.dtypes
record = [_generate_value(dtype, fill_value) for dtype in dtypes]
if isinstance(empty_df.index, pd.MultiIndex):
index = tuple(_generate_value(level.dtype, fill_value) for level in empty_df.index.levels)
empty_df = empty_df.reindex(
index=pd.MultiIndex.from_tuples([index], names=empty_df.index.names))
empty_df.iloc[0] = record
else:
index = _generate_value(empty_df.index.dtype, fill_value)
empty_df.loc[index] = record
empty_df = pd.concat([empty_df] * size)
# make sure dtypes correct for MultiIndex
for i, dtype in enumerate(dtypes.tolist()):
s = empty_df.iloc[:, i]
if not pd.api.types.is_dtype_equal(s.dtype, dtype):
empty_df.iloc[:, i] = s.astype(dtype)
return empty_df
def build_empty_series(dtype, index=None, name=None):
length = len(index) if index is not None else 0
return pd.Series([_generate_value(dtype, 1) for _ in range(length)],
dtype=dtype, index=index, name=name)
def build_series(series_obj, fill_value=1, size=1, name=None):
empty_series = build_empty_series(series_obj.dtype, name=name,
index=series_obj.index_value.to_pandas()[:0])
record = _generate_value(series_obj.dtype, fill_value)
if isinstance(empty_series.index, pd.MultiIndex):
index = tuple(_generate_value(level.dtype, fill_value) for level in empty_series.index.levels)
empty_series = empty_series.reindex(
index=pd.MultiIndex.from_tuples([index], names=empty_series.index.names))
empty_series.iloc[0] = record
else:
if isinstance(empty_series.index.dtype, pd.CategoricalDtype):
index = None
else:
index = _generate_value(empty_series.index.dtype, fill_value)
empty_series.loc[index] = record
empty_series = pd.concat([empty_series] * size)
# make sure dtype correct for MultiIndex
empty_series = empty_series.astype(series_obj.dtype, copy=False)
return empty_series
def concat_index_value(index_values, store_data=False):
result = pd.Index([])
if not isinstance(index_values, (list, tuple)):
index_values = [index_values]
for index_value in index_values:
if isinstance(index_value, pd.Index):
result = result.append(index_value)
else:
result = result.append(index_value.to_pandas())
return parse_index(result, store_data=store_data)
def build_concatenated_rows_frame(df):
from ..core import OutputType
from .merge.concat import DataFrameConcat
    # When the df isn't split along the column axis, return the df directly.
if df.chunk_shape[1] == 1:
return df
columns = concat_index_value([df.cix[0, idx].columns_value for idx in range(df.chunk_shape[1])],
store_data=True)
columns_size = columns.to_pandas().size
out_chunks = []
for idx in range(df.chunk_shape[0]):
out_chunk = DataFrameConcat(axis=1, output_types=[OutputType.dataframe]).new_chunk(
[df.cix[idx, k] for k in range(df.chunk_shape[1])], index=(idx, 0),
shape=(df.cix[idx, 0].shape[0], columns_size), dtypes=df.dtypes,
index_value=df.cix[idx, 0].index_value, columns_value=columns)
out_chunks.append(out_chunk)
return DataFrameConcat(axis=1, output_types=[OutputType.dataframe]).new_dataframe(
[df], chunks=out_chunks, nsplits=((chunk.shape[0] for chunk in out_chunks), (df.shape[1],)),
shape=df.shape, dtypes=df.dtypes,
index_value=df.index_value, columns_value=df.columns_value)
def _filter_range_index(pd_range_index, min_val, min_val_close, max_val, max_val_close):
if is_pd_range_empty(pd_range_index):
return pd_range_index
raw_min, raw_max, step = pd_range_index.min(), pd_range_index.max(), _get_range_index_step(pd_range_index)
# seek min range
greater_func = operator.gt if min_val_close else operator.ge
actual_min = raw_min
while greater_func(min_val, actual_min):
actual_min += abs(step)
if step < 0:
actual_min += step # on the right side
# seek max range
less_func = operator.lt if max_val_close else operator.le
actual_max = raw_max
while less_func(max_val, actual_max):
actual_max -= abs(step)
if step > 0:
actual_max += step # on the right side
if step > 0:
return pd.RangeIndex(actual_min, actual_max, step)
    return pd.RangeIndex(actual_max, actual_min, step)
"""
Created on Sat Sep 18 23:11:22 2021
@author: datakind
"""
import logging
import os
import sys
import typing as T
from functools import reduce
from pathlib import Path
import pandas as pd
import requests
from matplotlib import collections
from matplotlib import pyplot as plt
from analysis.acs_correlation import correlation_analysis
from analysis.acs_data import get_acs_data
from analysis.housing_loss_summary import summarize_housing_loss
from analysis.timeseries import create_timeseries
from collection.address_cleaning import remove_special_chars
from collection.address_geocoding import find_state_county_city, geocode_input_data
from collection.address_validation import (
standardize_input_addresses,
validate_address_data,
verify_input_directory,
)
from collection.tigerweb_api import (
create_tigerweb_query,
get_input_data_geometry,
jprint,
rename_baseline,
)
from const import (
ACS_DATA_DICT_FILENAME,
GEOCODED_EVICTIONS_FILENAME,
GEOCODED_FORECLOSURES_FILENAME,
GEOCODED_TAX_LIENS_FILENAME,
GIS_IMPORT_FILENAME,
HOUSING_LOSS_SUMMARY_FILENAME,
HOUSING_LOSS_TIMESERIES_FILENAME,
MAX_YEAR,
MIN_YEAR,
OUTPUT_ALL_HOUSING_LOSS_PLOTS,
OUTPUT_EVICTION_PLOTS,
OUTPUT_FORECLOSURE_PLOTS,
OUTPUT_PATH_GEOCODED_DATA,
OUTPUT_PATH_GEOCODER_CACHE,
OUTPUT_PATH_MAPS,
OUTPUT_PATH_PLOTS,
OUTPUT_PATH_PLOTS_DETAIL,
OUTPUT_PATH_SUMMARIES,
TRACT_BOUNDARY_FILENAME,
)
def load_data(sub_directories: T.List, data_category) -> pd.DataFrame:
"""Load evictions data from csv template
Inputs
------
sub_directories: list of sub-directories
data_category: 'evictions', 'mortgage_foreclosures', 'tax_lien_foreclosures'
    parameters: if necessary, parameters to narrow down the timeframe
        or the columns of evictions data to return
Outputs
-------
cleaned_df: Processed pandas dataframe for next step of geo matching
"""
for data_dir in sub_directories:
# If this sub directory does not match the data_category, skip it:
if data_category not in str(data_dir):
continue
# If this is right subdirectory, list the files in the directory
data_files = os.listdir(data_dir)
# Alert user if there are no files in the relevant subdirectory
if len(data_files) == 0:
print('\n\u2326', 'Empty sub directory ', data_dir, ' - nothing to process')
return None
else:
print(
'\nSubdirectory of ',
data_dir,
' has ',
len(data_files),
' files in it: ',
data_files,
)
    data = pd.DataFrame()
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import datetime as dt
import requests
import io
import zipfile
from kungfu.series import FinancialSeries
from kungfu.frame import FinancialDataFrame
def download_factor_data(freq='D'):
'''
Downloads factor data from Kenneth French's website and returns dataframe.
freq can be either 'D' (daily) or 'M' (monthly).
'''
    if freq == 'D':
# Download Carhartt 4 Factors
factors_daily = web.DataReader("F-F_Research_Data_Factors_daily", "famafrench", start='1/1/1900')[0]
mom = web.DataReader('F-F_Momentum_Factor_daily', 'famafrench', start='1/1/1900')[0]
factors_daily = factors_daily.join(mom)
factors_daily = factors_daily[['Mkt-RF','SMB','HML','Mom ','RF']]
factors_daily.columns = ['Mkt-RF','SMB','HML','Mom','RF']
return FinancialDataFrame(factors_daily)
    elif freq == 'M':
# Download Carhartt 4 Factors
factors_monthly = web.DataReader("F-F_Research_Data_Factors", "famafrench", start='1/1/1900')[0]
# mom = web.DataReader('F-F_Momentum_Factor', 'famafrench', start='1/1/1900')[0] #There seems to be a problem with the data file, fix if mom is needed
# factors_monthly = factors_monthly.join(mom)
# factors_monthly = factors_monthly[['Mkt-RF','SMB','HML','Mom ','RF']]
factors_monthly.index = factors_monthly.index.to_timestamp()
# factors_monthly.columns = ['Mkt-RF','SMB','HML','Mom','RF']
factors_monthly.columns = ['Mkt-RF','SMB','HML','RF']
factors_monthly.index = factors_monthly.index+pd.tseries.offsets.MonthEnd(0)
return FinancialDataFrame(factors_monthly)
def download_industry_data(freq='D', excessreturns = True):
'''
    Downloads industry data from Kenneth French's website and returns dataframe.
freq can be either 'D' (daily) or 'M' (monthly).
excessreturns is a boolean to define if the the function should return excess returns.
'''
    if freq == 'D':
# Download Fama/French 49 Industries
industries_daily = web.DataReader("49_Industry_Portfolios_Daily", "famafrench", start='1/1/1900')[0]
industries_daily[(industries_daily <= -99.99) | (industries_daily == -999)] = np.nan #set missing data to NaN
industries_daily = industries_daily.rename_axis('Industry', axis='columns')
if excessreturns is True:
factors_daily = web.DataReader("F-F_Research_Data_Factors_daily", "famafrench", start='1/1/1900')[0]
industries_daily = industries_daily.subtract(factors_daily['RF'], axis=0) #transform into excess returns
return industries_daily
    elif freq == 'M':
# Download Fama/French 49 Industries
industries_monthly = web.DataReader("49_Industry_Portfolios", "famafrench", start='1/1/1900')[0]
industries_monthly[(industries_monthly <= -99.99) | (industries_monthly == -999)] = np.nan #set missing data to NaN
industries_monthly = industries_monthly.rename_axis('Industry', axis='columns')
industries_monthly.index = industries_monthly.index.to_timestamp()
if excessreturns is True:
factors_monthly = web.DataReader("F-F_Research_Data_Factors", "famafrench", start='1/1/1900')[0]
factors_monthly.index = factors_monthly.index.to_timestamp()
industries_monthly = industries_monthly.subtract(factors_monthly['RF'], axis=0) #transform into excess returns
industries_monthly.index = industries_monthly.index+pd.tseries.offsets.MonthEnd(0)
return industries_monthly
def download_25portfolios_data(freq='D', excessreturns = True):
'''
Downloads 25 portfolios data from Kenneth French's website and returns dataframe.
freq can be either 'D' (daily) or 'M' (monthly).
excessreturns is a boolean to define if the the function should return excess returns.
'''
    if freq == 'D':
# Download Fama/French 25 portfolios
portfolios_daily = web.DataReader("25_Portfolios_5x5_CSV", "famafrench", start='1/1/1900')[0]
portfolios_daily[(portfolios_daily <= -99.99) | (portfolios_daily == -999)] = np.nan #set missing data to NaN
if excessreturns is True:
factors_daily = web.DataReader("F-F_Research_Data_Factors_daily", "famafrench", start='1/1/1900')[0]
portfolios_daily = portfolios_daily.subtract(factors_daily['RF'], axis=0) #transform into excess returns
return portfolios_daily
    elif freq == 'M':
# Download Fama/French 25 portfolios
portfolios_monthly = web.DataReader("25_Portfolios_5x5_Daily_CSV", "famafrench", start='1/1/1900')[0]
portfolios_monthly[(industries_monthly <= -99.99) | (industries_monthly == -999)] = np.nan #set missing data to NaN
portfolios_monthly.index = portfolios_monthly.index.to_timestamp()
if excessreturns is True:
factors_monthly = web.DataReader("F-F_Research_Data_Factors", "famafrench", start='1/1/1900')[0]
factors_monthly.index = factors_monthly.index.to_timestamp()
portfolios_monthly = portfolios_monthly.subtract(factors_monthly['RF'], axis=0) #transform into excess returns
return portfolios_monthly
def download_recessions_data(freq='M', startdate='1/1/1900', enddate=dt.datetime.today()):
'''
Downloads NBER recessions from FRED and returns series.
freq can be either 'D' (daily) or 'M' (monthly).
startdate and enddate define the length of the timeseries.
'''
USREC_monthly = web.DataReader('USREC', 'fred',start = startdate, end=enddate)
    if freq == 'M':
return USREC_monthly
    if freq == 'D':
first_day = USREC_monthly.index.min() - pd.DateOffset(day=1)
last_day = USREC_monthly.index.max() + pd.DateOffset(day=31)
dayindex = pd.date_range(first_day, last_day, freq='D')
dayindex.name = 'DATE'
USREC_daily = USREC_monthly.reindex(dayindex, method='ffill')
return USREC_daily
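# The daily series above is just the monthly NBER indicator forward-filled onto a daily
# calendar; a minimal standalone sketch of the same idea (hypothetical data):
# m = pd.Series([0, 1], index=pd.to_datetime(['2020-01-01', '2020-02-01']))
# m.reindex(pd.date_range('2020-01-01', '2020-02-29', freq='D'), method='ffill')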
def download_jpy_usd_data():
'''
Downloads USD/JPY exchange rate data from FRED and returns series.
'''
jpy = web.DataReader('DEXJPUS', 'fred', start = '1900-01-01')
return jpy
def download_cad_usd_data():
'''
Downloads USD/CAD exchange rate data from FRED and returns series.
'''
cad = web.DataReader('DEXCAUS', 'fred', start = '1900-01-01')
return cad
def download_vix_data():
'''
Downloads VIX index data from FRED and returns series.
'''
vix = web.DataReader('VIXCLS', 'fred', start = '1900-01-01')
return vix
def download_goyal_welch_svar():
'''
Downloads Goyal/Welch SVAR data from Amit Goyal's website and returns DataFrame.
'''
url = 'http://www.hec.unil.ch/agoyal/docs/PredictorData2017.xlsx'
    sheet = pd.read_excel(url, sheet_name='Monthly')
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data as Data # provides the DataLoader needed for minibatch training
from torch.autograd import Variable
from sklearn.utils import shuffle
from argparse import ArgumentParser, FileType, ArgumentDefaultsHelpFormatter
import sys
import numpy as np
from sklearn import preprocessing
from BiNE_graph_utils import GraphUtils
import random
import math
import os
import pandas as pd
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import average_precision_score, auc, precision_recall_fscore_support
from copy import deepcopy
from data import *
from prediction import *
from evaluation import *
from UBCF_implicit import *
from UBCF_explicit import *
from PhysRec import *
from FunkSVD import *
from PMF import *
from SAE import *
from NCF import *
from FM2 import *
from BiNE_graph import *
from BiNE_graph_utils import *
from BiNE_lsh import *
from TransE import *
from TryOne import *
# import surprise # a dedicated recommender-system package
# import xlearn # a dedicated package for the FM family of models
def OverallAverage_main():
total_rating = 0.0
count_train = 0.0
for row in train_dataset.iloc():
total_rating += row['rating']
count_train += 1.0
overall_avg = total_rating/count_train
total_MAE = 0.0
total_RMSE = 0.0
count_test = 0.0
for row in test_dataset.iloc():
total_MAE += abs(row['rating'] - overall_avg)
total_RMSE += (row['rating'] - overall_avg)**2
count_test += 1.0
MAE = total_MAE / count_test
    RMSE = math.sqrt(total_RMSE / count_test)
print('MAE:', MAE, 'RMSE:', RMSE)
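# Equivalent vectorised sketch of the same baseline (not used by the main flow, shown only
# for clarity): predict the global mean training rating for every test pair.
# overall_avg = train_dataset['rating'].mean()
# errors = test_dataset['rating'] - overall_avg
# MAE, RMSE = errors.abs().mean(), math.sqrt((errors ** 2).mean())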
def UBCF_explicit_main(K):
train_dataset_rating_matrix = data_ratings.GET_TRAIN_RATING_MATRIX(train_dataset)
ubcf = UBCF_explicit("pearsonr" , train_dataset_rating_matrix, K)
similarity_matrix = ubcf.similarity_matrix()
    similarity_matrix[similarity_matrix < 0] = 0 # when predicting ratings later, the denominator must be the sum of absolute similarities (while the numerator keeps its sign), so we either take absolute values or treat negative similarities as 0 (the original code uses the latter)
    estimated_rating_matrix = ubcf.prediction(similarity_matrix) # at this point estimated_rating_matrix has not yet had the -9999 masking applied
for row in train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = -9999
valid_items = estimated_rating_matrix.columns
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]):
row = estimated_rating_matrix.iloc[i]
user_id = row.name
items = row.sort_values(ascending=False).index
all_recommendation_list[user_id] = items
prediction = PREDICTION(train_dataset)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, train_dataset, valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t',HR, '\t', NDCG, '\t', MAE, '\t', RMSE)
del recommendation_list
def IBCF_explicit_main(K):
train_dataset_rating_matrix = data_ratings.GET_TRAIN_RATING_MATRIX(train_dataset)
train_dataset_rating_matrix_T = pd.DataFrame(train_dataset_rating_matrix.values.T,index=train_dataset_rating_matrix.columns.values,columns=train_dataset_rating_matrix.index.values)
ibcf = UBCF_explicit("pearsonr", train_dataset_rating_matrix_T, K)
similarity_matrix = ibcf.similarity_matrix()
similarity_matrix[similarity_matrix < 0] = 0
    # then compute the estimated rating matrix
estimated_rating_matrix_T = ibcf.prediction(similarity_matrix)
estimated_rating_matrix = pd.DataFrame(estimated_rating_matrix_T.values.T,index=estimated_rating_matrix_T.columns.values,columns=estimated_rating_matrix_T.index.values)
for row in train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = -9999
valid_items = estimated_rating_matrix.columns
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]):
row = estimated_rating_matrix.iloc[i]
user_id = row.name
items = row.sort_values(ascending=False).index
all_recommendation_list[user_id] = items
prediction = PREDICTION(train_dataset)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, train_dataset,valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t', HR, '\t', NDCG, '\t', MAE, '\t', RMSE)
del recommendation_list
def Hybrid_explicit_main(K):
train_dataset_rating_matrix = data_ratings.GET_TRAIN_RATING_MATRIX(train_dataset)
train_dataset_rating_matrix_T = pd.DataFrame(train_dataset_rating_matrix.values.T,index = train_dataset_rating_matrix.columns.values, columns = train_dataset_rating_matrix.index.values)
ubcf = UBCF_explicit("pearsonr", train_dataset_rating_matrix, K)
similarity_matrix_ubcf = ubcf.similarity_matrix()
similarity_matrix_ubcf[similarity_matrix_ubcf < 0] = 0
estimated_rating_matrix_ubcf = ubcf.prediction(similarity_matrix_ubcf)
ibcf = UBCF_explicit("pearsonr", train_dataset_rating_matrix_T, K)
similarity_matrix_ibcf = ibcf.similarity_matrix()
similarity_matrix_ibcf[similarity_matrix_ibcf < 0] = 0
estimated_rating_matrix_ibcf_T = ibcf.prediction(similarity_matrix_ibcf)
estimated_rating_matrix_ibcf = pd.DataFrame(estimated_rating_matrix_ibcf_T.values.T,index=estimated_rating_matrix_ibcf_T.columns.values,columns=estimated_rating_matrix_ibcf_T.index.values)
estimated_rating_matrix = (estimated_rating_matrix_ubcf + estimated_rating_matrix_ibcf)/2
for row in train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = -9999
valid_items = estimated_rating_matrix.columns
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]):
row = estimated_rating_matrix.iloc[i]
user_id = row.name
items = row.sort_values(ascending=False).index
all_recommendation_list[user_id] = items
prediction = PREDICTION(train_dataset)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, train_dataset, valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t',HR, '\t', NDCG, '\t', MAE, '\t', RMSE)
del recommendation_list
def UBCF_implicit_main(K):
train_dataset_rating_matrix = data_ratings.GET_TRAIN_RATING_MATRIX(train_dataset)
ubcf = UBCF_implicit("pearsonr", train_dataset_rating_matrix, K)
similarity_matrix = ubcf.similarity_matrix()
similarity_matrix[similarity_matrix < 0] = 0
estimated_rating_matrix = ubcf.prediction(similarity_matrix)
for row in train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = -9999
valid_items = estimated_rating_matrix.columns
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]):
row = estimated_rating_matrix.iloc[i]
user_id = row.name
items = row.sort_values(ascending=False).index
all_recommendation_list[user_id] = items
prediction = PREDICTION(train_dataset)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, train_dataset, valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t',HR, '\t', NDCG, '\t', MAE, '\t', RMSE)
del recommendation_list
# IBCF directly reuses UBCF: transpose the input, then transpose the output back
def IBCF_implicit_main(K):
train_dataset_rating_matrix = data_ratings.GET_TRAIN_RATING_MATRIX(train_dataset)
train_dataset_rating_matrix_T = pd.DataFrame(train_dataset_rating_matrix.values.T,index=train_dataset_rating_matrix.columns.values,columns=train_dataset_rating_matrix.index.values)
ibcf = UBCF_implicit("pearsonr", train_dataset_rating_matrix_T, K)
similarity_matrix = ibcf.similarity_matrix()
similarity_matrix[similarity_matrix < 0] = 0
estimated_rating_matrix_T = ibcf.prediction(similarity_matrix)
estimated_rating_matrix = pd.DataFrame(estimated_rating_matrix_T.values.T,index=estimated_rating_matrix_T.columns.values,columns=estimated_rating_matrix_T.index.values)
for row in train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = -9999
valid_items = estimated_rating_matrix.columns
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]):
row = estimated_rating_matrix.iloc[i]
user_id = row.name
items = row.sort_values(ascending=False).index
all_recommendation_list[user_id] = items
prediction = PREDICTION(train_dataset)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, train_dataset, valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t', HR, '\t', NDCG, '\t', MAE, '\t', RMSE)
del recommendation_list
# The original code simply sums the UBCF and IBCF results and divides by 2
def Hybrid_implicit_main(K):
train_dataset_rating_matrix = data_ratings.GET_TRAIN_RATING_MATRIX(train_dataset)
train_dataset_rating_matrix_T = pd.DataFrame(train_dataset_rating_matrix.values.T,index = train_dataset_rating_matrix.columns.values, columns = train_dataset_rating_matrix.index.values)
ubcf = UBCF_implicit("pearsonr", train_dataset_rating_matrix, K)
similarity_matrix_ubcf = ubcf.similarity_matrix()
similarity_matrix_ubcf[similarity_matrix_ubcf < 0] = 0
estimated_rating_matrix_ubcf = ubcf.prediction(similarity_matrix_ubcf)
ibcf = UBCF_implicit("pearsonr", train_dataset_rating_matrix_T, K)
similarity_matrix_ibcf = ibcf.similarity_matrix()
similarity_matrix_ibcf[similarity_matrix_ibcf < 0] = 0
estimated_rating_matrix_ibcf_T = ibcf.prediction(similarity_matrix_ibcf)
estimated_rating_matrix_ibcf = pd.DataFrame(estimated_rating_matrix_ibcf_T.values.T,index=estimated_rating_matrix_ibcf_T.columns.values,columns=estimated_rating_matrix_ibcf_T.index.values)
estimated_rating_matrix = (estimated_rating_matrix_ubcf + estimated_rating_matrix_ibcf)/2
for row in train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = -9999
valid_items = estimated_rating_matrix.columns
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]):
row = estimated_rating_matrix.iloc[i]
user_id = row.name
items = row.sort_values(ascending=False).index
all_recommendation_list[user_id] = items
prediction = PREDICTION(train_dataset)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, train_dataset, valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t',HR, '\t', NDCG, '\t', MAE, '\t', RMSE)
del recommendation_list
def HC_main():
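# Background note (hedged): HC and MD here refer to the physics-inspired "heat conduction" and
# "mass diffusion" propagation schemes implemented by PhysRS; HC tends to favour niche items,
# MD favours popular ones, and HC_MD_propogation blends the two. The exact update rules live in PhysRS.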
train_dataset_rating_matrix = data_ratings.GET_TRAIN_RATING_MATRIX(train_dataset)
physrec = PhysRS(train_dataset_rating_matrix)
estimated_rating_matrix = physrec.HC_propogation() # the returned value is already a DataFrame
for row in train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = -9999
valid_items = estimated_rating_matrix.columns
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]):
row = estimated_rating_matrix.iloc[i]
user_id = row.name
items = row.sort_values(ascending=False).index
all_recommendation_list[user_id] = items
prediction = PREDICTION(train_dataset)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, train_dataset, valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t',HR, '\t', NDCG, '\t', MAE, '\t', RMSE)
del recommendation_list
def MD_main():
train_dataset_rating_matrix = data_ratings.GET_TRAIN_RATING_MATRIX(train_dataset)
physrec = PhysRS(train_dataset_rating_matrix)
estimated_rating_matrix = physrec.MD_propogation() # the returned value is already a DataFrame
for row in train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = -9999
valid_items = estimated_rating_matrix.columns
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]):
row = estimated_rating_matrix.iloc[i]
user_id = row.name
items = row.sort_values(ascending=False).index
all_recommendation_list[user_id] = items
prediction = PREDICTION(train_dataset)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, train_dataset,valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t',HR, '\t', NDCG, '\t', MAE, '\t', RMSE)
del recommendation_list
def HC_MD_main():
train_dataset_rating_matrix = data_ratings.GET_TRAIN_RATING_MATRIX(train_dataset)
physrec = PhysRS(train_dataset_rating_matrix)
estimated_rating_matrix = physrec.HC_MD_propogation(0.5) # the blending parameter lambda applies to the two k(item) terms of the HC and MD models
for row in train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = -9999
valid_items = estimated_rating_matrix.columns
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]):
row = estimated_rating_matrix.iloc[i]
user_id = row.name
items = row.sort_values(ascending = False).index
all_recommendation_list[user_id] = items
prediction = PREDICTION(train_dataset)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, train_dataset,valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t',HR, '\t', NDCG, '\t', MAE, '\t', RMSE)
del recommendation_list
# FunkSVD uses early stopping here to prevent overfitting
def FunkSVD_main(max_epoch, early_stopping,learning_rate):
funksvd = FunkSVD(true_train_dataset, validation_dataset, user_embeddings, item_embeddings, n_latent, max_epoch,learning_rate)
best_user_embeddings, best_item_embeddings, best_epoch = funksvd.train(early_stopping)
# Prediction
prediction = PREDICTION(true_train_dataset)
estimated_rating_matrix = prediction.GET_ESTIMATED_RATING_MATRIX(best_user_embeddings, best_item_embeddings)
valid_items = estimated_rating_matrix.columns # DataFrame column names, i.e. the item labels
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]): # number of rows
row = estimated_rating_matrix.iloc[i] # each row becomes a Series of (column label, value); note that the positional row number (starting at 0) is distinct from the index label it maps to
user_id = row.name # Series.name holds the row's index label in the DataFrame
items = row.sort_values(ascending=False).index # sort the Series in descending order and keep only the index, i.e. the item labels
all_recommendation_list[user_id] = items
# Evaluation
print(best_epoch)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, true_train_dataset,valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t', HR, '\t' ,NDCG, '\t' ,MAE, '\t' ,RMSE)
del recommendation_list
def TryOne_main(max_epoch, early_stopping, learning_rate):
tryone = TryOne(true_train_dataset, validation_dataset, user_embeddings, n_latent, max_epoch,learning_rate,R)
best_user_embeddings, best_epoch = tryone.train(early_stopping)
print(best_epoch)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, true_train_dataset,valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t', HR, '\t', NDCG, '\t', MAE, '\t', RMSE)
del recommendation_list
# PMF uses Bayesian priors to set the regularization coefficients and prevent overfitting
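# For reference (standard PMF formulation, a sketch rather than something taken from this codebase):
# maximizing the Gaussian likelihood with zero-mean Gaussian priors on the factor matrices is
# equivalent to minimizing
#   sum over observed (u, i) of (r_ui - p_u . q_i)^2 + lambda_U * ||P||_F^2 + lambda_V * ||Q||_F^2,
# where lambda_U = sigma^2 / sigma_U^2 and lambda_V = sigma^2 / sigma_V^2 come from the prior variances.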
def PMF_main(max_epoch, early_stopping,learning_rate):
pmf = PMF(true_train_dataset, validation_dataset, user_embeddings, item_embeddings, n_latent, max_epoch,learning_rate)
best_user_embeddings, best_item_embeddings, best_epoch = pmf.train(early_stopping)
# Prediction
prediction = PREDICTION(true_train_dataset)
estimated_rating_matrix = prediction.GET_ESTIMATED_RATING_MATRIX(best_user_embeddings, best_item_embeddings)
valid_items = estimated_rating_matrix.columns # DataFrame column names, i.e. the item labels
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]): # number of rows
row = estimated_rating_matrix.iloc[i] # each row becomes a Series of (column label, value); note that the positional row number (starting at 0) is distinct from the index label it maps to
user_id = row.name # Series.name holds the row's index label in the DataFrame
items = row.sort_values(ascending=False).index # sort the Series in descending order and keep only the index, i.e. the item labels
all_recommendation_list[user_id] = items
# Evaluation
print(best_epoch)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, true_train_dataset,valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t', HR, '\t' ,NDCG, '\t' ,MAE, '\t' ,RMSE)
del recommendation_list
def FM2_main(max_epoch, early_stopping,learning_rate):
fm2 = FM2(true_train_dataset, validation_dataset, user_embeddings, item_embeddings, n_latent, max_epoch,learning_rate)
best_w0, best_w_user, best_w_item, best_user_embeddings, best_item_embeddings, best_epoch = fm2.train(early_stopping)
# Prepare the predicted rating matrix
max_user = max(true_train_dataset['user'])
max_item = max(true_train_dataset['item'])
estimated_rating_matrix = pd.DataFrame(np.zeros((max_user + 1, max_item + 1)))
for row in true_train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = -9999
estimated_rating_matrix = estimated_rating_matrix.loc[~(estimated_rating_matrix == 0).all(axis=1)]
estimated_rating_matrix = estimated_rating_matrix.loc[:, (estimated_rating_matrix != 0).any(axis=0)]
# Compute the predicted rating matrix
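# Derivation note (for reference): with only the user and item embeddings active, the FM pairwise term
#   0.5 * sum_f [ (sum_i v_{i,f} x_i)^2 - sum_i (v_{i,f} x_i)^2 ]
# reduces to 0.5 * sum_f [ (u_f + q_f)^2 - (u_f^2 + q_f^2) ] = u . q,
# which is exactly what the interaction_1 / interaction_2 lines below compute.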
for user_index, user_embedding in best_user_embeddings.items():
for item_index, item_embedding in best_item_embeddings.items():
if estimated_rating_matrix.loc[user_index, item_index] != -9999:
interaction_1 = user_embedding + item_embedding
interaction_2 = np.multiply(user_embedding, user_embedding) + np.multiply(item_embedding,item_embedding)
interaction = np.sum(np.multiply(interaction_1, interaction_1) - interaction_2) / 2
y = best_w0 + (best_w_user[int(user_index)] + best_w_item[int(item_index)]) + interaction
estimated_rating_matrix.loc[user_index, item_index] = y
valid_items = estimated_rating_matrix.columns # DataFrame column names, i.e. the item labels
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]): # number of rows
row = estimated_rating_matrix.iloc[i] # each row becomes a Series of (column label, value); note that the positional row number (starting at 0) is distinct from the index label it maps to
user_id = row.name # Series.name holds the row's index label in the DataFrame
items = row.sort_values(ascending=False).index # sort the Series in descending order and keep only the index, i.e. the item labels
all_recommendation_list[user_id] = items
# Evaluation
prediction = PREDICTION(true_train_dataset)
print(best_epoch)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, true_train_dataset,valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t', HR, '\t' ,NDCG, '\t' ,MAE, '\t' ,RMSE)
del recommendation_list
def SAE_main(max_epoch, early_stopping,learning_rate): # only entries with target > 0 matter; the zero entries of the training set must not be used (compare with the matrix-factorization methods and this is obvious)
sae = SAE(item_size, n_latent)
criterion = nn.MSELoss() # MSE is the mean squared error; RMSE is the root mean squared error
optimizer = optim.RMSprop(sae.parameters(), lr = learning_rate,weight_decay = 0) # RMSprop (root mean square propagation) is a refinement of AdaGrad; weight_decay is a regularization knob applied in the same place as momentum
# Train the model
min_validation_error = np.inf
best_epoch = 0
error_counter = 0
for _ in range(max_epoch):
error_counter += 1
output_train = sae(input_train)
output_train[target_train == 0] = 0
output_train_new = output_train[target_train != 0]
target_train_new = target_train[target_train != 0]
sae.zero_grad()
train_loss = criterion(output_train_new, target_train_new) # do not use the total entry count as the denominator, otherwise the loss is biased low and the convergence direction is badly affected
train_loss.backward() # using optimizer.zero_grad() here actually converged more slowly, and raising lr oscillates at large error - why?
optimizer.step()
sae.eval()
output_validation = sae(input_validation)
output_validation[target_validation ==0] = 0
output_validation_new = output_validation[target_validation != 0 ]
target_validation_new = target_validation[target_validation != 0 ]
validation_loss = criterion(output_validation_new, target_validation_new)
#print('Training loss:', train_loss.item(), 'Validation loss', validation_loss.item())
if validation_loss.item() < min_validation_error:
min_validation_error = validation_loss.item()
torch.save(sae, 'best_sae_model.pkl')
best_epoch = _
error_counter = 0
if error_counter >= early_stopping:
break
best_sae = torch.load('best_sae_model.pkl')
best_sae.eval()
estimated_rating_matrix = deepcopy(true_train_dataset_rating_matrix)
for row in estimated_rating_matrix.iloc():
input = torch.tensor(row.values, dtype = torch.float32)
output = best_sae(input).detach().numpy()
estimated_rating_matrix.loc[row.name] = output
for row in train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = -9999
valid_items = estimated_rating_matrix.columns
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]): # .shape[0] is the number of rows, .shape[1] the number of columns
row = estimated_rating_matrix.iloc[i]
user_id = row.name
items = row.sort_values(ascending=False).index
all_recommendation_list[user_id] = items
prediction = PREDICTION(train_dataset)
print(best_epoch)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, train_dataset,valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t', HR, '\t' ,NDCG, '\t' ,MAE, '\t' ,RMSE)
del recommendation_list
# Without negative samples the results look very poor
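# Illustrative sketch only: the negative_samples used later in this file are built elsewhere, and the
# names below are assumptions. With BCEWithLogitsLoss and positives only, the model can trivially push
# every logit up, so uniform negative sampling is the usual remedy:
# negatives = []
# for u, pos_items in true_train_dataset.groupby('user')['item']:
#     seen = set(pos_items)
#     for _ in range(4 * len(seen)):            # e.g. 4 negatives per positive
#         j = np.random.randint(item_num)
#         while j in seen:
#             j = np.random.randint(item_num)
#         negatives.append((u, j, 0))
# negative_samples = pd.DataFrame(negatives, columns=['user', 'item', 'rating'])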
def GMF_main(max_epoch, early_stopping,learning_rate):
GMF_model = NCF(user_num, item_num, n_latent, 'GMF')
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(GMF_model.parameters(), lr=learning_rate)
min_validation_error = np.inf
best_epoch = 0
error_counter = 0
for _ in range(max_epoch):
error_counter += 1
output_train = GMF_model(user_train, item_train).unsqueeze(1) # check with .size(): without unsqueeze the shape is [x] rather than [x, 1]
train_loss = criterion(output_train,target_train)
GMF_model.zero_grad() # zero the gradients before .backward(), otherwise they accumulate; the SAE and FM code above did not do this, and without it divergence is faster and the numbers are larger
train_loss.backward() # the whole batch is fed at once, so there is no per-record accumulation (train_loss += loss, count ...) as in the original code, which fed one record at a time
optimizer.step()
GMF_model.eval() # turn off dropout for the validation pass
output_validation = GMF_model(user_validation, item_validation).unsqueeze(1)
validation_loss = criterion(output_validation, target_validation) # mind the argument order, otherwise negative values can appear
#print('Training loss:', train_loss.item(), 'Validation loss:', validation_loss.item())
if validation_loss.item() < min_validation_error:
min_validation_error = validation_loss.item()
torch.save(GMF_model, 'best_GMF_model.pkl')
best_epoch = _
error_counter = 0
if error_counter>= early_stopping:
break
best_GMF_model = torch.load('best_GMF_model.pkl')
best_GMF_model.eval()
# Compute every user's predicted score for every item. Instead of the usual element-by-element loop, feed whole vectors at a time to exploit fast batched tensor operations.
estimated_rating_matrix = data_ratings.GET_TRAIN_RATING_MATRIX(true_train_dataset)
row_size = estimated_rating_matrix.shape[0]
user_rc = torch.tensor(estimated_rating_matrix.index.values.tolist()).unsqueeze(1).long() # pairs up with item_rc and serves as the batched model input
prediction = PREDICTION(true_train_dataset)
columns_set = estimated_rating_matrix.columns.values.tolist()
for i in range(len(columns_set)): # work column by column, since updating a whole column is easier
item_rc = torch.tensor([columns_set[i] for size in range(row_size)]).unsqueeze(1)
pred = best_GMF_model(user_rc, item_rc).tolist()
estimated_rating_matrix[columns_set[i]] = pred
# Finally mask the training interactions with -9999 before ranking; there may be a better way than iterating one by one (this part is always a bit slow)
for row in true_train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = -9999
valid_items = estimated_rating_matrix.columns # DataFrame column names, i.e. the item labels
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]): # number of rows
row = estimated_rating_matrix.iloc[i] # each row becomes a Series of (column label, value); note that the positional row number (starting at 0) is distinct from the index label it maps to
user_id = row.name # Series.name holds the row's index label in the DataFrame
items = row.sort_values(ascending=False).index # sort the Series in descending order and keep only the index, i.e. the item labels
all_recommendation_list[user_id] = items
print(best_epoch)
# Evaluation
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, true_train_dataset,valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t', HR, '\t' ,NDCG, '\t' ,MAE, '\t' ,RMSE)
del recommendation_list
# Running MLP on the CPU used to freeze the machine, so run it on the GPU here
def MLP_main(max_epoch, early_stopping,learning_rate):
MLP_model = NCF(user_num, item_num, n_latent, 'MLP')
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(MLP_model.parameters(), lr=learning_rate)
min_validation_error = np.inf
best_epoch = 0
error_counter = 0
for _ in range(max_epoch):
error_counter += 1
output_train = MLP_model(user_train, item_train).unsqueeze(1)
train_loss = criterion(output_train, target_train)
MLP_model.zero_grad()
train_loss.backward()
optimizer.step()
MLP_model.eval()
output_validation = MLP_model(user_validation, item_validation).unsqueeze(1)
validation_loss = criterion(output_validation, target_validation)
# print('Training loss:', train_loss.item(), 'Validation loss:', validation_loss.item())
if validation_loss.item() < min_validation_error:
min_validation_error = validation_loss.item()
torch.save(MLP_model, 'best_MLP_model.pkl')
best_epoch = _
error_counter = 0
if error_counter >= early_stopping:
break
best_MLP_model = torch.load('best_MLP_model.pkl')
best_MLP_model.eval()
# Compute every user's predicted score for every item. Instead of the usual element-by-element loop, feed whole vectors at a time to exploit fast batched tensor operations.
estimated_rating_matrix = data_ratings.GET_TRAIN_RATING_MATRIX(true_train_dataset)
row_size = estimated_rating_matrix.shape[0]
user_rc = torch.tensor(estimated_rating_matrix.index.values.tolist()).unsqueeze(1).long()
prediction = PREDICTION(true_train_dataset)
columns_set = estimated_rating_matrix.columns.values.tolist()
for i in range(len(columns_set)):
item_rc = torch.tensor([columns_set[i] for size in range(row_size)]).unsqueeze(1)
pred = best_MLP_model(user_rc, item_rc).tolist()
estimated_rating_matrix[columns_set[i]] = pred
# Finally mask the training interactions with -9999 before ranking; there may be a better way than iterating one by one (this part is always a bit slow)
for row in true_train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = -9999
valid_items = estimated_rating_matrix.columns
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]):
row = estimated_rating_matrix.iloc[i]
user_id = row.name
items = row.sort_values(ascending=False).index
all_recommendation_list[user_id] = items
print(best_epoch)
# Evaluation
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, true_train_dataset,valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t', HR, '\t' ,NDCG, '\t' ,MAE, '\t' ,RMSE)
del recommendation_list
# Running this model locally, n_latent = 32 is probably the limit: memory usage goes through the roof
def NeuMF_main(max_epoch, early_stopping,learning_rate):
# Pre-train the GMF model
GMF_model = NCF(user_num, item_num, n_latent, 'GMF')
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(GMF_model.parameters(), lr=0.01)
min_validation_error = np.inf
error_counter = 0
GMF_pretrain_best_epoch = 0
for _ in range(max_epoch):
error_counter += 1
output_train = GMF_model(user_train, item_train).unsqueeze(1)
train_loss = criterion(output_train, target_train)
GMF_model.zero_grad()
train_loss.backward()
optimizer.step()
GMF_model.eval()
output_validation = GMF_model(user_validation, item_validation).unsqueeze(1)
validation_loss = criterion(output_validation, target_validation)
# print('Pre-Train GMF loss:', train_loss.item(), 'Validation loss:', validation_loss.item())
if validation_loss.item() < min_validation_error:
min_validation_error = validation_loss.item()
torch.save(GMF_model, 'best_GMF_model.pkl')
GMF_pretrain_best_epoch = _
error_counter = 0
if error_counter >= early_stopping:
break
best_GMF_model = torch.load('best_GMF_model.pkl')
best_GMF_model.eval()
# Pre-train the MLP model
MLP_model = NCF(user_num, item_num, n_latent, 'MLP')
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(MLP_model.parameters(), lr=0.01)
min_validation_error = np.inf
error_counter = 0
MLP_pretrain_best_epoch = 0
for _ in range(max_epoch):
error_counter += 1
output_train = MLP_model(user_train, item_train).unsqueeze(1)
train_loss = criterion(output_train, target_train)
MLP_model.zero_grad()
train_loss.backward()
optimizer.step()
MLP_model.eval()
output_validation = MLP_model(user_validation, item_validation).unsqueeze(1)
validation_loss = criterion(output_validation, target_validation)
# print('Pre-Train MLP loss:', train_loss.item(), 'Validation loss:', validation_loss.item())
if validation_loss.item() < min_validation_error:
min_validation_error = validation_loss.item()
torch.save(MLP_model, 'best_MLP_model.pkl')
MLP_pretrain_best_epoch = _
error_counter = 0
if error_counter >= early_stopping:
break
best_MLP_model = torch.load('best_MLP_model.pkl')
best_MLP_model.eval()
# Train the final NeuMF model
NeuMF_model = NCF(user_num, item_num, n_latent, 'NeuMF', best_GMF_model, best_MLP_model)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.SGD(NeuMF_model.parameters(), lr=learning_rate)
min_validation_error = np.inf
NCF_best_epoch = 0
error_counter = 0
for _ in range(max_epoch):
error_counter += 1
output_train = NeuMF_model(user_train, item_train).unsqueeze(1)
train_loss = criterion(output_train, target_train)
NeuMF_model.zero_grad()
train_loss.backward()
optimizer.step()
NeuMF_model.eval()
output_validation = NeuMF_model(user_validation, item_validation).unsqueeze(1)
validation_loss = criterion(output_validation, target_validation)
# print('Training loss:', train_loss.item(), 'Validation loss:', validation_loss.item())
if validation_loss.item() < min_validation_error:
min_validation_error = validation_loss.item()
torch.save(NeuMF_model, 'best_NeuMF_model.pkl')
NCF_best_epoch = _
error_counter = 0
if error_counter >= early_stopping:
break
best_NeuMF_model = torch.load('best_NeuMF_model.pkl')
best_NeuMF_model.eval()
# Predict and compute the metrics
estimated_rating_matrix = data_ratings.GET_TRAIN_RATING_MATRIX(true_train_dataset)
row_size = estimated_rating_matrix.shape[0]
user_rc = torch.tensor(estimated_rating_matrix.index.values.tolist()).unsqueeze(1).long()
prediction = PREDICTION(true_train_dataset)
columns_set = estimated_rating_matrix.columns.values.tolist()
for i in range(len(columns_set)):
item_rc = torch.tensor([columns_set[i] for size in range(row_size)]).unsqueeze(1)
pred = best_NeuMF_model(user_rc, item_rc).tolist()
estimated_rating_matrix[columns_set[i]] = pred
for row in true_train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = -9999
valid_items = estimated_rating_matrix.columns
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]):
row = estimated_rating_matrix.iloc[i]
user_id = row.name
items = row.sort_values(ascending=False).index
all_recommendation_list[user_id] = items
print(GMF_pretrain_best_epoch, '\t' , MLP_pretrain_best_epoch, '\t' ,NCF_best_epoch)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, true_train_dataset,valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t', HR, '\t' ,NDCG, '\t' ,MAE, '\t' ,RMSE)
del recommendation_list
def TransE_main(max_epoch, early_stopping,learning_rate):
transE = TransE(user_list, item_list, relation_list, triplet_list, 1 , n_latent,learning_rate) # the 1 here is the margin
transE.initialize()
best_user_embeddings, best_item_embeddings, best_epoch = transE.transE(max_epoch,early_stopping, validation_dataset)
# Prediction: computing the distance for every user-item pair is a slow double for-loop, so convert it to matrix operations, similar to the FM trick of turning a double loop into several single loops
prediction = PREDICTION(true_train_dataset)
user_embeddings_matrix = pd.DataFrame.from_dict(best_user_embeddings,orient='index')
item_embeddings_matrix = pd.DataFrame.from_dict(best_item_embeddings)
estimated_rating_matrix = user_embeddings_matrix.dot(item_embeddings_matrix)
user_square = deepcopy(estimated_rating_matrix)
for user in user_square.index.values:
user_square.loc[user,:] = sum([i**2 for i in best_user_embeddings[user]])
item_square = deepcopy(estimated_rating_matrix)
for item in item_square.columns.values:
item_square.loc[:,item] = sum([i**2 for i in best_item_embeddings[item]])
estimated_rating_matrix = -2 * estimated_rating_matrix + user_square + item_square
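# Derivation note (for reference): the squared Euclidean distance ||u - q||^2 = ||u||^2 + ||q||^2 - 2 * u.q,
# so the dot-product matrix, user_square and item_square above give the pairwise squared distances
# for all user-item combinations in a single matrix expression.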
for row in true_train_dataset.iloc():
estimated_rating_matrix.loc[row['user'], row['item']] = 9999
valid_items = estimated_rating_matrix.columns
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]):
row = estimated_rating_matrix.iloc[i]
user_id = row.name
items = row.sort_values(ascending=True).index # note ascending=True here: under TransE a smaller distance means the nodes are more similar
all_recommendation_list[user_id] = items
# Evaluation
print(best_epoch)
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, true_train_dataset,valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t', HR, '\t', NDCG, '\t', MAE, '\t', RMSE)
del recommendation_list
def BiNE_walk_generator(gul,args):
print("calculate centrality...")
gul.calculate_centrality(args.mode) # mode defaults to hits; this fills gul's private dict attributes self.authority_u and self.authority_v
if args.large == 0: # large defaults to 0
gul.homogeneous_graph_random_walks(percentage = args.p, maxT = args.maxT, minT = args.minT)
elif args.large == 1:
gul.homogeneous_graph_random_walks_for_large_bipartite_graph(percentage = args.p, maxT = args.maxT, minT = args.minT)
elif args.large == 2:
gul.homogeneous_graph_random_walks_for_large_bipartite_graph_without_generating(datafile = args.train_data, percentage = args.p, maxT = args.maxT, minT = args.minT)
return gul
def BiNE_get_context_and_negative_samples(gul, args):
if args.large == 0: # defaults to 0
neg_dict_u, neg_dict_v = gul.get_negs(args.ns) # ns is the number of negative samples.
print("negative samples is ok.....") # the return values are nested dicts as well; some details (especially the hashing) are unclear, but that can wait
# ws is the window size, ns the number of negative samples, and G_u the homogeneous graph containing only users
context_dict_u, neg_dict_u = gul.get_context_and_negatives(gul.G_u, gul.walks_u, args.ws, args.ns, neg_dict_u)
context_dict_v, neg_dict_v = gul.get_context_and_negatives(gul.G_v, gul.walks_v, args.ws, args.ns, neg_dict_v)
else:
neg_dict_u, neg_dict_v = gul.get_negs(args.ns)
print("negative samples is ok.....")
context_dict_u, neg_dict_u = gul.get_context_and_negatives(gul.node_u, gul.walks_u, args.ws, args.ns, neg_dict_u)
context_dict_v, neg_dict_v = gul.get_context_and_negatives(gul.node_v, gul.walks_v, args.ws, args.ns, neg_dict_v)
return context_dict_u, neg_dict_u, context_dict_v, neg_dict_v, gul.node_u, gul.node_v # gul.node_u and gul.node_v are lists
def BiNE_init_embedding_vectors(node_u, node_v, node_list_u, node_list_v, args):
for i in node_u:
vectors = np.random.random([1, args.d]) # a 1 x d array of random floats in (0, 1), where d is the embedding size (default 128)
help_vectors = np.random.random([1, args.d])
node_list_u[i] = {} # node_list_u is a nested dict, because every node carries both embedding vectors and context vectors
# sklearn's preprocessing.normalize; norm can be l1 (divide by the sum of absolute values), l2 (divide by the Euclidean norm) or max (divide by the largest value); the default is l2
node_list_u[i]['embedding_vectors'] = preprocessing.normalize(vectors, norm = 'l2')
node_list_u[i]['context_vectors'] = preprocessing.normalize(help_vectors, norm = 'l2')
for i in node_v:
vectors = np.random.random([1, args.d])
help_vectors = np.random.random([1, args.d])
node_list_v[i] = {}
node_list_v[i]['embedding_vectors'] = preprocessing.normalize(vectors, norm = 'l2')
node_list_v[i]['context_vectors'] = preprocessing.normalize(help_vectors, norm = 'l2')
return node_list_u, node_list_v
def BiNE_skip_gram(center, contexts, negs, node_list, lam, pa): # arguments correspond to u, z, neg_u, node_list_u, lam, alpha; z is fed one element of context_u at a time, and training the item side works the same way
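# For reference (standard skip-gram with negative sampling, a sketch rather than the author's wording):
# the loop below performs one gradient step on
#   L = sum over u' in {center} union negs of [ I(u') * log(sigma(V . Theta_u')) + (1 - I(u')) * log(1 - sigma(V . Theta_u')) ],
# where V is the context node's embedding, Theta_u' the candidate's context vector, and I(u') = 1 only for the positive centre node.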
loss = 0
I_z = {center: 1} # indicator function
for node in negs:
I_z[node] = 0
V = np.array(node_list[contexts]['embedding_vectors'])
update = [[0] * V.size]
for u in I_z.keys():
if node_list.get(u) is None:
pass
Theta = np.array(node_list[u]['context_vectors'])
X = float(V.dot(Theta.T))
sigmod = 1.0 / (1 + (math.exp(-X * 1.0)))
update += pa * lam * (I_z[u] - sigmod) * Theta
node_list[u]['context_vectors'] += pa * lam * (I_z[u] - sigmod) * V
try:
loss += pa * (I_z[u] * math.log(sigmod) + (1 - I_z[u]) * math.log(1 - sigmod))
except:
pass
return update, loss
def BiNE_KL_divergence(edge_dict_u, u, v, node_list_u, node_list_v, lam, gamma):
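# For reference (a sketch of what this implements): one gradient-ascent step on the observed-edge term
#   L0 = gamma * w_ij * log(sigma(U . V))
# of the BiNE objective, whose gradient with respect to U is gamma * w_ij * (1 - sigma(U . V)) * V
# (and symmetrically for V); this is what update_u / update_v accumulate below, scaled by the learning rate lam.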
loss = 0
e_ij = edge_dict_u[u][v]
update_u = 0
update_v = 0
U = np.array(node_list_u[u]['embedding_vectors'])
V = np.array(node_list_v[v]['embedding_vectors'])
X = float(U.dot(V.T))
sigmod = 1.0 / (1 + (math.exp(-X * 1.0)))
update_u += gamma * lam * ((e_ij * (1 - sigmod)) * 1.0 / math.log(math.e, math.e)) * V
update_v += gamma * lam * ((e_ij * (1 - sigmod)) * 1.0 / math.log(math.e, math.e)) * U
try:
loss += gamma * e_ij * math.log(sigmod)
except:
pass
return update_u, update_v, loss
def BiNE_train_by_sampling(train_dataset,test_dataset,args):
print('======== experiment settings =========')
alpha, beta, gamma, lam = args.alpha, args.beta, args.gamma, args.lam
print("constructing graph....")
gul = GraphUtils()
gul.construct_training_graph(train_dataset) # builds the bipartite graph
edge_dict_u = gul.edge_dict_u # a two-level nested dict; dict[user][item] returns the corresponding rating
edge_list = gul.edge_list # i.e. [(user, item, rating), ...]
BiNE_walk_generator(gul, args) # should this return gul? The original code had no "gul ="; on reflection no return is needed, since every change (in particular the generated walks) is stored on gul in place
print("getting context and negative samples....")
context_dict_u, neg_dict_u, context_dict_v, neg_dict_v, node_u, node_v = BiNE_get_context_and_negative_samples(gul, args)
print("============== training ==============")
for i, n_latent in enumerate([4, 8, 16, 32, 64, 128]):
print('BiNE', 'n_latent=', n_latent)
args.max_iter = 200
args.d = n_latent
node_list_u, node_list_v = {}, {}
node_list_u, node_list_v = BiNE_init_embedding_vectors(node_u, node_v, node_list_u, node_list_v, args)
last_loss, count, epsilon = 0, 0, 1e-3
for iter in range(0, args.max_iter):
# s1 = "\r[%s%s]%0.2f%%" % ("*" * iter, " " * (args.max_iter - iter), iter * 100.0 / (args.max_iter - 1)) # this would print a progress bar
loss = 0
visited_u = dict(zip(node_list_u.keys(), [0] * len(node_list_u.keys()))) # node_list_u and node_list_v hold every user's and item's embedding vectors
visited_v = dict(zip(node_list_v.keys(), [0] * len(node_list_v.keys()))) # visited_u is a dict of the form {u1: 0, ...}, and visited_v likewise; their values are reset to zero at every iteration
random.shuffle(edge_list) # an attribute of gul holding the (user, item, rating) triples
for i in range(len(edge_list)): # for every edge
u, v, w = edge_list[i]
# Train the user embeddings
length = len(context_dict_u[u])
random.shuffle(context_dict_u[u])
if visited_u.get(u) < length:
index_list = list(range(visited_u.get(u), min(visited_u.get(u) + 1,length))) # range(start, stop) excludes stop, so this yields just the single index visited_u.get(u)
for index in index_list:
context_u = context_dict_u[u][index]
neg_u = neg_dict_u[u][index]
for z in context_u:
tmp_z, tmp_loss = BiNE_skip_gram(u, z, neg_u, node_list_u, lam, alpha)
node_list_u[z]['embedding_vectors'] += tmp_z # keys of the nested dict can be string names
loss += tmp_loss
visited_u[u] = index_list[-1] + 3 # where the cursor moves to after this round
# Train the item embeddings
length = len(context_dict_v[v])
random.shuffle(context_dict_v[v])
if visited_v.get(v) < length:
index_list = list(range(visited_v.get(v), min(visited_v.get(v) + 1, length)))
for index in index_list:
context_v = context_dict_v[v][index]
neg_v = neg_dict_v[v][index]
for z in context_v:
tmp_z, tmp_loss = BiNE_skip_gram(v, z, neg_v, node_list_v, lam, beta)
node_list_v[z]['embedding_vectors'] += tmp_z # also the skip-gram update; the contributions are summed
loss += tmp_loss
visited_v[v] = index_list[-1] + 3
update_u, update_v, tmp_loss = BiNE_KL_divergence(edge_dict_u, u, v, node_list_u, node_list_v, lam,
gamma)
loss += tmp_loss
node_list_u[u]['embedding_vectors'] += update_u
node_list_v[v]['embedding_vectors'] += update_v
delta_loss = abs(loss - last_loss)
if last_loss > loss:
lam *= 1.05
else:
lam *= 0.95
last_loss = loss
if delta_loss < epsilon:
break
# sys.stdout.write(s1)
# sys.stdout.flush()
# save_to_file(node_list_u, node_list_v, args) # what we ultimately need are the users' and items' embedding vectors and context vectors
print("")
# During recommendation, the dot product of the matching user and item embedding vectors gives the predicted score used for ranking
# Prediction
best_user_embeddings = {}
best_item_embeddings = {}
for key, value in node_list_u.items():
best_user_embeddings[int(key[1:])] = value['embedding_vectors'].squeeze() # squeeze one dimension out of the numpy array here
for key, value in node_list_v.items():
best_item_embeddings[int(key[1:])] = value['embedding_vectors'].squeeze()
prediction = PREDICTION(train_dataset)
estimated_rating_matrix = prediction.GET_ESTIMATED_RATING_MATRIX(best_user_embeddings, best_item_embeddings)
valid_items = estimated_rating_matrix.columns
all_recommendation_list = {}
for i in range(estimated_rating_matrix.shape[0]):
row = estimated_rating_matrix.iloc[i]
user_id = row.name
items = row.sort_values(ascending=False).index
all_recommendation_list[user_id] = items
for recommendation_size in range(10, 60, 10):
recommendation_list = prediction.GET_RECOMMENDATION_LIST(recommendation_size, all_recommendation_list)
evaluation = EVALUATION(estimated_rating_matrix, recommendation_size, recommendation_list, train_dataset,
valid_items, test_dataset)
Precision, HR, NDCG, MAE, RMSE = evaluation.MERICS_01()
print(Precision, '\t', HR, '\t', NDCG, '\t', MAE, '\t', RMSE)
del recommendation_list
def BiNE_main(max_epoch, early_stopping):
parser = ArgumentParser("BiNE", formatter_class=ArgumentDefaultsHelpFormatter, conflict_handler='resolve')
# 'BiNE' is the program name; formatter_class customizes the help output, and ArgumentDefaultsHelpFormatter automatically appends each argument's default value to its help text;
# conflict_handler sets the strategy for resolving conflicting options (usually unnecessary); the value 'resolve' can be passed to ArgumentParser's conflict_handler= parameter
parser.add_argument('--train-data', default='rating_train.csv',help='Input graph file.') # the help strings are shown alongside each argument when the user asks for help (usually via -h or --help on the command line)
parser.add_argument('--test-data', default='rating_test.csv') # option names are declared with a leading --, but are later accessed as plain attributes with . like any other object
parser.add_argument('--model-name', default='default', help='name of model.')
parser.add_argument('--vectors-u', default='vectors_u.dat', help="file of embedding vectors of U")
parser.add_argument('--vectors-v', default='vectors_v.dat', help="file of embedding vectors of V")
parser.add_argument('--case-train', default='case_train.dat', help="file of training data for LR")
parser.add_argument('--case-test', default='case_test.dat', help="file of testing data for LR")
parser.add_argument('--ws', default=5, type=int,help='window size.') # the parser treats incoming options as strings by default, so type=int must be given to get an integer
parser.add_argument('--ns', default=4, type=int, help='number of negative samples.')
parser.add_argument('--d', default=128, type=int, help='embedding size.') # the embedding dimension can be changed later if we iterate over it
parser.add_argument('--maxT', default=32, type=int, help='maximal walks per vertex.')
parser.add_argument('--minT', default=1, type=int, help='minimal walks per vertex.')
parser.add_argument('--p', default=0.15, type=float, help='walk stopping probability.')
parser.add_argument('--alpha', default=0.01, type=float, help='trade-off parameter alpha.')
parser.add_argument('--beta', default=0.01, type=float, help='trade-off parameter beta.')
parser.add_argument('--gamma', default=0.1, type=float, help='trade-off parameter gamma.')
parser.add_argument('--lam', default=0.01, type=float, help='learning rate lambda.')
parser.add_argument('--max-iter', default=max_epoch, type=int, help='maximal number of iterations.')
parser.add_argument('--stop', default=early_stopping, type=int, help='early stopping number of iterations.')
parser.add_argument('--top-n', default=10, type=int, help='recommend top-n items for each user.')
parser.add_argument('--rec', default=1, type=int, help='calculate the recommendation metrics.')
parser.add_argument('--lip', default=0, type=int, help='calculate the link prediction metrics.')
parser.add_argument('--large', default=0, type=int,help='for large bipartite, 1 do not generate homogeneous graph file; 2 do not generate homogeneous graph (this description is questionable)')
parser.add_argument('--mode', default='hits', type=str, help='metrics of centrality')
args = parser.parse_args() # converts the argument strings to objects and sets them as attributes of a namespace; returns the populated namespace
BiNE_train_by_sampling(true_train_dataset,test_dataset,args) # pass in all the parameters held in the namespace
def construct_smples_for_GMF_MLP_NCF():
validation_samples = validation_dataset # kept at the outermost level because the same tensors are used every time, avoiding repeated work
user_validation = torch.tensor(validation_samples['user'].tolist()).unsqueeze(1).long()
item_validation = torch.tensor(validation_samples['item'].tolist()).unsqueeze(1).long()
target_validation = torch.tensor(validation_samples['rating'].tolist()).unsqueeze(1).float()
user_num = max(train_dataset['user']) + 1
item_num = max(train_dataset['item']) + 1
true_train_samples = | pd.concat([true_train_dataset, negative_samples]) | pandas.concat |
import torch
from pathlib import Path
import librosa
import numpy as np
from torch.utils.data import Dataset, DataLoader
import json
import pandas as pd
import os
import math
from PIL import Image
import warnings
from helpers.audio_utils import *
from dataloaders.imbalanced_dataset_sampler import ImbalancedDatasetSampler
warnings.filterwarnings("ignore")
class AudioDatasetV2(Dataset):
def __init__(self, root_dir, csv_dir, conf, bird_code, inv_ebird_label, background_audio_dir=None, xeno_csv=None, xeno_dir=None, file_type="ogg", num_splits=5, apply_mixer = False, isTraining=True, transform=None):
self.root_dir = root_dir
df = pd.read_csv(csv_dir)
df.secondary_labels = df.secondary_labels.apply(eval)
df["xeno_source"] = False
self.transform = transform
self.conf = conf
self.num_splits = num_splits
self.isTraining = isTraining
self.apply_mixer = apply_mixer
self.bird_code = bird_code
self.inv_ebird_label = inv_ebird_label
self.file_type = file_type
self.additional_loader_params = {
"worker_init_fn": self.init_workers_fn
}
self.sampler = ImbalancedDatasetSampler
if xeno_csv is not None:
self.xeno_dir = xeno_dir
df_xeno = | pd.read_csv(xeno_csv) | pandas.read_csv |
import requests
import re
from bs4 import BeautifulSoup
import pandas as pd
import sys
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import QUrl
from PyQt4.QtWebKit import QWebPage
import bs4 as bs
import urllib.request
import os
import datetime
###############################################################
path_of_brandwise = 'C:\\LavaWebScraper\\BrandWiseFiles\\'
###############################################################
base_url = 'http://www.infocusindia.co.in/mobile-phones/'
#ur='https://www.nokia.com/en_int/phones/'
country = 'USA'
company = 'INFOCUS'
model_list = []
usp = []
display_list = []
memory_list = []
processor_list = []
camera_list = []
battery_list = []
thickness_list = []
extras_links = []
records = []
href = []
st_list_heads=[]
st_list_dets=[]
hr=[]
spec_url=[]
r=requests.get(base_url)
soup=BeautifulSoup(r.text,'html.parser')
results=soup.find_all('div',attrs={'class':'row'})
for i in range(len(results)):
sa=results[i].find_all('div',attrs={'class':'col-md-3 col-sm-6'})
for a in range(len(sa)):
sb=sa[a].find_all('h2')
for b in range(len(sb)):
href.append(sb[b].find('a')['href'])
model_list.append(sb[b].text.strip())
sc=sa[a].find_all('table',attrs={'class':'product-carousel-price'})
for c in range(len(sc)):
u=''
sd=sc[c].find_all('td')
for d in range(len(sd)):
se=sd[d].find_all('li')
for e in range(len(se)):
u=u+se[e].text.replace('•',' ').strip()+' || '
usp.append(u)
for i in range(len(href)):
p1=''
m1=''
c1=''
d1=''
r=requests.get(href[i])
soup=BeautifulSoup(r.text,'html.parser')
results=soup.find_all('div',attrs={'class':'col-sm-8 left'})
#print(len(results))
for a in range(len(results)):
sa=results[a].find_all('div',attrs={'class':'figure'})
if len(sa)!=0:
for b in range(len(sa)):
sb=sa[b].find_all('div',attrs={'class':'heading'})
for c in range(len(sb)):
if 'outward appearance' in sb[c].text.lower():
sc=sa[b].find_all('table')
for d in range(len(sc)):
sd=sc[d].find_all('tr')
for e in range(len(sd)):
se=sd[e].find_all('th')
sf=sd[e].find_all('td')
for f in range(len(se)):
if 'dimension' in se[f].text.lower():
thickness_list.append(sf[f].text)
if 'processor' in sb[c].text.lower():
sc=sa[b].find_all('table')
for d in range(len(sc)):
sd=sc[d].find_all('tr')
for e in range(len(sd)):
se=sd[e].find_all('th')
sf=sd[e].find_all('td')
for f in range(len(se)):
if 'model' in se[f].text.lower() or 'core' in se[f].text.lower():
p1=p1+(se[f].text+':-'+sf[f].text+' || ')
if 'display' in sb[c].text.lower():
sc=sa[b].find_all('table')
for d in range(len(sc)):
sd=sc[d].find_all('tr')
for e in range(len(sd)):
se=sd[e].find_all('th')
sf=sd[e].find_all('td')
for f in range(len(se)):
if 'dimension' in se[f].text.lower() or 'material' in se[f].text.lower():
d1=d1+(se[f].text+':-'+sf[f].text+' || ')
if 'battery' in sb[c].text.lower():
sc=sa[b].find_all('table')
for d in range(len(sc)):
sd=sc[d].find_all('tr')
for e in range(len(sd)):
se=sd[e].find_all('th')
sf=sd[e].find_all('td')
for f in range(len(se)):
if 'capacity' in se[f].text.lower():
battery_list.append(sf[f].text)
if 'storage' in sb[c].text.lower():
sc=sa[b].find_all('table')
for d in range(len(sc)):
sd=sc[d].find_all('tr')
for e in range(len(sd)):
se=sd[e].find_all('th')
sf=sd[e].find_all('td')
for f in range(len(se)):
if ('ram storage' in se[f].text.lower() or 'rom storage' in se[f].text.lower()) and('material' not in se[f].text.lower()):
m1=m1+(se[f].text+':-'+sf[f].text+' || ')
if 'camera' in sb[c].text.lower():
sc=sa[b].find_all('table')
for d in range(len(sc)):
sd=sc[d].find_all('tr')
for e in range(len(sd)):
se=sd[e].find_all('th')
sf=sd[e].find_all('td')
for f in range(len(se)):
if 'pixels' in se[f].text.lower() or 'material' in se[f].text.lower():
c1=c1+(sb[c].text.strip().replace('\n',' ')+':-'+sf[f].text.strip().replace('\n',' ')+' || ')
else:
sb=results[a].find_all('div',attrs={'class':'feature-icon'})
for b in range(len(sb)):
sc=sb[b].find_all('li')
for c in range(len(sc)):
sd=sc[c].find_all('p')
for d in range(len(sd)):
if 'GB' in sd[d].text:
m1=m1+sd[d].text+' || '
if 'mAh' in sd[d].text:
battery_list.append(sd[d].text)
if 'pixel' in sd[d].text.lower():
camera_list.append(sd[d].text)
if 'thickness' in sd[d].text.lower():
thickness_list.append(sd[d].text)
if 'cm' in sd[d].text or 'inch' in sd[d].text:
display_list.append(sd[d].text)
if p1!='':
processor_list.append(p1)
if c1!='':
camera_list.append(c1)
if d1!='':
display_list.append(d1)
if m1!='':
memory_list.append(m1)
if len(battery_list)==i:
battery_list.append('Not Available')
if len(memory_list)==i:
memory_list.append('Not Available')
if len(processor_list)==i:
processor_list.append('Not Available')
if len(display_list)==i:
display_list.append('Not Available')
if len(thickness_list)==i:
thickness_list.append('Not Available')
if len(camera_list)==i:
camera_list.append('Not Available')
if len(usp)==i:
usp.append('Not Available')
print(len(model_list))
print(len(usp))
print(len(thickness_list))
print(len(processor_list))
print(len(memory_list))
print(len(battery_list))
print(len(display_list))
print(len(camera_list))
extras_links = href
for i in range(len(model_list)):
records.append((country, company, model_list[i], usp[i], display_list[i], camera_list[i], memory_list[i], battery_list[i], thickness_list[i], processor_list[i], extras_links[i]))
df = | pd.DataFrame(records, columns = ['COUNTRY', 'COMPANY', 'MODEL', 'USP', 'DISPLAY', 'CAMERA', 'MEMORY', 'BATTERY', 'THICKNESS', 'PROCESSOR', 'EXTRAS/ LINKS']) | pandas.DataFrame |
import pandas as pd
import results
from phrasegeo import Matcher, MatcherPipeline
from time import time
# load up the db
db_name = 'GNAF_VIC'
DB = f"postgresql:///{db_name}"
db = results.db(DB)
# set up the matchers
matcher1 = Matcher(db, how='standard')
matcher2 = Matcher(db, how='slow')
matcher3 = Matcher(db, how='trigram')
# pipeline setup
pipeline = MatcherPipeline([matcher1, matcher2, matcher3])
# load up the test addresses
df = pd.read_csv('phrasegeo/datasets/addresses1.csv')
addresslist = list(df['ADDRESS'].values)
# another set of test addresses
df = | pd.read_csv('phrasegeo/datasets/nab_atm_vic.csv') | pandas.read_csv |
"""
Target Problem:
---------------
* To train a model to predict the brain connectivity for the next time point given the brain connectivity at current time point.
Proposed Solution (Machine Learning Pipeline):
----------------------------------------------
* K-NN
Input to Proposed Solution:
---------------------------
* Directories of training and testing data in csv file format
* These two types of data should be stored in n x m pattern in csv file format.
Typical Example:
----------------
n x m samples in training csv file (Explain n and m)
k x s samples in testing csv file (Explain k and s)
Output of Proposed Solution:
----------------------------
* Predictions generated by learning model for testing set
* They are stored in "results_team12.csv" file. (Change the file name if needed)
Code Owner:
-----------
* Copyright © Team 12. All rights reserved.
* Copyright © Istanbul Technical University, Learning From Data Spring/Fall 2020. All rights reserved.
"""
import pandas as pd
from sklearn.model_selection import KFold
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.neighbors import NearestNeighbors
from scipy.stats.stats import pearsonr
import random as r
r.seed(1)
np.random.seed(1)
import warnings
warnings.filterwarnings('ignore')
def load_data(csv):
"""
The method reads train and test data from their dataset files.
Then, it splits train data into features and labels.
Parameters
----------
csv: directory of the csv file in which the data set is located
"""
# reading the data from the csv files
df = pd.read_csv(csv, sep=',')
# ignoring the index column of the data (0,...,149 or 0,...,79)
df = df.drop(columns=['ID'])
df_np = df.to_numpy()
return df_np
def train_model(train_t0, neighbourCount):
"""
The method creates a learning model and trains it by using training data.
Parameters
----------
train_t0: x
neighbourCount: number of neigbours in KNN
"""
nbrs = []
train_t0_single = np.transpose(train_t0)
for i in range(train_t0_single.shape[0]):
nbrs.append(NearestNeighbors(n_neighbors=neighbourCount, algorithm='ball_tree').fit(train_t0_single[i].reshape(-1,1)))
return nbrs
def predict(train_t0, train_t1, test_t0, nbrs):
"""
The method makes predictions for testing data samples by using trained learning model.
Parameters
----------
train_t0: x
train_t1: y
test_t0: x_test
nbrs: Nearest Neigbors model for each feature
"""
train_t0_single = np.transpose(train_t0)
train_t1_single = np.transpose(train_t1)
test_t0_single = np.transpose(test_t0)
prediction = np.zeros_like(test_t0)
for i in range(train_t0_single.shape[0]):
distances, indices = nbrs[i].kneighbors(test_t0_single[i].reshape(-1,1))
distances = np.ones_like(distances)* 0.7 - distances
mul = np.multiply(distances, train_t1_single[i,indices])
pred = np.divide(np.mean(mul, axis =1), np.mean(distances, axis = 1))
prediction[:,i] = pred.reshape(-1)
nanLocations = np.isnan(prediction)
prediction[nanLocations] = 0
return prediction
def cv5(data_t0, data_t1, neighbourCount):
kf = KFold(n_splits=5 , shuffle = True, random_state=1)
prediction_all = np.zeros_like(data_t1)
mses= []
maes = []
pears = []
for trainIndex, testIndex in kf.split(data_t0):
train_t0, test_t0 = data_t0[trainIndex], data_t0[testIndex] #Split Data into train and test sets
train_t1, test_t1 = data_t1[trainIndex], data_t1[testIndex]
train_t0_single = np.transpose(train_t0) # Use features as rows and subjects as columns
train_t1_single = np.transpose(train_t1)
test_t0_single = np.transpose(test_t0)
prediction = np.zeros_like(test_t0)
preds = []
for i in range(train_t0_single.shape[0]): #Loop through each feature
nbrs = NearestNeighbors(n_neighbors= neighbourCount, algorithm='ball_tree').fit(train_t0_single[i].reshape(-1,1))
distances, indices = nbrs.kneighbors(test_t0_single[i].reshape(-1,1))# Calculate the distances and indices of K closest neighbours of test subjects and train subjects in t0
distances = np.ones_like(distances)* 0.7 - distances # Set distances to (0.7 - d). Neighbours with low distance get larger values and vice versa
mul = np.multiply(distances, train_t1_single[i,indices]) # Use the changed distances as weights and multiply the corresponding t1 of the neighbours
pred = np.divide(np.mean(mul,axis =1),np.mean(distances, axis = 1)) #Take the mean of the weighted t1's and divide by the mean of distances to normalize
prediction[:,i] = pred.reshape(-1) #This is the prediction for this feature acroos all test subjects
preds.append(pred.reshape(-1))
nanLocations = np.isnan(prediction)
prediction[nanLocations] = 0 # Set nan locations to 0
preds = np.asarray(preds)
preds = np.transpose(preds)
mses.append( mean_squared_error(preds, test_t1) )
maes.append( mean_absolute_error(preds, test_t1) )
pears.append(pearsonr(preds.flatten(), test_t1.flatten())[0] )
prediction_all[testIndex] = prediction # Put all predictions for each CV fold into prediction_all
mse_error = mean_squared_error(data_t1, prediction_all)
mae_error = mean_absolute_error(data_t1, prediction_all)
print("mses: ", mses)
print("maes: ", maes)
print("pears", pears)
print("Average error of five fold cross validation MSE:", np.sum(mses) / 5)
print("Average error of five fold cross validation MAE:", np.sum(maes) / 5)
print("Average error of five fold cross validation pearson:", np.sum(pears) / 5)
print(" std of five fold cross validation MSE:", np.std(mses))
print(" std of five fold cross validation MAE:", np.std(maes))
print(" std of five fold cross validation pearson:", np.std(pears))
return mae_error, mse_error, prediction_all
def write_output(filename, predictions):
test_df = | pd.DataFrame(predictions) | pandas.DataFrame |
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import spacepy.plot as splot
import datetime as dt
import matplotlib.dates as mdates
import pandas as pd
import statsmodels.api as sm
from scipy.interpolate import interp1d
from scipy import array
import numpy as np
import analysis as ala
import get_sd_data as gsd
np.random.seed(0)
splot.style("spacepy_altgrid")
fontT = {"family": "serif", "color": "k", "weight": "normal", "size": 8}
font = {"family": "serif", "color": "black", "weight": "normal", "size": 10}
from matplotlib import font_manager
ticks_font = font_manager.FontProperties(family="serif", size=10, weight="normal")
matplotlib.rcParams["xtick.color"] = "k"
matplotlib.rcParams["ytick.color"] = "k"
matplotlib.rcParams["xtick.labelsize"] = 10
matplotlib.rcParams["ytick.labelsize"] = 10
matplotlib.rcParams["mathtext.default"] = "default"
def extrap1d(x,y,kind="linear"):
""" This method is used to extrapolate 1D paramteres """
interpolator = interp1d(x,y,kind=kind)
xs = interpolator.x
ys = interpolator.y
def pointwise(x):
if x < xs[0]: return ys[0]+(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0])
elif x > xs[-1]: return ys[-1]+(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2])
else: return interpolator(x)
def ufunclike(xs):
return array(list(map(pointwise, array(xs))))
return ufunclike
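# Quick check (illustrative): given the samples (0, 0), (1, 1), (2, 4), values outside [0, 2]
# are extended linearly from the nearest segment:
# f = extrap1d(np.array([0., 1., 2.]), np.array([0., 1., 4.]))
# f([-1., 3.])  ->  array([-1., 7.])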
def coloring_axes(ax, atype="left", col="red"):
ax.spines[atype].set_color(col)
ax.tick_params(axis="y", which="both", colors=col)
ax.yaxis.label.set_color(col)
fmt = matplotlib.dates.DateFormatter("%H%M")
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(mdates.MinuteLocator(interval=5))
return ax
def coloring_twaxes(ax, atype="left", col="red", twcol="k"):
ax.spines[atype].set_color(col)
ax.tick_params(axis="y", which="both", colors=twcol)
ax.yaxis.label.set_color(twcol)
fmt = matplotlib.dates.DateFormatter("%H%M")
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(mdates.MinuteLocator(interval=5))
return ax
def example_riom_plot(ev=dt.datetime(2015,3,11,16,22), stn="ott",
start=dt.datetime(2015,3,11,16,10), end=dt.datetime(2015,3,11,16,30)):
riom = ala.Riometer(ev, stn).fetch()
gos = ala.GOES(ev).fetch()
fig, axes = plt.subplots(figsize=(9,3),nrows=1,ncols=3,dpi=120)
fig.subplots_adjust(wspace=.1)
col = "red"
ax = coloring_axes(axes[0])
font["color"] = col
ax.semilogy(gos.times,gos.B_FLUX,col,linewidth=0.75)
ax.axvline(gos.set_index("times").B_FLUX.idxmax(), color=col, linewidth=0.6)
ax.set_ylim(1e-6,1e-3)
ax.set_ylabel("Solar Flux\n"+r"($Wm^{-2}$)",fontdict=font)
font["color"] = "k"
ax.set_xlabel("Time (UT)",fontdict=font)
ax = coloring_twaxes(ax.twinx())
ax.plot(riom.times, riom.absorption,"ko", markersize=1)
ax.axvline(riom.set_index("times").absorption.idxmax(), color="k", linewidth=0.6)
ax.grid(False)
ax.set_xlim(start,end)
ax.set_ylim(-.1, 3.)
dx = (riom.set_index("times").absorption.idxmax()-gos.set_index("times").B_FLUX.idxmax()).total_seconds()
ax.text(0.54,0.3,r"$\bar{\delta}$=%ds"%(dx),horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT, rotation=90)
ax.set_yticklabels([])
font["color"] = "darkgreen"
ax.text(0.7,1.05,"Station - OTT, 11 March 2015, Universal Time",horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=font)
font["color"] = "k"
ax.text(.9,.9,"(a)",horizontalalignment="center",verticalalignment="center", transform=ax.transAxes,fontdict=fontT)
fslope = ala.slope_analysis(np.log10(gos.B_FLUX),gos.times.tolist())
rslope = ala.slope_analysis(riom.absorption.tolist(), riom.times.tolist(), xT=120)
ax = coloring_axes(axes[1])
ax.semilogy(gos.times,gos.B_FLUX,col,linewidth=0.75)
ax.axvline(fslope, color=col, linewidth=0.6, ls="--")
ax.set_ylim(1e-6,1e-3)
ax.set_yticklabels([])
ax.set_xlabel("Time (UT)",fontdict=font)
ax = coloring_twaxes(ax.twinx())
ax.plot(riom.times, riom.absorption,"ko", markersize=1)
ax.grid(False)
ax.set_xlim(start,end)
ax.set_ylim(-.1, 3.)
ax.axvline(rslope, color="k", linewidth=0.6, linestyle="--")
ax.set_yticklabels([])
dy = (rslope-fslope).total_seconds()
ax.text(0.27,0.8,r"$\bar{\delta}_s$=%ds"%(dy),horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT, rotation=90)
ax.text(.9,.9,"(b)",horizontalalignment="center",verticalalignment="center", transform=ax.transAxes,fontdict=fontT)
dx=40
ax = coloring_axes(axes[2])
ax.semilogy(gos.times,gos.B_FLUX,col,linewidth=0.75)
ax.semilogy(gos.times,np.roll(gos.B_FLUX,dx),"r-.")
ax.set_ylim(1e-6,1e-3)
ax.set_yticklabels([])
ax.set_xlabel("Time (UT)",fontdict=font)
ax = coloring_twaxes(ax.twinx())
ax.plot(riom.times, riom.absorption,"ko", markersize=1)
ax.grid(False)
ax.set_xlim(start,end)
ax.set_ylim(-.1, 3.)
ax.set_ylabel(r"Absorption [$\beta$]" + "\n(in dB)",fontdict=font)
ax.text(0.2,0.9,r"$\bar{\delta}_c$=%ds"%(2*dx),horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT)
ax.text(0.2,0.83,r"$\rho$=%.2f"%.93,horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT)
ax.text(.9,.9,"(c)",horizontalalignment="center",verticalalignment="center", transform=ax.transAxes,fontdict=fontT)
fig.autofmt_xdate(rotation=30,ha="center")
fig.savefig("images/example.png",bbox_inches="tight")
return
def example_rad_plot(ev=dt.datetime(2015,3,11,16,22), stn="bks",
start=dt.datetime(2015,3,11,16,10), end=dt.datetime(2015,3,11,16,30)):
sdr = gsd._fetch_sd_(ev, stn, start, end)
gos = ala.GOES(ev).fetch()
fig, axes = plt.subplots(figsize=(3,3),nrows=1,ncols=1,dpi=120)
fmt = matplotlib.dates.DateFormatter("%H%M")
dx = 25
fslope = ala.slope_analysis(np.log10(gos.B_FLUX),gos.times.tolist())
col = "red"
ax = axes
ax.spines["left"].set_color(col)
ax.tick_params(axis="y", which="both", colors=col)
ax.yaxis.label.set_color(col)
font["color"] = col
ax.xaxis.set_major_formatter(fmt)
ax.semilogy(gos.times,gos.B_FLUX,col,linewidth=0.75)
ax.semilogy(gos.times,np.roll(gos.B_FLUX,dx),"r-.")
ax.axvline(fslope, color=col, linewidth=0.6, ls="--")
ax.set_ylim(1e-6,1e-3)
ax.set_ylabel("Solar Flux\n"+r"($Wm^{-2}$)",fontdict=font)
font["color"] = "k"
ax.set_xlabel("Time (UT)",fontdict=font)
rslope = ala.slope_analysis(sdr.me,sdr.time.tolist())
ax = coloring_twaxes(ax.twinx())
ax.plot(sdr.time, sdr.me, "k-")
ax.set_xlim(start,end)
ax.axvline(rslope, color="k", linewidth=0.6, linestyle="--")
ax.set_ylabel(r"Inverse #-GS",fontdict=font)
ax.grid(False)
ax.set_xlim(start,end)
ax.set_ylim(-.1, 20.)
dy = (rslope-fslope).total_seconds()
ax.text(0.38,0.8,r"$\bar{\delta}_s$=%ds"%(dy),horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT, rotation=90)
ax.text(0.2,0.9,r"$\bar{\delta}_c$=%ds"%(2*dx),horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT)
ax.text(0.2,0.83,r"$\rho$=%.2f"%.56,horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT)
ax.text(0.78,0.9,"Station - BKS",horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT)
fig.autofmt_xdate(rotation=30,ha="center")
fig.savefig("images/example_sd.png",bbox_inches="tight")
return
def example_hrx_plot(ev=dt.datetime(2015,3,11,16,22), stn="ott",
start=dt.datetime(2015,3,11,16,10), end=dt.datetime(2015,3,11,16,30)):
riom = ala.Riometer(ev, stn).fetch()
gos = ala.GOES(ev).fetch()
fig, axes = plt.subplots(figsize=(3,6),nrows=2,ncols=1,dpi=120)
fig.subplots_adjust(hspace=0.1)
fmt = matplotlib.dates.DateFormatter("%H%M")
fslope = ala.slope_analysis(np.log10(gos.B_FLUX),gos.times.tolist())
col = "red"
ax = coloring_axes(axes[0])
font["color"] = col
ax.semilogy(gos.times,gos.B_FLUX,col,linewidth=0.75)
ax.axvline(gos.set_index("times").B_FLUX.idxmax(), color=col, linewidth=0.6)
ax.axvline(fslope, color=col, linewidth=0.6, ls="--")
ax.set_ylim(1e-8,1e-3)
ax.set_ylabel("Soft X-ray [0.1-0.8 nm]\n"+r"($Wm^{-2}$)",fontdict=font)
font["color"] = "k"
ax.set_xlabel("Time (UT)",fontdict=font)
ax = coloring_twaxes(ax.twinx())
rslope = ala.slope_analysis(riom.absorption.tolist(), riom.times.tolist(), xT=120)
ax.plot(riom.times, riom.absorption,"ko", markersize=1)
ax.axvline(riom.set_index("times").absorption.idxmax(), color="k", linewidth=0.6)
ax.grid(False)
ax.set_xlim(start,end)
ax.set_ylim(-.1, 3.)
ax.axvline(rslope, color="k", linewidth=0.6, linestyle="--")
ax.set_ylabel(r"Absorption [$\beta$]" + "\n(in dB)",fontdict=font)
ax.set_ylim(-.1, 3.)
dx = (riom.set_index("times").absorption.idxmax()-gos.set_index("times").B_FLUX.idxmax()).total_seconds()
dy = (rslope-fslope).total_seconds()
ax.text(0.36,0.85,r"$\bar{\delta}_s$=%ds"%(dy),horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT, rotation=90)
ax.text(0.68,0.25,r"$\bar{\delta}$=%ds"%(dx),horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT, rotation=90)
ax.text(0.8,0.9,"Station - OTT",horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT)
fslope = ala.slope_analysis(np.log10(gos.A_FLUX),gos.times.tolist())
col = "blue"
ax = coloring_axes(axes[1])
font["color"] = col
ax.semilogy(gos.times,gos.A_FLUX,col,linewidth=0.75)
ax.axvline(gos.set_index("times").A_FLUX.idxmax(), color=col, linewidth=0.6)
ax.axvline(fslope, color=col, linewidth=0.6, ls="--")
ax.set_ylim(1e-8,1e-3)
ax.set_ylabel("Hard X-ray [0.05-0.4 nm]\n"+r"($Wm^{-2}$)",fontdict=font)
font["color"] = "k"
ax.set_xlabel("Time (UT)",fontdict=font)
ax = coloring_twaxes(ax.twinx())
rslope = ala.slope_analysis(riom.absorption.tolist(), riom.times.tolist(), xT=120)
ax.plot(riom.times, riom.absorption,"ko", markersize=1)
ax.axvline(riom.set_index("times").absorption.idxmax(), color="k", linewidth=0.6)
ax.grid(False)
ax.set_xlim(start,end)
ax.set_ylim(-.1, 3.)
ax.axvline(rslope, color="k", linewidth=0.6, linestyle="--")
ax.set_ylabel(r"Absorption [$\beta$]" + "\n(in dB)",fontdict=font)
ax.set_ylim(-.1, 3.)
dx = (riom.set_index("times").absorption.idxmax()-gos.set_index("times").A_FLUX.idxmax()).total_seconds()
dy = (rslope-fslope).total_seconds()
ax.text(0.36,0.85,r"$\bar{\delta}_s$=%ds"%(dy),horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT, rotation=90)
ax.text(0.68,0.25,r"$\bar{\delta}$=%ds"%(dx),horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT, rotation=90)
ax.text(0.8,0.9,"Station - OTT",horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT)
ax.text(0.1,0.9,"(b)",horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT)
ax = axes[0]
ax.axvline(fslope, color="b", linewidth=0.6, linestyle="--",clip_on=False,ymin=-.2)
ax.axvline(gos.set_index("times").A_FLUX.idxmax(), color="b", linewidth=0.6,clip_on=False,ymin=-.2)
ax.text(0.1,0.9,"(a)",horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=fontT)
fig.autofmt_xdate(rotation=30,ha="center")
fig.savefig("images/example_hrx.png",bbox_inches="tight")
return
class Statistics(object):
def __init__(self, args):
if args.acase == 0 and not args.rad:
fname = "csv/rio_c0.csv"
self.fname = "images/stat_rio_c0.png"
if args.acase == 1 and not args.rad:
fname = "csv/rio_c1.csv"
self.fname = "images/stat_rio_c1.png"
self.tail = "s"
if args.acase == 2 and not args.rad:
fname = "csv/rio_c2.csv"
self.fname = "images/stat_rio_c2.png"
self.tail = "c"
if args.acase == 1 and args.rad:
fname = "csv/rad_c1.csv"
self.fname = "images/stat_rad_c1.png"
self.tail = "s"
if args.acase == 2 and args.rad:
fname = "csv/rad_c2.csv"
self.fname = "images/stat_rad_c2.png"
self.tail = "c"
self.dat = pd.read_csv(fname)
self.args = args
return
def _model_(self, X, y, family=sm.families.NegativeBinomial()):
model = sm.GLM(y, X, family=family)
response = model.fit()
return response
def getpval(self, x, y, family=sm.families.NegativeBinomial()):
model = sm.GLM(y, x, family=family)
response = model.fit()
print(response.summary())
return
def _create_x_(self, cossza, lat, logfmax, lt, lp="cossza"):
_o = pd.DataFrame()
if lp == "cossza":
L = len(cossza)
_o["cossza"], _o["lat"], _o["lt"], _o["logfmax"] = cossza, [lat]*L, [lt]*L, [logfmax]*L
if lp == "lat":
L = len(lat)
_o["cossza"], _o["lat"], _o["lt"], _o["logfmax"] = [cossza]*L, lat, [lt]*L, [logfmax]*L
if lp == "logfmax":
L = len(logfmax)
_o["cossza"], _o["lat"], _o["lt"], _o["logfmax"] = [cossza]*L, [lat]*L, [lt]*L, logfmax
if lp == "lt":
L = len(lt)
_o["cossza"], _o["lat"], _o["lt"], _o["logfmax"] = [cossza]*L, [lat]*L, lt, [logfmax]*L
return _o
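    # Hedged usage sketch (hypothetical values; X_obs/y_obs are assumed to be the observed
    # design matrix and response): _create_x_ builds a design matrix that sweeps one
    # predictor while holding the others fixed, for evaluating a fitted GLM along the sweep.
    #   st = Statistics(args)
    #   sweep = st._create_x_(cossza=np.linspace(0, 1, 50).tolist(),
    #                         lat=55.0, logfmax=-4.5, lt=12.0, lp="cossza")
    #   resp = st._model_(X_obs, y_obs)   # fit on the observed data
    #   mu = resp.predict(sweep)          # mean response along the cos(SZA) sweep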
def _image_(self):
def get_bin_mean(dfx, b_start, b_end, param="sza", prcntile=50.):
dt = dfx[(dfx[param]>=b_start) & (dfx[param]<b_end)].dt
mean_val = np.mean(dt)
if len(dt)>0: percentile = np.percentile(dt, prcntile)
else: percentile = np.nan
mad = np.median(np.abs(dt-mean_val))
return [mean_val, percentile, mad]
def to_bin(dfx, bins, param):
binned_data = []
for n in range(0, len(bins)-1):
b_start = bins[n]
b_end = bins[n+1]
binned_data.append(get_bin_mean(dfx, b_start, b_end, param=param))
binned_data = np.array(binned_data)
return binned_data
def cfit(xdat, ydat, xn, crv=lambda u, a, b: u*a+b):
from scipy.optimize import curve_fit
fd = | pd.DataFrame() | pandas.DataFrame |
# Multiscale sampling (MSS) with VASP and LAMMPS
# <NAME>
# Getman Research Group
# Mar 5, 2020
import sys,os
import pandas as pd
import solvent
class ReadInput(object):
def __init__(self, poscar_file, mss_input):
self.readPOSCAR(poscar_file)
self.readMSSinput(mss_input)
self.groupAtom()
def readPOSCAR(self, poscar_file):
""" read VASP POSCAR/CONTCAR file """
self.elem = {} # vac or solvated poscar atoms (pt+ads+h2o), set in readPOSCAR()
        self.cell_vec = [] # set in readPOSCAR()
        self.old_coords = [] # set in readPOSCAR()
with open(poscar_file) as f:
flag = False
for i, line in enumerate(f):
if len(line.strip().split()) == 0:
break
if i == 0:
                    title = line.strip().split() # the title line; it may also hold the element symbols
elif i == 1:
self.multiplier = float(line.strip().split()[0])
elif 2 <= i < 5:
self.cell_vec.append([float(j) for j in line.strip().split()])
elif i == 5:
                    if line.strip().split()[0].isdigit():  # line 6 holds counts only, so element symbols come from the title line
self.elem['elem'] = title
self.elem['num'] = [int(j) for j in line.strip().split()]
else:
self.elem['elem'] = line.strip().split()
self.elem['num'] = [int(j) for j in next(f).strip().split()]
pattern = next(f).strip().split()[0]
                    if pattern.lower().startswith('s'):  # check if this line is Selective Dynamics
self.coord_type = next(f).strip().split()[0]
else:
self.coord_type = pattern
if self.coord_type[0].lower() not in ['c', 'd']: # check if cartesian or direct
sys.exit('\nERROR READING POSCAR: please check coordinate type\n')
flag = True
####################################
elif flag and len(line.strip().split()) > 0:
self.old_coords.append(line.strip().split())
def readMSSinput(self, mss_input):
""" read MSS modeling input file """
self.elem_surface = {'elem':[], 'num':[]}
self.elem_ads = {'elem':[], 'num':[]}
self.elem_sol = {'elem':[], 'num':[]}
self.atom = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
concat,
date_range,
)
import pandas._testing as tm
class TestEmptyConcat:
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
{"A": range(10000)}, index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"left,right,expected",
[
# booleans
(np.bool_, np.int32, np.int32),
(np.bool_, np.float32, np.object_),
# datetime-like
("m8[ns]", np.bool_, np.object_),
("m8[ns]", np.int64, np.object_),
("M8[ns]", np.bool_, np.object_),
("M8[ns]", np.int64, np.object_),
# categorical
("category", "category", "category"),
("category", "object", "object"),
],
)
def test_concat_empty_series_dtypes(self, left, right, expected):
result = concat([Series(dtype=left), Series(dtype=right)])
assert result.dtype == expected
@pytest.mark.parametrize(
"dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]
)
def test_concat_empty_series_dtypes_match_roundtrips(self, dtype):
dtype = np.dtype(dtype)
result = concat([Series(dtype=dtype)])
assert result.dtype == dtype
result = concat([Series(dtype=dtype), Series(dtype=dtype)])
assert result.dtype == dtype
def test_concat_empty_series_dtypes_roundtrips(self):
# round-tripping with self & like self
dtypes = map(np.dtype, ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"])
def int_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"i", "u", "b"}) and (
dtype.kind == "i" or dtype2.kind == "i"
):
return "i"
elif not len(typs - {"u", "b"}) and (
dtype.kind == "u" or dtype2.kind == "u"
):
return "u"
return None
def float_result_type(dtype, dtype2):
typs = {dtype.kind, dtype2.kind}
if not len(typs - {"f", "i", "u"}) and (
dtype.kind == "f" or dtype2.kind == "f"
):
return "f"
return None
def get_result_type(dtype, dtype2):
result = float_result_type(dtype, dtype2)
if result is not None:
return result
result = int_result_type(dtype, dtype2)
if result is not None:
return result
return "O"
for dtype in dtypes:
for dtype2 in dtypes:
if dtype == dtype2:
continue
expected = get_result_type(dtype, dtype2)
result = concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype
assert result.kind == expected
def test_concat_empty_series_dtypes_triple(self):
assert (
concat(
[Series(dtype="M8[ns]"), Series(dtype=np.bool_), Series(dtype=np.int64)]
).dtype
== np.object_
)
def test_concat_empty_series_dtype_category_with_array(self):
# GH#18515
assert (
concat(
[Series(np.array([]), dtype="category"), Series(dtype="float64")]
).dtype
== "float64"
)
def test_concat_empty_series_dtypes_sparse(self):
result = concat(
[
Series(dtype="float64").astype("Sparse"),
Series(dtype="float64").astype("Sparse"),
]
)
assert result.dtype == "Sparse[float64]"
result = concat(
[Series(dtype="float64").astype("Sparse"), Series(dtype="float64")]
)
expected = pd.SparseDtype(np.float64)
assert result.dtype == expected
result = concat(
[Series(dtype="float64").astype("Sparse"), Series(dtype="object")]
)
expected = pd.SparseDtype("object")
assert result.dtype == expected
def test_concat_empty_df_object_dtype(self):
# GH 9149
df_1 = DataFrame({"Row": [0, 1, 1], "EmptyCol": np.nan, "NumberCol": [1, 2, 3]})
df_2 = DataFrame(columns=df_1.columns)
result = concat([df_1, df_2], axis=0)
expected = df_1.astype(object)
tm.assert_frame_equal(result, expected)
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_concat_empty_dataframe(self):
# 39037
df1 = DataFrame(columns=["a", "b"])
df2 = DataFrame(columns=["b", "c"])
result = concat([df1, df2, df1])
expected = | DataFrame(columns=["a", "b", "c"]) | pandas.DataFrame |
from ctypes import sizeof
import traceback
from matplotlib.pyplot import axis
import pandas as pd
import numpy as np
from datetime import datetime
from time import sleep
from tqdm import tqdm
import random
import warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.datasets import load_linnerud, make_multilabel_classification
from sklearn.multioutput import MultiOutputClassifier, MultiOutputRegressor
from sklearn.linear_model import Ridge
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import LinearSVR
from sklearn.neural_network import MLPRegressor, MLPClassifier
# Functions
# ======================================================================
def getClassificationModelType(class_model_type, **kwargs):
if class_model_type == "svm": return MultiOutputClassifier(svm.SVC(kernel='rbf', **kwargs)) # binary classification model
if class_model_type == "random_forest": return RandomForestClassifier(max_depth=2, random_state=0, **kwargs) # binary classification model
if class_model_type == "ann": return MultiOutputClassifier(MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(8, 8), random_state=1, max_iter=10000, **kwargs))
def getColDataTypes(data_df, discrete_info_df):
return [col for col in data_df if discrete_info_df[col]['discrete']], [col for col in data_df if not discrete_info_df[col]['discrete']]
def getEdgeData(data_df, cols):
return data_df[cols]
def getHeartData():
df = pd.read_csv("data/heart.csv")
df.set_index(keys='ID', inplace=True)
return df
def getHeartInfo():
df = pd.read_csv("data/heart.info")
df.set_index(keys='info', inplace=True)
return df
def getMeanSquaredError(y_pred_df, y_df):
return round(mean_squared_error(y_pred=y_pred_df, y_true=y_df), 7)
def getModelAccuracy(y_pred_df, y_df):
return accuracy_score(y_true=y_df, y_pred=y_pred_df)
def getRegressionModelType(reg_model_type, **kwargs):
if reg_model_type == "ridge": return MultiOutputRegressor(Ridge(random_state=123, **kwargs))
if reg_model_type == "random_forest": return RandomForestRegressor(max_depth=2, random_state=0, **kwargs)
if reg_model_type == "k_neighbors": return KNeighborsRegressor(n_neighbors=2, **kwargs)
if reg_model_type == "svr": return MultiOutputRegressor(LinearSVR(random_state=0, tol=1e-05, max_iter=100000, **kwargs))
if reg_model_type == "ann": return MLPRegressor(solver='adam', alpha=1e-5, hidden_layer_sizes=(10, 10), random_state=1, max_iter=100000, **kwargs)
def getSampleData(data_df):
# n = 62,500
# training: 50,000
# testing: 12,500
return data_df.sample(n=62500, random_state=random.randint(a=0, b=2e9))
def main():
# run_simulation(5)
data_collected_1_df = pd.read_csv('data/data_collection_1.csv', index_col=['ID'])
data_collected_1_df.drop(columns=['chest'], inplace=True)
data_collected_2_df = pd.read_csv('data/data_collection_2.csv', index_col=['ID'])
data_collected_2_df.drop(columns=['chest'], inplace=True)
data_prediction([data_collected_1_df, data_collected_2_df])
def modelFit(model, X, y):
try:
# print("Fitting model...")
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "")
model.fit(X, y)
except Exception:
print(traceback.print_exc)
# print("Fitting model using ravel()...")
# print(y.ravel())
model.fit(X, y.ravel())
def fitClassificationFeatures(X, y):
# edge features
models = []
y_dfs = []
# model_data_type = 'classification'
# print("Fitting", model_data_type, "models...")
for model_name in ['svm', 'random_forest', 'ann']:
y_temp = y
# print("Fitting", model_name, "...")
if model_name=='ann':
model = getClassificationModelType(model_name)
else:
if model_name=='svm': # pseudo-classification
model = getRegressionModelType('svr')
elif model_name=='random_forest': # pseudo-classification
model = getRegressionModelType(model_name)
y_df = pd.DataFrame(y)
y_dum_df = pd.get_dummies(y_df, columns=y.columns, prefix=y.columns)
y = y_dum_df
# print(y.head())
y_dfs.append(y)
modelFit(model, X, y)
models.append(model)
y = y_temp
# print("Finished edge features classification model fitting...")
return models, y_dfs # fitted classfication models of edge features
def predictClassificationFeatures(models, X, y, discrete_cols, results_cols):
results_df = pd.DataFrame()
gen_cols = []
# edge features generating discrete features
model_data_type ='classification'
# print("Predicting", model_data_type, "models...")
model_names = ['svm', 'random_forest', 'ann']
for i in range(len(model_names)):
model_name = model_names[i]
# print("Predicting", model_name, "...")
model = models[i]
# print(model)
y_cols = pd.get_dummies(y, columns=y.columns, prefix=y.columns).columns \
if model_name=='svm' or model_name=='random_forest' else y.columns
heart_gen_prime_df = pd.DataFrame(model.predict(X), columns=y_cols, index=y.index)
if model_name=='svm' or model_name=='random_forest': # binary
for y_col in discrete_cols:
y_pred_cols = [y_pred_col for y_pred_col in heart_gen_prime_df.columns if y_pred_col.startswith(y_col+"_")]
y_pred_cols_df = heart_gen_prime_df[y_pred_cols]
y_pred_cols_df.columns = [y_pred_col.split(y_col+"_", 1)[1] for y_pred_col in y_pred_cols]
heart_gen_prime_df[y_col] = y_pred_cols_df[y_pred_cols_df.columns].idxmax(axis=1)
heart_gen_prime_df.drop(columns=y_pred_cols, inplace=True)
gen_cols.append(heart_gen_prime_df)
# UNCOMMENT
# print('expected')
# print(y[discrete_cols].head(10))
# print([len(y[col].unique()) for col in discrete_cols])
# print('predicted')
# print(heart_gen_prime_df.head(10))
# print([len(heart_gen_prime_df[col].unique()) for col in heart_gen_prime_df.columns])
if isinstance(heart_gen_prime_df, object): # and isinstance(y, np.int64):
# print('convert y_pred_df int64')
heart_gen_prime_df = heart_gen_prime_df.astype('int64')
if isinstance(heart_gen_prime_df, np.int32) and isinstance(y, np.float64):
# print('convert y_pred_df float')
heart_gen_prime_df = heart_gen_prime_df.astype('float64')
accuracy = [getModelAccuracy(y_pred_df=heart_gen_prime_df[col], y_df=y[col]) for col in y.columns]
results_df = results_df.append(pd.DataFrame([model_data_type, model_name, accuracy]).transpose())
results_df.reset_index(drop=True, inplace=True)
results_df.columns = results_cols
# print("gen_class_cols_results_df:")
# print(results_df)
return gen_cols
def fitRegressionFeatures(X, y):
# edge features
models = []
y_dfs = []
# model_data_type = 'regression'
# print("Fitting", model_data_type, "models...")
for model_name in ['ridge', 'random_forest', 'svr', 'ann']:
# print("Fitting", model_name, "...")
model = getRegressionModelType(model_name)
y_dfs.append(y)
modelFit(model, X, y)
models.append(model)
# print("Finished edge features regression model fitting...")
return models # fitted regression models of edge features
def predictRegressionFeatures(models, X, y, results_cols):
results_df = pd.DataFrame()
gen_cols = []
# edge features generating continuous features
model_data_type ='regression'
# print("Predicting", model_data_type, "models...")
model_names = ['ridge', 'random_forest', 'svr', 'ann']
for i in range(len(model_names)):
model_name = model_names[i]
# print("Predicting", model_name, "...")
model = models[i]
heart_gen_prime_df = pd.DataFrame(model.predict(X), columns=y.columns, index=y.index)
mse = [getMeanSquaredError(y_pred_df=heart_gen_prime_df[col], y_df=y[col]) for col in y.columns]
results_df = results_df.append(pd.DataFrame([model_data_type, model_name, mse]).transpose())
gen_cols.append(heart_gen_prime_df)
results_df.reset_index(drop=True, inplace=True)
results_df.columns = results_cols
# print("gen_reg_cols_results_df:")
# print(results_df)
return gen_cols
def fitAllFeatures(X, y):
# all 13 features
models = []
# model_data_type = 'classification'
# print("Fitting", "models...")
for model in ['svm', 'random_forest', 'ann']:
# print("Fitting", model, "...")
model = getClassificationModelType(model)
modelFit(model, X, y)
models.append(model)
# print("Finished all features classification model fitting...")
return models # fitted classification models of all 13 features
def predictAllFeatures(models, X, y, results_cols):
results_df = pd.DataFrame()
# all 13 features
model_data_type ='classification'
# print("Predicting", model_data_type, "models...")
model_names = ['svm', 'random_forest', 'ann']
for i in range(len(model_names)):
model_name = model_names[i]
# print("Predicting", model_name, "...")
model = models[i]
y_prime_df = pd.DataFrame(model.predict(X), index=y.index)
accuracy = getModelAccuracy(y_pred_df=y_prime_df, y_df=y)
results_df = results_df.append(pd.DataFrame([model_data_type, model_name, accuracy]).transpose())
results_df.reset_index(drop=True, inplace=True)
results_df.columns = results_cols
# print("results_df:")
# print(results_df)
return results_df
def data_prediction(data_collected_dfs):
heart_data_df = getSampleData(getHeartData())
heart_label_df = pd.DataFrame(heart_data_df['class'])
heart_info_df = getHeartInfo()
for df in [heart_data_df, heart_info_df]: df.drop(columns=['class'], inplace=True)
discrete_cols, continuous_cols = getColDataTypes(data_df=heart_data_df, discrete_info_df=heart_info_df)
heart_data_continuous_df = heart_data_df[continuous_cols]
heart_data_discrete_df = heart_data_df[discrete_cols]
# normalizes continuous features
heart_data_continuous_df = (heart_data_continuous_df-heart_data_continuous_df.min())/(heart_data_continuous_df.max()-heart_data_continuous_df.min())
# recombines normalized continuous features with regression features
heart_data_df = pd.concat([heart_data_continuous_df, heart_data_discrete_df], axis=1)
# splits data into training and testing dataframes
X_heart_train_df, X_heart_test_df, y_heart_train_df, y_heart_test_df = train_test_split(heart_data_df, heart_label_df, test_size = 0.2, random_state=random.randint(a=0, b=2e9), shuffle=True)
# fits on training data and all 13 features
models_all_feat = fitAllFeatures(X=X_heart_train_df, y=y_heart_train_df)
edge_cols = ['age',
'sex',
'resting_blood_pressure',
'fasting_blood_sugar',
'resting_electrocardiographic_results',
'maximum_heart_rate_achieved',
'exercise_induced_angina']
# edge data collection
heart_edge_train_df = getEdgeData(data_df=X_heart_train_df, cols=edge_cols)
heart_edge_test_df = getEdgeData(data_df=X_heart_test_df, cols=edge_cols)
# expected generated columns
heart_gen_train_df = X_heart_train_df.drop(columns=edge_cols)
heart_gen_test_df = X_heart_test_df.drop(columns=edge_cols)
discrete_cols, continuous_cols = getColDataTypes(data_df=heart_gen_test_df, discrete_info_df=heart_info_df)
y = heart_gen_train_df[discrete_cols]
# combine dataframes
data_collected_df = pd.concat(data_collected_dfs, axis=0)
# generates discrete features using classification models
models_class_feat_gen, y = fitClassificationFeatures(X=heart_edge_train_df, y=y)
heart_gen_class_cols = predictClassificationFeatures(models=models_class_feat_gen, X=heart_edge_test_df, y=heart_gen_test_df[discrete_cols], discrete_cols=discrete_cols, results_cols=['model_type', 'model', 'accuracy'])
    arrays = [[''] * len(edge_cols), edge_cols]
tuples = list(zip(*arrays))
multi_index = pd.MultiIndex.from_tuples(tuples, names=["model_name", "predicted_cols"])
results_df = data_collected_df
results_df.columns = multi_index
class_gen_cols = []
model_data_type = 'classification'
model_names = ['svm', 'random_forest', 'ann']
for i in range(len(model_names)):
model_name = model_names[i]
model = models_class_feat_gen[i]
y = heart_gen_test_df[discrete_cols]
arrays = [[model_names[i], model_names[i], model_names[i]], discrete_cols]
tuples = list(zip(*arrays))
multi_index = pd.MultiIndex.from_tuples(tuples, names=["model_name", "predicted_cols"])
y_cols = pd.get_dummies(y, columns=y.columns, prefix=y.columns).columns \
if model_name=='svm' or model_name=='random_forest' else y.columns
y_prime_class_df = pd.DataFrame(model.predict(data_collected_df), columns=y_cols, index=data_collected_df.index)
if model_name=='svm' or model_name=='random_forest': # binary
for y_col in discrete_cols:
y_pred_cols = [y_pred_col for y_pred_col in y_prime_class_df.columns if y_pred_col.startswith(y_col+"_")]
y_pred_cols_df = y_prime_class_df[y_pred_cols]
y_pred_cols_df.columns = [y_pred_col.split(y_col+"_", 1)[1] for y_pred_col in y_pred_cols]
y_prime_class_df[y_col] = y_pred_cols_df[y_pred_cols_df.columns].idxmax(axis=1)
y_prime_class_df.drop(columns=y_pred_cols, inplace=True)
class_gen_cols.append(y_prime_class_df)
y_prime_class_df.columns = multi_index
results_df = pd.concat([results_df, y_prime_class_df], axis=1)
results_df = pd.DataFrame(results_df, index=results_df.index, columns=pd.MultiIndex.from_tuples(results_df.columns))
# print(results_df)
results_df.to_csv('results/gen_class_cols.csv')
# generates continuous features using regression models
models_reg_feat_gen = fitRegressionFeatures(X=heart_edge_train_df, y=heart_gen_train_df[continuous_cols])
heart_gen_reg_cols = predictRegressionFeatures(models=models_reg_feat_gen, X=heart_edge_test_df, y=heart_gen_test_df[continuous_cols], results_cols=['model_type', 'model', 'MSE'])
    arrays = [[''] * len(edge_cols), edge_cols]
tuples = list(zip(*arrays))
multi_index = pd.MultiIndex.from_tuples(tuples, names=["model_name", "predicted_cols"])
results_df = data_collected_df
reg_gen_cols = []
model_data_type = 'regression'
model_names = ['ridge', 'random_forest', 'svr', 'ann']
for i in range(len(model_names)):
model_name = model_names[i]
arrays = [[model_names[i]]*len(continuous_cols), continuous_cols]
tuples = list(zip(*arrays))
multi_index = pd.MultiIndex.from_tuples(tuples, names=["model_name", "predicted_cols"])
model = models_reg_feat_gen[i]
        y_prime_reg_df = pd.DataFrame(model.predict(data_collected_df), index=data_collected_df.index)
        y_prime_reg_df.columns = multi_index
        results_df = pd.concat([results_df, y_prime_reg_df], axis=1)
        # print(model_data_type, model_name)
        reg_gen_cols.append(y_prime_reg_df)
results_df = pd.DataFrame(results_df, index=results_df.index, columns=pd.MultiIndex.from_tuples(results_df.columns))
results_df.index.name = 'ID'
results_df.to_csv('results/gen_reg_cols.csv')
# predict all 13 features using edge features combined with generated columns from above
results_df = pd.DataFrame()
for c in range(len(heart_gen_class_cols)):
for r in range(len(heart_gen_reg_cols)):
class_models = ['svm', 'random_forest', 'ann']
reg_models = ['ridge', 'random_forest', 'svr', 'ann']
X_heart_test_prime_df = pd.concat([data_collected_df, class_gen_cols[c], reg_gen_cols[r]], axis=1)
# all 13 features
model_data_type ='classification'
# print("Predicting", model_data_type, "models...")
model_names = ['svm', 'random_forest', 'ann']
for i in range(len(model_names)):
model_name = model_names[i]
arrays = [([model_names[i]], [class_models[c]], [reg_models[r]])]
tuples = [(model_names[i], class_models[c], reg_models[r])]
multi_index = pd.MultiIndex.from_tuples(tuples, names=["all_feat_model", "gen_class_model", "gen_reg_model"])
model = models_all_feat[i]
y_prime_df = pd.DataFrame(model.predict(X_heart_test_prime_df), index=data_collected_df.index, columns=[(model_names[i], class_models[c], reg_models[r])])
# print(y_prime_df)
results_df = pd.concat([results_df, y_prime_df], axis=1)
# print(results_df)
results_df.to_csv('results/predicted_results_collected.csv')
def run_simulation(num_runs):
final_results_df = pd.DataFrame(columns=['model_type', 'model', 'accuracy', 'class_gen_model', 'reg_gen_model'])
current_date_time = datetime.now().strftime('%Y%m%d-%H%M%S')
final_results_file_name = 'results/results_{}.csv'.format(current_date_time)
for _ in tqdm(range(num_runs)):
sim_results_df = simulation_instance()
final_results_df = pd.concat([final_results_df, sim_results_df], axis=0) # reorder columns
final_results_df = final_results_df[['class_gen_model', 'reg_gen_model', 'model', 'accuracy']].convert_dtypes()
final_results_df.to_csv(final_results_file_name)
# final_results_df = final_results_df.groupby(['model', 'class_gen_model', 'reg_gen_model'])['accuracy'].mean()
# final_results_df.to_csv('results/results_{}.csv'.format(current_date_time))
print()
print(final_results_df)
simulation_summary(final_results_file_name, current_date_time)
def simulation_summary(final_results_file_name, current_date_time):
simulation_results_df = pd.read_csv(final_results_file_name)
simulation_results_df.columns = ['idx', 'model', 'class_gen_model', 'reg_gen_model', 'accuracy']
simulation_results_df = simulation_results_df[['model', 'class_gen_model', 'reg_gen_model', 'accuracy']]
# print(simulation_results_df)
simulation_results_avg = simulation_results_df.groupby(['model', 'class_gen_model', 'reg_gen_model'])['accuracy'].mean()
simulation_results_avg_df = pd.DataFrame(simulation_results_avg.values, columns=['avg_accuracy'], index=simulation_results_avg.index)
# print(simulation_results_avg_df)
simulation_results_max = simulation_results_df.groupby(['model', 'class_gen_model', 'reg_gen_model'])['accuracy'].max()
simulation_results_max_df = pd.DataFrame(simulation_results_max.values, columns=['max_accuracy'], index=simulation_results_max.index)
# print(simulation_results_max_df)
simulation_results_min = simulation_results_df.groupby(['model', 'class_gen_model', 'reg_gen_model'])['accuracy'].min()
simulation_results_min_df = pd.DataFrame(simulation_results_min.values, columns=['min_accuracy'], index=simulation_results_min.index)
# print(simulation_results_min_df)
# combine average, maximum, and minimum dataframes
final_simulation_results_df = pd.concat([simulation_results_avg_df, simulation_results_max_df, simulation_results_min_df], axis=1)
print(final_simulation_results_df)
final_simulation_results_df.to_csv('results/simulation_results_{}.csv'.format(current_date_time))
def simulation_instance():
heart_data_df = getSampleData(getHeartData())
heart_label_df = pd.DataFrame(heart_data_df['class'])
heart_info_df = getHeartInfo()
for df in [heart_data_df, heart_info_df]: df.drop(columns=['class'], inplace=True)
discrete_cols, continuous_cols = getColDataTypes(data_df=heart_data_df, discrete_info_df=heart_info_df)
heart_data_continuous_df = heart_data_df[continuous_cols]
heart_data_discrete_df = heart_data_df[discrete_cols]
# normalizes continuous features
heart_data_continuous_df = (heart_data_continuous_df-heart_data_continuous_df.min())/(heart_data_continuous_df.max()-heart_data_continuous_df.min())
# recombines normalized continuous features with regression features
heart_data_df = pd.concat([heart_data_continuous_df, heart_data_discrete_df], axis=1)
# splits data into training and testing dataframes
X_heart_train_df, X_heart_test_df, y_heart_train_df, y_heart_test_df = train_test_split(heart_data_df, heart_label_df, test_size = 0.2, random_state=random.randint(a=0, b=2e9), shuffle=True)
# fits on training data and all 13 features
models_all_feat = fitAllFeatures(X=X_heart_train_df, y=y_heart_train_df)
edge_cols = ['age',
'sex',
'resting_blood_pressure',
'fasting_blood_sugar',
'resting_electrocardiographic_results',
'maximum_heart_rate_achieved',
'exercise_induced_angina']
# edge data collection
heart_edge_train_df = getEdgeData(data_df=X_heart_train_df, cols=edge_cols)
heart_edge_test_df = getEdgeData(data_df=X_heart_test_df, cols=edge_cols)
# expected generated columns
heart_gen_train_df = X_heart_train_df.drop(columns=edge_cols)
heart_gen_test_df = X_heart_test_df.drop(columns=edge_cols)
discrete_cols, continuous_cols = getColDataTypes(data_df=heart_gen_test_df, discrete_info_df=heart_info_df)
y = heart_gen_train_df[discrete_cols]
# generates discrete features using classification models
models_class_feat_gen, y = fitClassificationFeatures(X=heart_edge_train_df, y=y)
heart_gen_class_cols = predictClassificationFeatures(models=models_class_feat_gen, X=heart_edge_test_df, y=heart_gen_test_df[discrete_cols], discrete_cols=discrete_cols, results_cols=['model_type', 'model', 'accuracy'])
# generates continuous features using regression models
models_reg_feat_gen = fitRegressionFeatures(X=heart_edge_train_df, y=heart_gen_train_df[continuous_cols])
heart_gen_reg_cols = predictRegressionFeatures(models=models_reg_feat_gen, X=heart_edge_test_df, y=heart_gen_test_df[continuous_cols], results_cols=['model_type', 'model', 'MSE'])
# predict all 13 features using test data
predictAllFeatures(models=models_all_feat, X=X_heart_test_df, y=y_heart_test_df, results_cols=['model_type', 'model', 'accuracy'])
# predict all 13 features using edge features combined with generated columns from above
simulation_result_df = pd.DataFrame(columns=['model_type', 'model', 'accuracy', 'class_gen_model', 'reg_gen_model'])
for c in range(len(heart_gen_class_cols)):
for r in range(len(heart_gen_reg_cols)):
class_models = ['svm', 'random_forest', 'ann']
reg_models = ['ridge', 'random_forest', 'svr', 'ann']
X_heart_test_prime_df = pd.concat([heart_edge_test_df, heart_gen_class_cols[c], heart_gen_reg_cols[r]], axis=1)
results_df = predictAllFeatures(models=models_all_feat, X=X_heart_test_prime_df, y=y_heart_test_df, results_cols=['model_type', 'model', 'accuracy'])
results_df = pd.concat([results_df, | pd.DataFrame([[class_models[c], reg_models[r]]]*3) | pandas.DataFrame |
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import json
import os
import sys
import subprocess
from configparser import ConfigParser
from tqdm import tqdm
from nltk import sent_tokenize
from sklearn.metrics import accuracy_score, classification_report
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
def retrieve_cosine_similarity(X, Y):
#print(X.shape)
#print(Y.shape)
#print(cosine_similarity(X, Y).shape)
sim_scores = cosine_similarity(X, Y).squeeze(0)
return sim_scores.tolist()
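# Hedged usage sketch (corpus/claim strings are placeholders): X and Y are expected to be
# TF-IDF matrices produced by the same fitted vectorizer.
#   vectorizer = TfidfVectorizer().fit(corpus_sentences)
#   X = vectorizer.transform(["some claim text"])    # shape (1, vocab_size)
#   Y = vectorizer.transform(candidate_sentences)    # shape (n_sentences, vocab_size)
#   scores = retrieve_cosine_similarity(X, Y)        # list of n_sentences similarity scores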
def load_articles(dataset_path, articles_path):
with open(os.path.join(dataset_path, 'train.json')) as f_obj:
train_records = json.load(f_obj)
with open(os.path.join(dataset_path, 'valid.json')) as f_obj:
valid_records = json.load(f_obj)
with open(os.path.join(dataset_path, 'test.json')) as f_obj:
test_records = json.load(f_obj)
all_records = train_records + valid_records + test_records
relevant_article_paths = []
review_article_paths = []
for record in all_records:
review_article_json_path = record["label"]["review_article_json_path"]
assert os.path.isfile(review_article_json_path) == True, f"Error! Review article json file does not exists: {review_article_json_path}"
review_article_paths.append(review_article_json_path)
for item in record['metadata']['relevant_articles']:
for _, article_path in item.items():
if isinstance(article_path, list):
article_path = article_path[1]
relevant_article_paths.append(article_path)
rel_article_path_to_sents = {}
all_rel_sentences = []
for rel_article_path in tqdm(relevant_article_paths):
with open(rel_article_path) as f_obj:
article_sentences = json.load(f_obj)
rel_article_path_to_sents[rel_article_path] = article_sentences
all_rel_sentences.extend(article_sentences)
rev_article_path_to_sents = {}
all_rev_sentences = []
for rev_article_path in tqdm(review_article_paths):
with open(rev_article_path) as f_obj:
article_sentences = json.load(f_obj)
rev_article_path_to_sents[rev_article_path] = article_sentences
all_rev_sentences.extend(article_sentences)
return rel_article_path_to_sents, all_rel_sentences, rev_article_path_to_sents, all_rev_sentences
def load_data_w_rel(dataset_path, article_path_to_sents, tfidf_vectorizer):
with open(os.path.join(dataset_path, 'train.json')) as f_obj:
train_records = json.load(f_obj)
train = []
for record in tqdm(train_records):
train_item = {}
train_item["claim_id"] = record['metadata']['id']
if record['metadata']['claimant'] is not None:
train_item['text'] = record['metadata']['claim'].strip() + ' ' + record['metadata']['claimant'].strip()
train_item['text'] = train_item['text'].strip()
else:
train_item['text'] = record['metadata']['claim'].strip()
X = tfidf_vectorizer.transform([train_item['text']])
evidence_sentences = []
evidence_scores = []
for item in record['metadata']['relevant_articles']:
for _, article_path in item.items():
if isinstance(article_path, list):
article_path = article_path[1]
else:
continue
if article_path not in article_path_to_sents:
continue
article_sentences = [sent.strip() for sent in article_path_to_sents[article_path] if sent.strip() != '']
if len(article_sentences) == 0:
continue
Y = tfidf_vectorizer.transform(article_sentences)
sim_scores = retrieve_cosine_similarity(X, Y)
                assert len(sim_scores) == len(article_sentences), f"Error! Count mismatch between similarity scores ({len(sim_scores)}) and article sentences ({len(article_sentences)})!"
evidence_sentences.append(article_sentences)
evidence_scores.append(sim_scores)
# Skip if no evidence sentences are available
if len(evidence_sentences) == 0:
continue
#print(train_item['text'])
#print('Top 10 evidence sentences')
#for index in range(10):
# print(evidence_sentences[similar_indices[index]])
#ranked_evidence = [evidence_sentences[sim_index] for sim_index in similar_indices]
train_item['evidence_sents'] = evidence_sentences
train_item['tfidf_scores'] = evidence_scores
train_item['rating'] = record['label']['rating']
if record['metadata']['claim_date'] is not None:
train_item['date'] = record['metadata']['claim_date']
else:
train_item['date'] = record['metadata']['review_date']
train.append(train_item)
train_df = | pd.DataFrame.from_records(train) | pandas.DataFrame.from_records |
import json
import logging
import datetime
from pathlib import Path
import branca.colormap as cm
import fiona
import folium
import geopandas as gpd
import numpy as np
import pandas as pd
import rasterio
from folium import plugins
from rasterstats import zonal_stats
from shapely import geometry as sgeom
from shapely.geometry import shape
from config.constants import SHAPEFILES_ADEC_DEP_POL
from config.logging_conf import GOES_LOGGER_NAME
from plotters_lib.isobands_gdal import isobands
from wrf_api.goes_api_ingest import post_img_to_api
logger = logging.getLogger(GOES_LOGGER_NAME)
ESCALA_HELADAS = cm.linear.viridis.scale(-10.0, 0.0).to_step(5)
def isvalid(geom):
"""
    Check the validity of a geometry. Returns 1 if it is valid, 0 otherwise.
    geom: geometry to check.
"""
try:
shape(geom)
return 1
except Exception:
return 0
def color_pedanias(x):
"""
    Defines the colors for the different frost levels for each pedanía (county).
    If the temperature value is null or NaN, returns transparent.
    If the value is above 0 degrees, returns white.
    Otherwise, returns the corresponding color from the 'ESCALA_HELADAS' scale.
"""
if x['properties']['LST_mean'] is None:
color = 'transparent'
elif x['properties']['LST_mean'] > 0.0:
color = 'white'
else:
color = ESCALA_HELADAS(x['properties']['LST_mean'])
return color
def highlight_function(feature):
"""
    Defines the visual style applied when the cursor hovers over a geometry (highlight function).
"""
return {
'fillColor': color_isotermas(feature),
'color': 'white',
'weight': 3,
'dashArray': '5, 5'
}
def color_isotermas(x):
"""
    Defines the colors for the different temperature levels according to the generated isotherm vector.
    If the isotherm value is null or NaN, returns transparent.
    If the value is at or below -10 degrees, returns the color of the minimum value (-10 degrees).
    Otherwise, returns the corresponding color from the 'ESCALA_HELADAS' scale.
"""
if x['properties']['t'] is None:
color = 'transparent'
elif x['properties']['t'] <= -10.0:
color = '#AD1457'
else:
color = ESCALA_HELADAS(x['properties']['t'])
return color
def obtener_estadisticas(shpfile, raster_entrada, archivo_salida, tipo_salida='shp'):
"""
    Computes local (zonal) statistics (minimum, maximum and mean) of the input raster over the area of the given shapefile.
    The statistics can be written out as a shapefile or as a CSV.
    shpfile: path to the shapefile. (str)
    raster_entrada: path to the raster to analyze. (str)
    archivo_salida: file where the results are saved. (str)
    tipo_salida: output file type, shapefile ('shp') or CSV ('csv'). Defaults to 'shp'. (str)
"""
    # Open the shapefile with fiona and collect its geometries
with fiona.open(shpfile) as records:
geometries = [sgeom.shape(shp['geometry'])
for shp in records]
    # Compute the minimum, maximum and mean zonal statistics
zs = zonal_stats(geometries, raster_entrada)
    # Open the shapefile with GeoPandas
tabla_shape = gpd.read_file(shpfile)
    # Build a DataFrame with the computed statistics
goesstats_df = pd.DataFrame(zs)
    # Rename the columns
goesstats_df.rename(columns={'min': 'LST_min', 'mean': 'LST_mean', 'max': 'LST_max'}, inplace=True)
    # Concatenate the two tables
tabla_shape = pd.concat([tabla_shape, goesstats_df], axis=1)
    # Write the results to disk
if tipo_salida == 'csv':
tabla_shape.drop('geometry', axis=1).to_csv(archivo_salida)
else:
tabla_shape.to_file(archivo_salida)
return
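# Hedged usage sketch (the raster and CSV paths below are hypothetical):
#   obtener_estadisticas(shpfile=SHAPEFILES_ADEC_DEP_POL,
#                        raster_entrada="lst_nocturna.tif",
#                        archivo_salida="lst_stats.csv",
#                        tipo_salida="csv")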
def minimo_folium(path_minimo_shp: str, path_folium_temp_min: Path) -> bool:
"""
    Generates the HTML map with the statistics of the average minimum temperature recorded per department/district,
    together with the corresponding isotherms.
"""
m = folium.Map(location=[-32.1, -64], zoom_start=6.5, control_scale=True, tiles=None)
tile = 'https://ide.ign.gob.ar/geoservicios/rest/services/Mapas_IGN/mapa_topografico/MapServer/tile/{z}/{y}/{x}'
attribute = 'Mapa del <a href="http://www.ign.gob.ar">Instituto Geográfico Nacional</a>, ' + \
'capa de calles por colaboradores de © <a href="http://openstreetmap.org">OpenStreetMap</a>'
folium.TileLayer(tiles=tile, attr=attribute, name='IGN ').add_to(m)
tile = 'http://wms.ign.gob.ar/geoserver/gwc/service/tms/1.0.0/capabaseargenmap@EPSG:3857@png/{z}/{x}/{-y}.png'
attribute = "Argenmap v2 - Instituto Geográfico Nacional"
folium.TileLayer(tiles=tile, attr=attribute, name='IGN Argenmap v2').add_to(m)
folium.TileLayer(tiles='openstreetmap', name='OSM').add_to(m)
collection = list(fiona.open(path_minimo_shp, 'r'))
df1 = | pd.DataFrame(collection) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
#import os
import numpy as np
import pandas as pd
from unittest import TestCase
from exatomic import gaussian
from exatomic.base import resource
from exatomic.gaussian import Output, Fchk
class TestFchk(TestCase):
def setUp(self):
self.mam1 = Fchk(resource('g09-ch3nh2-631g.fchk'))
self.mam2 = Fchk(resource('g09-ch3nh2-augccpvdz.fchk'))
self.mam3 = Fchk(resource('g16-methyloxirane-def2tzvp-freq.fchk'))
self.mam4 = Fchk(resource('g16-h2o2-def2tzvp-freq.fchk'))
self.nitro_nmr = Fchk(resource('g16-nitromalonamide-6-31++g-nmr.fchk'))
def test_parse_atom(self):
self.mam1.parse_atom()
self.assertEqual(self.mam1.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.atom))))
self.mam2.parse_atom()
self.assertEqual(self.mam2.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam2.atom))))
def test_parse_basis_set(self):
self.mam1.parse_basis_set()
self.assertEqual(self.mam1.basis_set.shape[0], 32)
self.assertTrue(np.all(pd.notnull(pd.DataFrame(self.mam1.basis_set))))
self.mam2.parse_basis_set()
self.assertEqual(self.mam2.basis_set.shape[0], 53)
self.assertTrue(np.all(pd.notnull( | pd.DataFrame(self.mam2.basis_set) | pandas.DataFrame |
# coding:utf-8
import os
from pathlib import Path
import sys
import argparse
import pdb
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from tqdm import tqdm
import pickle
import time
from datetime import datetime, timedelta
from sklearn.metrics import confusion_matrix
from functools import partial
import scipy as sp
import matplotlib.pyplot as plt
#from matplotlib_venn import venn2
import lightgbm as lgb
from sklearn import preprocessing
import seaborn as sns
import gc
import psutil
import os
from IPython.display import FileLink
import statistics
import json
import ast
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_validate
import collections
import random
import functools
from sklearn.metrics import roc_curve,auc,accuracy_score,confusion_matrix,f1_score,classification_report
from sklearn.metrics import mean_squared_error
# The metric in question
from sklearn.metrics import cohen_kappa_score
import copy
from sklearn.model_selection import StratifiedKFold, KFold, train_test_split
import itertools
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from distutils.util import strtobool
import math
from scipy.sparse import csr_matrix, save_npz, load_npz
from typing import Union
from sklearn.decomposition import PCA
#import dask.dataframe as dd
import re
from sklearn.cluster import KMeans
from contextlib import contextmanager
from collections import deque
#import eli5
#from eli5.sklearn import PermutationImportance
import shutil
import array
#import sqlite3
#from tsfresh.utilities.dataframe_functions import roll_time_series
#from tsfresh import extract_features
SEED_NUMBER=2020
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
set_seed(SEED_NUMBER)
pd.set_option('display.max_columns', 5000)
pd.set_option('display.max_rows', 1000)
EMPTY_NUM=-999
# https://github.com/lopuhin/kaggle-imet-2019/blob/master/imet/utils.py#L17
ON_KAGGLE = False#'KAGGLE_URL_BASE'in os.environ
#print(" os.environ :", os.environ)
print("ON_KAGGLE:", ON_KAGGLE)
if not ON_KAGGLE:
#import slackweb
try:
import wandb
from wandb.lightgbm import wandb_callback
except:
print(f"error : cannot import wandb")
else:
import warnings
warnings.simplefilter('ignore')
PROJECT_NAME = "probspace_kiva"
INPUT_DIR = Path("../data/raw")
PROC_DIR = Path("../data/proc")
LOG_DIR = Path("../data/log")
OUTPUT_DIR = Path("../data/submission")
PATH_TO_GRAPH_DIR=Path("../data/graph")
PATH_TO_MODEL_DIR=Path("../data/model")
PATH_TO_UPLOAD_MODEL_PARENT_DIR=Path("../data/model")
PATH_TO_FEATURES_DIR=Path("../data/features")
class Colors:
"""Defining Color Codes to color the text displayed on terminal.
"""
blue = "\033[94m"
green = "\033[92m"
yellow = "\033[93m"
red = "\033[91m"
end = "\033[0m"
def color(string: str, color: Colors = Colors.yellow) -> str:
return f"{color}{string}{Colors.end}"
@contextmanager
def timer2(label: str) -> None:
"""compute the time the code block takes to run.
"""
p = psutil.Process(os.getpid())
start = time.time() # Setup - __enter__
m0 = p.memory_info()[0] / 2. ** 30
print(color(f"{label}: Start at {start}; RAM USAGE AT START {m0}"))
try:
yield # yield to body of `with` statement
finally: # Teardown - __exit__
m1 = p.memory_info()[0] / 2. ** 30
delta = m1 - m0
sign = '+' if delta >= 0 else '-'
delta = math.fabs(delta)
end = time.time()
print(color(f"{label}: End at {end} ({end - start}[s] elapsed); RAM USAGE AT END {m1:.2f}GB ({sign}{delta:.2f}GB)", color=Colors.red))
@contextmanager
def trace(title):
t0 = time.time()
p = psutil.Process(os.getpid())
m0 = p.memory_info()[0] / 2. ** 30
yield
m1 = p.memory_info()[0] / 2. ** 30
delta = m1 - m0
sign = '+' if delta >= 0 else '-'
delta = math.fabs(delta)
print(f"[{m1:.1f}GB({sign}{delta:.1f}GB):{time.time() - t0:.1f}sec] {title} ", file=sys.stderr)
def cpu_dict(my_dictionary, text=None):
size = sys.getsizeof(json.dumps(my_dictionary))
#size += sum(map(sys.getsizeof, my_dictionary.values())) + sum(map(sys.getsizeof, my_dictionary.keys()))
print(f"{text} size : {size}")
def cpu_stats(text=None):
#if not ON_KAGGLE:
pid = os.getpid()
py = psutil.Process(pid)
memory_use = py.memory_info()[0] / 2. ** 30
print('{} memory GB:'.format(text) + str(memory_use))#str(np.round(memory_use, 2)))
def reduce_mem_Series(se, verbose=True, categories=False):
numeric2reduce = ["int16", "int32", "int64", "float64"]
col_type = se.dtype
best_type = None
if (categories==True) & (col_type == "object"):
se = se.astype("category")
best_type = "category"
elif col_type in numeric2reduce:
downcast = "integer" if "int" in str(col_type) else "float"
se = pd.to_numeric(se, downcast=downcast)
best_type = se.dtype.name
if verbose and best_type is not None and best_type != str(col_type):
print(f"Series '{se.index}' converted from {col_type} to {best_type}")
return se
def reduce_mem_usage(df, verbose=True, categories=False):
# All types that we want to change for "lighter" ones.
# int8 and float16 are not include because we cannot reduce
# those data types.
# float32 is not include because float16 has too low precision.
numeric2reduce = ["int16", "int32", "int64", "float64"]
start_mem = 0
if verbose:
start_mem = df.memory_usage().sum() / 1024**2
#start_mem = memory_usage_mb(df, deep=deep)
for col, col_type in df.dtypes.iteritems():
best_type = None
if (categories==True) & (col_type == "object"):
df[col] = df[col].astype("category")
best_type = "category"
elif col_type in numeric2reduce:
downcast = "integer" if "int" in str(col_type) else "float"
df[col] = pd.to_numeric(df[col], downcast=downcast)
best_type = df[col].dtype.name
# Log the conversion performed.
if verbose and best_type is not None and best_type != str(col_type):
print(f"Column '{col}' converted from {col_type} to {best_type}")
if verbose:
#end_mem = memory_usage_mb(df, deep=deep)
end_mem = df.memory_usage().sum() / 1024**2
diff_mem = start_mem - end_mem
percent_mem = 100 * diff_mem / start_mem
print(f"Memory usage decreased from"
f" {start_mem:.2f}MB to {end_mem:.2f}MB"
f" ({diff_mem:.2f}MB, {percent_mem:.2f}% reduction)")
return df
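# Minimal usage sketch (toy frame, not project data): downcasting numeric columns and
# converting strings to categories.
#   df_demo = pd.DataFrame({"a": np.arange(1000, dtype="int64"),
#                           "b": np.random.rand(1000).astype("float64"),
#                           "c": ["x"] * 1000})
#   df_demo = reduce_mem_usage(df_demo, categories=True)  # int64->int16, float64->float32, object->category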
@contextmanager
def timer(name):
t0 = time.time()
yield
print(f'[{name}] done in {time.time() - t0:.6f} s')
def normal_sampling(mean, label_k, std=2, under_limit=1e-15):
val = math.exp(-(label_k-mean)**2/(2*std**2))/(math.sqrt(2*math.pi)*std)
if val < under_limit:
val = under_limit
return val
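# Worked example (approximate values): soft labels around mean=3 with the default std=2.
#   [round(normal_sampling(3, k), 3) for k in range(6)]
#   # -> [0.065, 0.121, 0.176, 0.199, 0.176, 0.121]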
def compHist(np_oof, np_y_pred, np_y_true, title_str):
np_list = [np_oof, np_y_true, np_y_pred]
label_list = ["oof", "true", "pred"]
color_list = ['red', 'blue', 'green']
for np_data, label, color in zip(np_list, label_list, color_list):
sns.distplot(
np_data,
#bins=sturges(len(data)),
color=color,
kde=True,
label=label
)
plt.savefig(str(PATH_TO_GRAPH_DIR / f"{title_str}_compHist.png"))
plt.close()
def compPredTarget(y_pred, y_true, index_list, title_str, lm_flag=False):
df_total = pd.DataFrame({"Prediction" : y_pred.flatten(),
"Target" : y_true.flatten(),
"Difference" : y_true.flatten() -y_pred.flatten()
#"type" : np.full(len(y_pred), "oof")
}, index=index_list)
print(df_total)
print("Difference > 0.1 : ", df_total[np.abs(df_total["Difference"]) > 0.1].Difference.count())
#print(df_total[df_total["type"]=="valid_train"].Difference)
fig = plt.figure()
sns.displot(df_total.Difference,bins=10)
plt.savefig(str(PATH_TO_GRAPH_DIR / f"{title_str}_oof_diff_distplot.png"))
plt.close()
#pdb.set_trace()
if lm_flag:
plt.figure()
fig2 = sns.lmplot(x="Target", y="Prediction", data=df_total, palette="Set1")
#fig.set_axis_labels('target', 'pred')
plt.title(title_str)
plt.tight_layout()
plt.savefig(str(PATH_TO_GRAPH_DIR / f"{title_str}_oof_true_lm.png"))
plt.close()
def dimensionReductionPCA(df, _n_components, prefix="PCA_"):
pca = PCA(n_components=_n_components)
pca.fit(df)
reduced_feature = pca.transform(df)
df_reduced = pd.DataFrame(reduced_feature, columns=[f"{prefix}{x + 1}" for x in range(_n_components)], index=df.index)
print(f"df_reduced:{df_reduced}")
df_tmp = pd.DataFrame(pca.explained_variance_ratio_, index=[f"{prefix}{x + 1}" for x in range(_n_components)])
print(df_tmp)
import matplotlib.ticker as ticker
plt.gca().get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))
plt.plot([0] + list( np.cumsum(pca.explained_variance_ratio_)), "-o")
plt.xlabel("Number of principal components")
plt.ylabel("Cumulative contribution rate")
plt.grid()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_PCA.png")
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
# df_comp = pd.DataFrame(pca.components_, columns=df.columns, index=[f"{prefix}{x + 1}" for x in range(_n_components)])
# print(df_comp)
# plt.figure(figsize=(6, 6))
# for x, y, name in zip(pca.components_[0], pca.components_[1], df.columns):
# plt.text(x, y, name)
# plt.scatter(pca.components_[0], pca.components_[1], alpha=0.8)
# plt.grid()
# plt.xlabel("PC1")
# plt.ylabel("PC2")
# path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_PCA_scatter.png")
# #print("save: ", path_to_save)
# plt.savefig(path_to_save)
# plt.show(block=False)
# plt.close()
return df_reduced
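# Illustrative usage sketch (assumes `df_features` is a numeric, NaN-free DataFrame
# and that PCA has already been imported from sklearn.decomposition, as the function
# above requires):
# df_pca = dimensionReductionPCA(df_features, _n_components=5, prefix="PCA_")
# df_features = pd.concat([df_features, df_pca], axis=1)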
def addNanPos(df, cols_list:list, suffix="nan_pos"):
for col in cols_list:
if df[col].isnull().any():
df["{}_{}".format(col, suffix)] = df[col].map(lambda x: 1 if pd.isna(x) else 0)
return df
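# Illustrative usage sketch: flag rows that were NaN before imputation so the model
# can still see where values were originally missing (column names are hypothetical):
# df_train = addNanPos(df_train, ["age", "income"])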
def get_feature_importances(X, y, shuffle=False):
    # Shuffle the target variable if requested
if shuffle:
y = np.random.permutation(y)
    # Fit the model
clf = RandomForestClassifier(random_state=42)
clf.fit(X, y)
    # Build a dataframe holding the feature importances
imp_df = pd.DataFrame()
imp_df["feature"] = X.columns
imp_df["importance"] = clf.feature_importances_
return imp_df.sort_values("importance", ascending=False)
def nullImporcance(df_train_X, df_train_y, th=80, n_runs=100):
    # Fit a model on the real target and collect the actual feature importances
actual_imp_df = get_feature_importances(df_train_X, df_train_y, shuffle=False)
    # Fit models on shuffled targets to build the null-importance distribution
N_RUNS = n_runs
null_imp_df = pd.DataFrame()
for i in range(N_RUNS):
print("run : {}".format(i))
imp_df = get_feature_importances(df_train_X, df_train_y, shuffle=True)
imp_df["run"] = i + 1
        null_imp_df = pd.concat([null_imp_df, imp_df])
# Import required modules
import requests
import pandas as pd
import json
import subprocess
from tqdm import tqdm
import re
# Set pandas to show full rows and columns
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
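# The fixtures above give the tests a 5-day close series counting 1..5, a 3-column
# tiled copy of it ('a', 'b', 'c'), and a larger random frame used for heavier runs.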
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
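# Illustrative note (not a test): the three helpers above only fix the `direction`
# argument of vbt.Portfolio.from_orders, e.g.
# pf = from_orders_both(close=price, size=order_size)
# pf.order_records  # one structured record per filled order, as asserted below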
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_lock_cash(self):
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
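# Illustration: call_seq controls the order in which columns of a group are processed on
# each bar. 'reversed' flips the default order, 'random' shuffles it (reproducible via
# seed), and 'auto' re-sorts every bar so that columns being sold are called first -- in
# the rotation example above each bar's sell executes before the buy, which is what frees
# the shared cash to fund that buy within the same bar.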
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
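# Illustration: with size_type='value' the requested size is a cash value converted to
# shares at the valuation price. order_size_one (defined earlier in this file) appears to
# request 1, -1, nan, 1, -1, so the filled amounts are 1/price: 1/1=1, 1/2=0.5, 1/4=0.25,
# 1/5=0.2, matching the records above.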
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
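# Illustration: size_type='targetvalue' keeps the position worth the target cash value,
# so the target amount each bar is 50/close = [50, 25, 16.67, 12.5, 10] shares and each
# order trades only the difference from the current holding: +50, -25, -8.33, -4.17, -2.5.
# In the grouped cash-sharing case the shared 100 only funds columns 'a' and 'b' at bar 0;
# 'c' reaches its target a bar later, once the others' sells free up cash.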
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
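# Illustration (init_cash=100, close=[1, 2, 3, 4, 5]): 'targetpercent' rebalances the
# position to 50% of current equity every bar:
#   bar 0: equity 100 -> target value 50 @ 1 -> buy 50 shares
#   bar 1: cash 50 + 50 shares @ 2 = 150 -> target 75 = 37.5 shares -> sell 12.5
#   bar 2: cash 75 + 37.5 @ 3 = 187.5 -> target 93.75 = 31.25 shares -> sell 6.25
#   bar 3: equity 218.75 -> target 27.34375 shares -> sell 3.90625, and so on.
# In the grouped cash-sharing case 'a' and 'b' each take 50 at bar 0, exhausting the pool;
# afterwards they already sit at their 50% targets and 'c' has no cash left, so no further
# orders appear.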
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
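# Illustration: size_type='percent' spends the given fraction of the *available* cash on
# each buy, so with 50% the free cash halves every bar: 100 -> 50 -> 25 -> ..., giving
# 50/1=50, 25/2=12.5, 12.5/3~=4.167, 6.25/4=1.5625, 3.125/5=0.625 shares. With cash
# sharing, each successive column in the call sequence gets 50% of whatever is left:
# 'a' 50, 'b' 25, 'c' 12.5 at bar 0.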
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
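# Illustration: order and log records are preallocated per column. The default simulation
# on price_wide produces 3 orders per column (9 in total) and one log record per bar per
# column (3 * 5 = 15), so max_orders=8 and max_logs=14 overflow the buffers and raise.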
# ############# from_signals ############# #
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_multiple_columns(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 200., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 100., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0),
(2, 1, 0, 100., 1., 0., 1), (3, 1, 3, 50., 4., 0., 0),
(4, 2, 0, 100., 1., 0., 1), (5, 2, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_custom_signal_func(self):
@njit
def signal_func_nb(c, long_num_arr, short_num_arr):
long_num = nb.get_elem_nb(c, long_num_arr)
short_num = nb.get_elem_nb(c, short_num_arr)
is_long_entry = long_num > 0
is_long_exit = long_num < 0
is_short_entry = short_num > 0
is_short_exit = short_num < 0
return is_long_entry, is_long_exit, is_short_entry, is_short_exit
pf_base = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
entries=pd.Series([True, False, False, False, False]),
exits=pd.Series([False, False, True, False, False]),
short_entries=pd.Series([False, True, False, True, False]),
short_exits=pd.Series([False, False, False, False, True]),
size=1,
upon_opposite_entry='ignore'
)
pf = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
signal_func_nb=signal_func_nb,
signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
broadcast_named_args=dict(
long_num_arr=pd.Series([1, 0, -1, 0, 0]),
short_num_arr=pd.Series([0, 1, 0, 1, -1])
),
size=1,
upon_opposite_entry='ignore'
)
record_arrays_close(
pf_base.order_records,
pf.order_records
)
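# Illustration: signal_func_nb is called once per element and must return four booleans
# (long entry, long exit, short entry, short exit). Here positive/negative values in the
# broadcast arrays stand in for the four explicit signal arrays, which is why the
# resulting orders match pf_base built from plain entries/exits above.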
def test_amount(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 2.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_value(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 0.3125, 4.0, 0.0, 1),
(2, 1, 4, 0.1775, 5.0, 0.0, 1), (3, 2, 0, 100.0, 1.0, 0.0, 0),
(4, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception):
_ = from_signals_both(size=0.5, size_type='percent')
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1), (2, 0, 4, 25., 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close',
accumulate=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 0),
(2, 0, 3, 62.5, 4.0, 0.0, 1), (3, 0, 4, 27.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 3, 37.5, 4., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 25., 1., 0., 0),
(2, 2, 0, 12.5, 1., 0., 0), (3, 0, 3, 50., 4., 0., 1),
(4, 1, 3, 25., 4., 0., 1), (5, 2, 3, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 3, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 0, 3, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 3, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
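# Illustration: price=np.inf appears to mean "fill at the current close" (the records
# match the default runs above), while price=-np.inf fills at the bar's open; since no
# open is passed here, the previous close is used instead, which is why the first signal
# executes one bar later at 1.0 and the exit at bar 3 fills at 3.0 (the close of bar 2).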
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
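# Illustration: val_price sets the price used to value the position when sizing by value.
# The assertions above equate val_price=np.inf with valuing at the current order price
# (the close) and val_price=-np.inf with the latest known valuation price, i.e. the
# previous close forward-filled over NaNs -- unless ffill_val_price=False, in which case
# the raw shifted series (NaNs included) is used.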
def test_fees(self):
record_arrays_close(
from_signals_both(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.8, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.4, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.4, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_both(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.1, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_both(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 2.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 1.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 0.9, 0.0, 1),
(3, 1, 3, 1.0, 4.4, 0.0, 0), (4, 2, 0, 1.0, 0.0, 0.0, 1), (5, 2, 3, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
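# Illustration of the cost math asserted above:
#   fees:        cost = |size| * fill_price * fees, e.g. selling 2 @ 4 with fees=0.1 -> 0.8
#   fixed_fees:  a flat charge per order, independent of size
#   slippage:    the fill price moves against the order: buy @ price * (1 + slippage),
#                sell @ price * (1 - slippage), e.g. 10% -> buy @ 1.1, sell @ 3.6, and
#                100% slippage drives the sell price to 0.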
def test_min_size(self):
record_arrays_close(
from_signals_both(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_both(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 0, 4, 0.5, 5.0, 0.0, 1),
(3, 1, 0, 1.0, 1.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1), (4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 3, 0.5, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_both(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1), (2, 1, 3, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 3, 275.0, 4.0, 0.0, 0), (2, 1, 0, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_both(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_signals_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 3, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_accumulate(self):
record_arrays_close(
from_signals_both(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 3.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 1.0, 4.0, 0.0, 1), (8, 2, 4, 1.0, 5.0, 0.0, 1),
(9, 3, 0, 1.0, 1.0, 0.0, 0), (10, 3, 1, 1.0, 2.0, 0.0, 0), (11, 3, 3, 1.0, 4.0, 0.0, 1),
(12, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 2.0, 4.0, 0.0, 1), (5, 2, 0, 1.0, 1.0, 0.0, 0),
(6, 2, 3, 1.0, 4.0, 0.0, 1), (7, 3, 0, 1.0, 1.0, 0.0, 0), (8, 3, 1, 1.0, 2.0, 0.0, 0),
(9, 3, 3, 1.0, 4.0, 0.0, 1), (10, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 1, 1.0, 2.0, 0.0, 1), (4, 1, 3, 2.0, 4.0, 0.0, 0), (5, 2, 0, 1.0, 1.0, 0.0, 1),
(6, 2, 3, 1.0, 4.0, 0.0, 0), (7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 1),
(9, 3, 3, 1.0, 4.0, 0.0, 0), (10, 3, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
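# Illustration: 'accumulate' controls what repeated signals do to an open position:
# 'disabled' ignores them, 'addonly' lets repeated entries add to the position,
# 'removeonly' lets repeated exits reduce it in steps, and 'both' allows both, which is
# why the addonly/both columns above show an extra buy at bar 1.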
def test_upon_long_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_long_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 1, 1.0, 2.0, 0.0, 0), (5, 2, 2, 1.0, 3.0, 0.0, 1),
(6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 0),
(8, 5, 1, 1.0, 2.0, 0.0, 0), (9, 5, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_short_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_short_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 1),
(1, 1, 0, 1.0, 1.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 1, 1.0, 2.0, 0.0, 1), (5, 2, 2, 1.0, 3.0, 0.0, 0),
(6, 3, 1, 1.0, 2.0, 0.0, 1), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 5, 1, 1.0, 2.0, 0.0, 1), (9, 5, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_dir_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_dir_conflict=[[
'ignore',
'long',
'short',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 1, 1.0, 2.0, 0.0, 0), (6, 2, 2, 1.0, 3.0, 0.0, 1),
(7, 3, 1, 1.0, 2.0, 0.0, 0), (8, 3, 2, 1.0, 3.0, 0.0, 0),
(9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 1),
(11, 5, 1, 1.0, 2.0, 0.0, 0), (12, 5, 2, 1.0, 3.0, 0.0, 1),
(13, 6, 1, 1.0, 2.0, 0.0, 1), (14, 6, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_opposite_entry(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False]
]),
exits=pd.DataFrame([
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True]
]),
size=1.,
upon_opposite_entry=[[
'ignore',
'ignore',
'close',
'close',
'closereduce',
'closereduce',
'reverse',
'reverse',
'reversereduce',
'reversereduce'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 1),
(2, 2, 0, 1.0, 1.0, 0.0, 0), (3, 2, 1, 1.0, 2.0, 0.0, 1), (4, 2, 2, 1.0, 3.0, 0.0, 0),
(5, 3, 0, 1.0, 1.0, 0.0, 1), (6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 4, 0, 1.0, 1.0, 0.0, 0), (9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 0),
(11, 5, 0, 1.0, 1.0, 0.0, 1), (12, 5, 1, 1.0, 2.0, 0.0, 0), (13, 5, 2, 1.0, 3.0, 0.0, 1),
(14, 6, 0, 1.0, 1.0, 0.0, 0), (15, 6, 1, 2.0, 2.0, 0.0, 1), (16, 6, 2, 2.0, 3.0, 0.0, 0),
(17, 7, 0, 1.0, 1.0, 0.0, 1), (18, 7, 1, 2.0, 2.0, 0.0, 0), (19, 7, 2, 2.0, 3.0, 0.0, 1),
(20, 8, 0, 1.0, 1.0, 0.0, 0), (21, 8, 1, 2.0, 2.0, 0.0, 1), (22, 8, 2, 2.0, 3.0, 0.0, 0),
(23, 9, 0, 1.0, 1.0, 0.0, 1), (24, 9, 1, 2.0, 2.0, 0.0, 0), (25, 9, 2, 2.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(**kwargs, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 2, 1.0, 3.0, 0.0, 0),
(2, 1, 0, 1.0, 1.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 1, 1.0, 2.0, 0.0, 1), (6, 2, 2, 1.0, 3.0, 0.0, 0),
(7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 0), (9, 3, 2, 1.0, 3.0, 0.0, 1),
(10, 4, 0, 1.0, 1.0, 0.0, 0), (11, 4, 1, 1.0, 2.0, 0.0, 1), (12, 4, 2, 1.0, 3.0, 0.0, 0),
(13, 5, 0, 1.0, 1.0, 0.0, 1), (14, 5, 1, 1.0, 2.0, 0.0, 0), (15, 5, 2, 1.0, 3.0, 0.0, 1),
(16, 6, 0, 1.0, 1.0, 0.0, 0), (17, 6, 1, 2.0, 2.0, 0.0, 1), (18, 6, 2, 2.0, 3.0, 0.0, 0),
(19, 7, 0, 1.0, 1.0, 0.0, 1), (20, 7, 1, 2.0, 2.0, 0.0, 0), (21, 7, 2, 2.0, 3.0, 0.0, 1),
(22, 8, 0, 1.0, 1.0, 0.0, 0), (23, 8, 1, 1.0, 2.0, 0.0, 1), (24, 8, 2, 1.0, 3.0, 0.0, 0),
(25, 9, 0, 1.0, 1.0, 0.0, 1), (26, 9, 1, 1.0, 2.0, 0.0, 0), (27, 9, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_both(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 3, 1.0, 4.0, 0.0, 1), (1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 3, 2.0, 4.0, 0.0, 1),
(3, 2, 0, 1.0, 1.0, 0.0, 0), (4, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1), (2, 2, 0, 1.0, 1.0, 0.0, 0),
(3, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 0.25, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 0.5, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_both(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_signals_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 1, 0],
[1, 0, 2]
])
)
pf = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100.0, 1.0, 0.0, 0), (1, 2, 1, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 1, 2, 100.0, 1.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 0, 3, 100.0, 1.0, 0.0, 1),
(6, 2, 3, 100.0, 1.0, 0.0, 0), (7, 2, 4, 100.0, 1.0, 0.0, 1), (8, 1, 4, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
def test_sl_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.0, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 3, 20.0, 2.0, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.25, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 1, 20.0, 4.25, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0), (6, 3, 1, 20.0, 4.0, 0.0, 1),
(7, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1),
(4, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 2.0, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 3, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0),
(4, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.75, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 1, 100.0, 1.75, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1), (6, 3, 1, 100.0, 2.0, 0.0, 0),
(7, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
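# Illustration: sl_stop places the stop at entry_price * (1 - sl_stop) for longs (and
# * (1 + sl_stop) for shorts). With only close data the exit fills at the first close at
# or beyond the level: entry @ 5 with 0.1 -> level 4.5 -> exit @ close 4; with 0.5 ->
# level 2.5 -> exit @ close 2. When open/high/low are supplied the check is intrabar: a
# bar that opens beyond the level fills at the open (4.25 above), otherwise at the stop
# level itself (4.0 for sl_stop=0.2).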
def test_ts_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(ts_stop=-0.1)
close = pd.Series([4., 5., 4., 3., 2.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.0, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 4, 25.0, 2.0, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.0, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1),
(4, 3, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.25, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 2, 25.0, 4.25, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0), (6, 3, 2, 25.0, 4.125, 0.0, 1),
(7, 4, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.25, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1), (4, 2, 1, 25.0, 5.25, 0.0, 0),
(5, 3, 0, 25.0, 4.0, 0.0, 1), (6, 3, 1, 25.0, 5.25, 0.0, 0),
(7, 4, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([2., 1., 2., 3., 4.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 1.0, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0),
(4, 3, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 2.0, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 4, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 0.75, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0), (4, 2, 1, 50.0, 0.5, 0.0, 1),
(5, 3, 0, 50.0, 2.0, 0.0, 0),
(6, 4, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 1.75, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 2, 50.0, 1.75, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1), (6, 3, 2, 50.0, 1.75, 0.0, 0),
(7, 4, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
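# Illustration: with sl_trail=True the stop trails the running peak since entry, i.e.
# level = peak * (1 - sl_stop) for longs. With close=[4, 5, 4, 3, 2] and 0.1 the peak
# rises to 5 at bar 1, putting the level at 4.5, so the exit fills at close 4 on bar 2;
# with 0.5 the level is 2.5 and the exit comes at close 2 on bar 4. Supplying highs lets
# the peak (and hence the level) ratchet up intrabar, as in the OHLC cases above.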
def test_tp_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(tp_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.0, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 3, 20.0, 2.0, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0),
(4, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.25, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 1, 20.0, 4.25, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1), (6, 3, 1, 20.0, 4.0, 0.0, 0),
(7, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 2.0, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 3, 100.0, 4.0, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 1.75, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 1, 100.0, 1.75, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0), (6, 3, 1, 100.0, 2.0, 0.0, 1),
(7, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1),
(4, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
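    # stop_entry_price selects the base price the stop distance is measured from
    # ('val_price', 'price', 'fillprice' or 'close').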
def test_stop_entry_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='val_price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.625, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.75, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='fillprice',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 3.0250000000000004, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 3, 16.52892561983471, 1.5125000000000002, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='close',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.5, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
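    # stop_exit_price selects the price the stop exit order is filled at
    # ('stoplimit', 'stopmarket', 'close' or 'price').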
def test_stop_exit_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 4.25, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.5, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stopmarket', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.825, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.25, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.125, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='close', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.6, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.7, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='price', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.9600000000000004, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.97, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9900000000000001, 0.0, 1)
], dtype=order_dt)
)
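    # upon_stop_exit: conflict mode applied once a stop is hit
    # ('close', 'closereduce', 'reverse', 'reversereduce'), with and without accumulation.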
def test_upon_stop_exit(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']],
accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
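    # upon_stop_update: how an existing stop reacts to a repeated entry signal
    # carrying a new (possibly NaN) stop value ('keep', 'override', 'overridenan').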
def test_upon_stop_update(self):
entries = pd.Series([True, True, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
sl_stop = pd.Series([0.4, np.nan, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override', 'overridenan']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 2, 2.0, 3.0, 0.0, 1),
(6, 2, 0, 1.0, 5.0, 0.0, 0), (7, 2, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
sl_stop = pd.Series([0.4, 0.4, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 3, 2.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
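    # adjust_sl_func_nb can rewrite the active (stop, trail) pair on every bar;
    # zeroing the stop after dur bars forces an exit.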
def test_adjust_sl_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
@njit
def adjust_sl_func_nb(c, dur):
return 0. if c.i - c.init_i >= dur else c.curr_stop, c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0), (1, 0, 2, 20.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_ts_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([10., 11., 12., 11., 10.], index=price.index)
@njit
def adjust_sl_func_nb(c, dur):
return 0. if c.i - c.curr_i >= dur else c.curr_stop, c.curr_trail
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 10.0, 10.0, 0.0, 0), (1, 0, 4, 10.0, 10.0, 0.0, 1)
], dtype=order_dt)
)
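    # adjust_tp_func_nb is the take-profit counterpart and returns only the new stop value.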
def test_adjust_tp_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
@njit
def adjust_tp_func_nb(c, dur):
return 0. if c.i - c.init_i >= dur else c.curr_stop
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=np.inf, adjust_tp_func_nb=adjust_tp_func_nb, adjust_tp_args=(2,)).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
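    # max_orders / max_logs must be large enough to hold every generated record,
    # otherwise the simulation raises.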
def test_max_orders(self):
_ = from_signals_both(close=price_wide)
_ = from_signals_both(close=price_wide, max_orders=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, max_orders=5)
def test_max_logs(self):
_ = from_signals_both(close=price_wide, log=True)
_ = from_signals_both(close=price_wide, log=True, max_logs=6)
with pytest.raises(Exception):
_ = from_signals_both(close=price_wide, log=True, max_logs=5)
# ############# from_holding ############# #
class TestFromHolding:
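    # from_holding is equivalent to from_signals with a single entry on the first bar and no exits.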
def test_from_holding(self):
record_arrays_close(
vbt.Portfolio.from_holding(price).order_records,
vbt.Portfolio.from_signals(price, True, False, accumulate=False).order_records
)
# ############# from_random_signals ############# #
class TestFromRandomSignals:
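    # n draws an exact number of random entry/exit pairs per column; the result must match
    # an explicit from_signals call, and multiple n values produce a 'randnx_n' column index.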
def test_from_random_n(self):
result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, True, False, False],
[False, True, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [True, False], [False, True], [False, False], [False, False]],
[[False, False], [False, True], [False, False], [False, True], [True, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.Int64Index([1, 2], dtype='int64', name='randnx_n')
)
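    # prob generates entries/exits by per-bar probability; multiple probabilities
    # produce a (rprobnx_entry_prob, rprobnx_exit_prob) column MultiIndex.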
def test_from_random_prob(self):
result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, False, False, False],
[False, False, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [False, False], [False, False], [False, False], [True, False]],
[[False, False], [False, True], [False, False], [False, False], [False, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.MultiIndex.from_tuples(
[(0.25, 0.25), (0.5, 0.5)],
names=['rprobnx_entry_prob', 'rprobnx_exit_prob'])
)
# ############# from_order_func ############# #
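# Helper order functions used below: buy the given size on even bars and sell it on odd bars.
# The flexible variants iterate over the columns of the group themselves and return -1
# once every column has been processed.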
@njit
def order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size)
@njit
def log_order_func_nb(c, size):
_size = nb.get_elem_nb(c, size)
return nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
@njit
def flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size)
return -1, nb.order_nothing_nb()
@njit
def log_flex_order_func_nb(c, size):
if c.call_idx < c.group_len:
_size = nb.get_col_elem_nb(c, c.from_col + c.call_idx, size)
return c.from_col + c.call_idx, nb.order_nb(_size if c.i % 2 == 0 else -_size, log=True)
return -1, nb.order_nothing_nb()
class TestFromOrderFunc:
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_one_column(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price.tolist(), order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price, order_func, np.asarray(np.inf), row_wise=test_row_wise, flexible=test_flexible)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1),
(2, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (3, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(4, 0, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
@pytest.mark.parametrize("test_use_numba", [False, True])
def test_multiple_columns(self, test_row_wise, test_flexible, test_use_numba):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, vbt.Rep('size'), broadcast_named_args=dict(size=[0, 1, np.inf]),
row_wise=test_row_wise, flexible=test_flexible, use_numba=test_use_numba)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 2, 0, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 2, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 1.0, 3.0, 0.0, 0), (5, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 1, 4, 1.0, 5.0, 0.0, 0), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 1, 1.0, 2.0, 0.0, 1),
(2, 1, 2, 1.0, 3.0, 0.0, 0), (3, 1, 3, 1.0, 4.0, 0.0, 1),
(4, 1, 4, 1.0, 5.0, 0.0, 0), (5, 2, 0, 100.0, 1.0, 0.0, 0),
(6, 2, 1, 200.0, 2.0, 0.0, 1), (7, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 3, 66.66666666666669, 4.0, 0.0, 1), (9, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
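    # Grouping without cash sharing: each column keeps its own cash;
    # init_cash is reported per group as the sum over its columns.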
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_group_by(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 0, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 2, 1, 200.0, 2.0, 0.0, 1),
(6, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 0, 3, 66.66666666666669, 4.0, 0.0, 1),
(10, 1, 3, 66.66666666666669, 4.0, 0.0, 1), (11, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(12, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (13, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 0, 1, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 0, 2, 133.33333333333334, 3.0, 0.0, 0), (5, 1, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 0, 3, 66.66666666666669, 4.0, 0.0, 1), (7, 1, 3, 66.66666666666669, 4.0, 0.0, 1),
(8, 0, 4, 53.33333333333335, 5.0, 0.0, 0), (9, 1, 4, 53.33333333333335, 5.0, 0.0, 0),
(10, 2, 0, 100.0, 1.0, 0.0, 0), (11, 2, 1, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 2, 3, 66.66666666666669, 4.0, 0.0, 1),
(14, 2, 4, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
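    # With cash_sharing=True the columns of a group draw from one capital pool;
    # under the default call sequence the first column of the group consumes all of it.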
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_cash_sharing(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
group_by=np.array([0, 0, 1]), cash_sharing=True, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
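    # call_seq sets the processing order of columns within a group ('default', 'reversed', 'random');
    # 'auto' requires sorting via sort_call_seq_nb in a pre-segment function and raises otherwise.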
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_call_seq(self, test_row_wise):
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 0, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 0, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 0, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 0, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 2, 133.33333333, 3., 0., 0), (3, 0, 3, 66.66666667, 4., 0., 1),
(4, 0, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed', row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 2, 0, 100., 1., 0., 0),
(2, 1, 1, 200., 2., 0., 1), (3, 2, 1, 200., 2., 0., 1),
(4, 1, 2, 133.33333333, 3., 0., 0), (5, 2, 2, 133.33333333, 3., 0., 0),
(6, 1, 3, 66.66666667, 4., 0., 1), (7, 2, 3, 66.66666667, 4., 0., 1),
(8, 1, 4, 53.33333333, 5., 0., 0), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 2, 133.33333333, 3., 0., 0), (3, 1, 3, 66.66666667, 4., 0., 1),
(4, 1, 4, 53.33333333, 5., 0., 0), (5, 2, 0, 100., 1., 0., 0),
(6, 2, 1, 200., 2., 0., 1), (7, 2, 2, 133.33333333, 3., 0., 0),
(8, 2, 3, 66.66666667, 4., 0., 1), (9, 2, 4, 53.33333333, 5., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.asarray(np.inf), group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='auto', row_wise=test_row_wise
)
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
@njit
def pre_segment_func_nb(c, target_hold_value):
order_size = np.copy(target_hold_value[c.i, c.from_col:c.to_col])
order_size_type = np.full(c.group_len, SizeType.TargetValue)
direction = np.full(c.group_len, Direction.Both)
order_value_out = np.empty(c.group_len, dtype=np.float_)
c.last_val_price[c.from_col:c.to_col] = c.close[c.i, c.from_col:c.to_col]
nb.sort_call_seq_nb(c, order_size, order_size_type, direction, order_value_out)
return order_size, order_size_type, direction
@njit
def pct_order_func_nb(c, order_size, order_size_type, direction):
col_i = c.call_seq_now[c.call_idx]
return nb.order_nb(
order_size[col_i],
c.close[c.i, col_i],
size_type=order_size_type[col_i],
direction=direction[col_i]
)
pf = vbt.Portfolio.from_order_func(
price_wide * 0 + 1, pct_order_func_nb, group_by=np.array([0, 0, 0]),
cash_sharing=True, pre_segment_func_nb=pre_segment_func_nb,
pre_segment_args=(target_hold_value.values,), row_wise=test_row_wise)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 1, 0],
[0, 2, 1],
[1, 0, 2],
[2, 1, 0]
])
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=False),
target_hold_value
)
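    # TargetValue sizing: each order targets a position worth 50; the valuation price
    # can be fed in through a pre-segment function via c.last_val_price.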
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_value(self, test_row_wise, test_flexible):
@njit
def target_val_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
if test_flexible:
@njit
def target_val_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(50., nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetValue)
return -1, nb.order_nothing_nb()
else:
@njit
def target_val_order_func_nb(c):
return nb.order_nb(50., nb.get_elem_nb(c, c.close), size_type=SizeType.TargetValue)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb,
pre_segment_func_nb=target_val_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 4.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
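    # TargetPercent sizing: each order targets 50% of the current value;
    # same pre-segment mechanism for the valuation price.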
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_target_percent(self, test_row_wise, test_flexible):
@njit
def target_pct_pre_segment_func_nb(c, val_price):
c.last_val_price[c.from_col:c.to_col] = val_price[c.i]
return ()
if test_flexible:
@njit
def target_pct_order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(0.5, nb.get_col_elem_nb(c, col, c.close), size_type=SizeType.TargetPercent)
return -1, nb.order_nothing_nb()
else:
@njit
def target_pct_order_func_nb(c):
return nb.order_nb(0.5, nb.get_elem_nb(c, c.close), size_type=SizeType.TargetPercent)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb, row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 1, 25.0, 3.0, 0.0, 0), (1, 0, 2, 8.333333333333332, 4.0, 0.0, 1),
(2, 0, 3, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
pf = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb,
pre_segment_func_nb=target_pct_pre_segment_func_nb,
pre_segment_args=(price.iloc[:-1].values,), row_wise=test_row_wise, flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 0, 1, 25.0, 3.0, 0.0, 1),
(2, 0, 3, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
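    # update_value=True recalculates the portfolio value right after each fill,
    # so c.value_now can differ from c.value_before; with False they stay equal.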
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_update_value(self, test_row_wise, test_flexible):
if test_flexible:
@njit
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
@njit
def order_func_nb(c):
return nb.order_nb(
np.inf if c.i % 2 == 0 else -np.inf,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
@njit
def post_order_func_nb(c, value_before, value_now):
value_before[c.i, c.col] = c.value_before
value_now[c.i, c.col] = c.value_now
value_before = np.empty_like(price.values[:, None])
value_now = np.empty_like(price.values[:, None])
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=False,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
value_now
)
_ = vbt.Portfolio.from_order_func(
price,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_order_args=(value_before, value_now),
row_wise=test_row_wise,
update_value=True,
flexible=test_flexible)
np.testing.assert_array_equal(
value_before,
np.array([
[100.0],
[97.04930889128518],
[185.46988117104038],
[82.47853456223025],
[104.65775576218027]
])
)
np.testing.assert_array_equal(
value_now,
np.array([
[98.01980198019803],
[187.36243097890815],
[83.30331990785257],
[105.72569204546781],
[73.54075125567473]
])
)
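    # Inspect the running simulation state (last value, return, position records) from
    # pre-segment, order and post-order callbacks, and compare the in_sim_order accessors
    # against manually recorded arrays.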
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_states(self, test_row_wise, test_flexible):
close = np.array([
[1, 1, 1],
[np.nan, 2, 2],
[3, np.nan, 3],
[4, 4, np.nan],
[5, 5, 5]
])
size = np.array([
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1],
[-1, -1, -1],
[1, 1, 1]
])
value_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
value_arr2 = np.empty(size.shape, dtype=np.float_)
value_arr3 = np.empty(size.shape, dtype=np.float_)
return_arr1 = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr2 = np.empty(size.shape, dtype=np.float_)
return_arr3 = np.empty(size.shape, dtype=np.float_)
pos_record_arr1 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr2 = np.empty(size.shape, dtype=trade_dt)
pos_record_arr3 = np.empty(size.shape, dtype=trade_dt)
def pre_segment_func_nb(c):
value_arr1[c.i, c.group] = c.last_value[c.group]
return_arr1[c.i, c.group] = c.last_return[c.group]
for col in range(c.from_col, c.to_col):
pos_record_arr1[c.i, col] = c.last_pos_record[col]
if c.i > 0:
c.last_val_price[c.from_col:c.to_col] = c.last_val_price[c.from_col:c.to_col] + 0.5
return ()
if test_flexible:
def order_func_nb(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
value_arr2[c.i, col] = c.last_value[c.group]
return_arr2[c.i, col] = c.last_return[c.group]
pos_record_arr2[c.i, col] = c.last_pos_record[col]
return col, nb.order_nb(size[c.i, col], fixed_fees=1.)
return -1, nb.order_nothing_nb()
else:
def order_func_nb(c):
value_arr2[c.i, c.col] = c.value_now
return_arr2[c.i, c.col] = c.return_now
pos_record_arr2[c.i, c.col] = c.pos_record_now
return nb.order_nb(size[c.i, c.col], fixed_fees=1.)
def post_order_func_nb(c):
value_arr3[c.i, c.col] = c.value_now
return_arr3[c.i, c.col] = c.return_now
pos_record_arr3[c.i, c.col] = c.pos_record_now
_ = vbt.Portfolio.from_order_func(
close,
order_func_nb,
pre_segment_func_nb=pre_segment_func_nb,
post_order_func_nb=post_order_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
value_arr1,
np.array([
[100.0, 100.0],
[98.0, 99.0],
[98.5, 99.0],
[99.0, 98.0],
[99.0, 98.5]
])
)
np.testing.assert_array_equal(
value_arr2,
np.array([
[100.0, 99.0, 100.0],
[99.0, 99.0, 99.5],
[99.0, 99.0, 99.0],
[100.0, 100.0, 98.5],
[99.0, 98.5, 99.0]
])
)
np.testing.assert_array_equal(
value_arr3,
np.array([
[99.0, 98.0, 99.0],
[99.0, 98.5, 99.0],
[99.0, 99.0, 98.0],
[100.0, 99.0, 98.5],
[98.5, 97.0, 99.0]
])
)
np.testing.assert_array_equal(
return_arr1,
np.array([
[np.nan, np.nan],
[-0.02, -0.01],
[0.00510204081632653, 0.0],
[0.005076142131979695, -0.010101010101010102],
[0.0, 0.00510204081632653]
])
)
np.testing.assert_array_equal(
return_arr2,
np.array([
[0.0, -0.01, 0.0],
[-0.01, -0.01, -0.005],
[0.01020408163265306, 0.01020408163265306, 0.0],
[0.015228426395939087, 0.015228426395939087, -0.005050505050505051],
[0.0, -0.005050505050505051, 0.01020408163265306]
])
)
np.testing.assert_array_equal(
return_arr3,
np.array([
[-0.01, -0.02, -0.01],
[-0.01, -0.015, -0.01],
[0.01020408163265306, 0.01020408163265306, -0.010101010101010102],
[0.015228426395939087, 0.005076142131979695, -0.005050505050505051],
[-0.005050505050505051, -0.020202020202020204, 0.01020408163265306]
])
)
record_arrays_close(
pos_record_arr1.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr2.flatten()[3:],
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 1.0, 0.25, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.5, 0.375, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.5, -0.375, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
record_arrays_close(
pos_record_arr3.flatten(),
np.array([
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 2, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -1.0, -1.0, 0, 0, 0),
(0, 0, 1.0, 0, 1.0, 1.0, -1, np.nan, 0.0, -0.5, -0.5, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 2, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(0, 0, 2.0, 0, 2.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 0),
(0, 1, 1.0, 0, 1.0, 1.0, 1, 2.0, 1.0, -1.0, -1.0, 0, 1, 0),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -1.0, -0.3333333333333333, 0, 0, 1),
(0, 0, 2.0, 0, 2.0, 2.0, -1, 4.0, 1.0, 1.0, 0.25, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, -1, np.nan, 0.0, -1.0, -0.25, 1, 0, 1),
(1, 2, 1.0, 2, 3.0, 1.0, -1, np.nan, 0.0, -0.5, -0.16666666666666666, 0, 0, 1),
(0, 0, 3.0, 0, 3.0, 3.0, -1, 4.0, 1.0, 1.0, 0.1111111111111111, 0, 0, 0),
(1, 1, 1.0, 3, 4.0, 1.0, 4, 5.0, 1.0, -3.0, -0.75, 1, 1, 1),
(1, 2, 2.0, 2, 4.0, 2.0, -1, np.nan, 0.0, 0.0, 0.0, 0, 0, 1)
], dtype=trade_dt)
)
cash_arr = np.empty((size.shape[0], 2), dtype=np.float_)
position_arr = np.empty(size.shape, dtype=np.float_)
val_price_arr = np.empty(size.shape, dtype=np.float_)
value_arr = np.empty((size.shape[0], 2), dtype=np.float_)
return_arr = np.empty((size.shape[0], 2), dtype=np.float_)
sim_order_cash_arr = np.empty(size.shape, dtype=np.float_)
sim_order_value_arr = np.empty(size.shape, dtype=np.float_)
sim_order_return_arr = np.empty(size.shape, dtype=np.float_)
def post_order_func_nb(c):
sim_order_cash_arr[c.i, c.col] = c.cash_now
sim_order_value_arr[c.i, c.col] = c.value_now
sim_order_return_arr[c.i, c.col] = c.value_now
if c.i == 0 and c.call_idx == 0:
sim_order_return_arr[c.i, c.col] -= c.init_cash[c.group]
sim_order_return_arr[c.i, c.col] /= c.init_cash[c.group]
else:
if c.call_idx == 0:
prev_i = c.i - 1
prev_col = c.to_col - 1
else:
prev_i = c.i
prev_col = c.from_col + c.call_idx - 1
sim_order_return_arr[c.i, c.col] -= sim_order_value_arr[prev_i, prev_col]
sim_order_return_arr[c.i, c.col] /= sim_order_value_arr[prev_i, prev_col]
def post_segment_func_nb(c):
cash_arr[c.i, c.group] = c.last_cash[c.group]
for col in range(c.from_col, c.to_col):
position_arr[c.i, col] = c.last_position[col]
val_price_arr[c.i, col] = c.last_val_price[col]
value_arr[c.i, c.group] = c.last_value[c.group]
return_arr[c.i, c.group] = c.last_return[c.group]
pf = vbt.Portfolio.from_order_func(
close,
order_func_nb,
post_order_func_nb=post_order_func_nb,
post_segment_func_nb=post_segment_func_nb,
use_numba=False,
row_wise=test_row_wise,
update_value=True,
ffill_val_price=True,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
cash_arr,
pf.cash().values
)
np.testing.assert_array_equal(
position_arr,
pf.assets().values
)
np.testing.assert_array_equal(
val_price_arr,
pf.get_filled_close().values
)
np.testing.assert_array_equal(
value_arr,
pf.value().values
)
np.testing.assert_array_equal(
return_arr,
pf.returns().values
)
if test_flexible:
with pytest.raises(Exception):
pf.cash(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.value(in_sim_order=True, group_by=False)
with pytest.raises(Exception):
pf.returns(in_sim_order=True, group_by=False)
else:
np.testing.assert_array_equal(
sim_order_cash_arr,
pf.cash(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_value_arr,
pf.value(in_sim_order=True, group_by=False).values
)
np.testing.assert_array_equal(
sim_order_return_arr,
pf.returns(in_sim_order=True, group_by=False).values
)
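    # The context passed to post_sim_func_nb exposes the final state of the whole simulation
    # (order/log records and last_* arrays); verify it field by field.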
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_post_sim_ctx(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
1.,
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
return -1, nb.order_nothing_nb()
else:
def order_func(c):
return nb.order_nb(
1.,
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01,
log=True
)
def post_sim_func(c, lst):
lst.append(deepcopy(c))
lst = []
_ = vbt.Portfolio.from_order_func(
price_wide,
order_func,
post_sim_func_nb=post_sim_func,
post_sim_args=(lst,),
row_wise=test_row_wise,
update_value=True,
max_logs=price_wide.shape[0] * price_wide.shape[1],
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
c = lst[-1]
assert c.target_shape == price_wide.shape
np.testing.assert_array_equal(
c.close,
price_wide.values
)
np.testing.assert_array_equal(
c.group_lens,
np.array([2, 1])
)
np.testing.assert_array_equal(
c.init_cash,
np.array([100., 100.])
)
assert c.cash_sharing
if test_flexible:
assert c.call_seq is None
else:
np.testing.assert_array_equal(
c.call_seq,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
np.testing.assert_array_equal(
c.segment_mask,
np.array([
[True, True],
[True, True],
[True, True],
[True, True],
[True, True]
])
)
assert c.ffill_val_price
assert c.update_value
if test_row_wise:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 2, 0, 1.0, 1.01, 1.0101, 0), (3, 0, 1, 1.0, 2.02, 1.0202, 0),
(4, 1, 1, 1.0, 2.02, 1.0202, 0), (5, 2, 1, 1.0, 2.02, 1.0202, 0),
(6, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (7, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(8, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (9, 0, 3, 1.0, 4.04, 1.0404, 0),
(10, 1, 3, 1.0, 4.04, 1.0404, 0), (11, 2, 3, 1.0, 4.04, 1.0404, 0),
(12, 0, 4, 1.0, 5.05, 1.0505, 0), (13, 1, 4, 1.0, 5.05, 1.0505, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
c.order_records,
np.array([
(0, 0, 0, 1.0, 1.01, 1.0101, 0), (1, 1, 0, 1.0, 1.01, 1.0101, 0),
(2, 0, 1, 1.0, 2.02, 1.0202, 0), (3, 1, 1, 1.0, 2.02, 1.0202, 0),
(4, 0, 2, 1.0, 3.0300000000000002, 1.0303, 0), (5, 1, 2, 1.0, 3.0300000000000002, 1.0303, 0),
(6, 0, 3, 1.0, 4.04, 1.0404, 0), (7, 1, 3, 1.0, 4.04, 1.0404, 0),
(8, 0, 4, 1.0, 5.05, 1.0505, 0), (9, 1, 4, 1.0, 5.05, 1.0505, 0),
(10, 2, 0, 1.0, 1.01, 1.0101, 0), (11, 2, 1, 1.0, 2.02, 1.0202, 0),
(12, 2, 2, 1.0, 3.0300000000000002, 1.0303, 0), (13, 2, 3, 1.0, 4.04, 1.0404, 0),
(14, 2, 4, 1.0, 5.05, 1.0505, 0)
], dtype=order_dt)
)
if test_row_wise:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01, 1.0,
0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0, 97.9799,
1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598, 1.0,
0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 2),
(3, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196,
2.0, 0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0, 2.0,
0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 4),
(5, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397, 2.0,
0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 5),
(6, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191, 3.0,
0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 6),
(7, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001,
1.0, 3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 7),
(8, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 8),
(9, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0, 99.75880000000001,
1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
76.67840000000001, 4.0, 0.0, 76.67840000000001, 4.04, 101.83840000000001,
1.0, 4.04, 1.0404, 0, 0, -1, 9),
(10, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 10),
(11, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 11),
(12, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 12),
(13, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
else:
record_arrays_close(
c.log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799,
1.0, 0.0, 97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 0),
(1, 0, 1, 0, 97.9799, 0.0, 0.0, 97.9799, np.nan, 98.9899, 1.0, 1.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 95.9598,
1.0, 0.0, 95.9598, 1.01, 97.97980000000001, 1.0, 1.01, 1.0101, 0, 0, -1, 1),
(2, 0, 0, 1, 95.9598, 1.0, 0.0, 95.9598, 1.0, 97.9598, 1.0, 2.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 92.9196, 2.0,
0.0, 92.9196, 2.02, 97.95960000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 2),
(3, 0, 1, 1, 92.9196, 1.0, 0.0, 92.9196, 1.0, 97.95960000000001, 1.0,
2.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 89.8794,
2.0, 0.0, 89.8794, 2.02, 97.95940000000002, 1.0, 2.02, 1.0202, 0, 0, -1, 3),
(4, 0, 0, 2, 89.8794, 2.0, 0.0, 89.8794, 2.0, 97.8794, 1.0, 3.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.8191,
3.0, 0.0, 85.8191, 3.0300000000000002, 98.90910000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 4),
(5, 0, 1, 2, 85.8191, 2.0, 0.0, 85.8191, 2.0, 98.90910000000001, 1.0,
3.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True,
81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0300000000000002,
99.93880000000001, 1.0, 3.0300000000000002, 1.0303, 0, 0, -1, 5),
(6, 0, 0, 3, 81.75880000000001, 3.0, 0.0, 81.75880000000001, 3.0,
99.75880000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 76.67840000000001, 4.0, 0.0, 76.67840000000001,
4.04, 101.83840000000001, 1.0, 4.04, 1.0404, 0, 0, -1, 6),
(7, 0, 1, 3, 76.67840000000001, 3.0, 0.0, 76.67840000000001, 3.0,
101.83840000000001, 1.0, 4.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 71.59800000000001, 4.0, 0.0, 71.59800000000001,
4.04, 103.918, 1.0, 4.04, 1.0404, 0, 0, -1, 7),
(8, 0, 0, 4, 71.59800000000001, 4.0, 0.0, 71.59800000000001, 4.0,
103.59800000000001, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 65.49750000000002, 5.0, 0.0, 65.49750000000002,
5.05, 106.74750000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 8),
(9, 0, 1, 4, 65.49750000000002, 4.0, 0.0, 65.49750000000002, 4.0,
106.74750000000002, 1.0, 5.0, 0, 2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0,
False, True, False, True, 59.39700000000002, 5.0, 0.0, 59.39700000000002,
5.05, 109.89700000000002, 1.0, 5.05, 1.0505, 0, 0, -1, 9),
(10, 1, 2, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, 1.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 97.9799, 1.0, 0.0,
97.9799, 1.01, 98.9899, 1.0, 1.01, 1.0101, 0, 0, -1, 10),
(11, 1, 2, 1, 97.9799, 1.0, 0.0, 97.9799, 1.0, 98.9799, 1.0, 2.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 94.9397,
2.0, 0.0, 94.9397, 2.02, 98.97970000000001, 1.0, 2.02, 1.0202, 0, 0, -1, 11),
(12, 1, 2, 2, 94.9397, 2.0, 0.0, 94.9397, 2.0, 98.9397, 1.0, 3.0, 0,
2, 0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 90.8794,
3.0, 0.0, 90.8794, 3.0300000000000002, 99.96940000000001, 1.0,
3.0300000000000002, 1.0303, 0, 0, -1, 12),
(13, 1, 2, 3, 90.8794, 3.0, 0.0, 90.8794, 3.0, 99.8794, 1.0, 4.0, 0, 2,
0.01, 1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 85.799, 4.0,
0.0, 85.799, 4.04, 101.959, 1.0, 4.04, 1.0404, 0, 0, -1, 13),
(14, 1, 2, 4, 85.799, 4.0, 0.0, 85.799, 4.0, 101.799, 1.0, 5.0, 0, 2, 0.01,
1.0, 0.01, 0.0, np.inf, 0.0, False, True, False, True, 79.69850000000001,
5.0, 0.0, 79.69850000000001, 5.05, 104.94850000000001, 1.0, 5.05, 1.0505, 0, 0, -1, 14)
], dtype=log_dt)
)
np.testing.assert_array_equal(
c.last_cash,
np.array([59.39700000000002, 79.69850000000001])
)
np.testing.assert_array_equal(
c.last_position,
np.array([5., 5., 5.])
)
np.testing.assert_array_equal(
c.last_val_price,
np.array([5.0, 5.0, 5.0])
)
np.testing.assert_array_equal(
c.last_value,
np.array([109.39700000000002, 104.69850000000001])
)
np.testing.assert_array_equal(
c.second_last_value,
np.array([103.59800000000001, 101.799])
)
np.testing.assert_array_equal(
c.last_return,
np.array([0.05597598409235705, 0.028482598060884715])
)
np.testing.assert_array_equal(
c.last_debt,
np.array([0., 0., 0.])
)
np.testing.assert_array_equal(
c.last_free_cash,
np.array([59.39700000000002, 79.69850000000001])
)
if test_row_wise:
np.testing.assert_array_equal(
c.last_oidx,
np.array([12, 13, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([12, 13, 14])
)
else:
np.testing.assert_array_equal(
c.last_oidx,
np.array([8, 9, 14])
)
np.testing.assert_array_equal(
c.last_lidx,
np.array([8, 9, 14])
)
assert c.order_records[c.last_oidx[0]]['col'] == 0
assert c.order_records[c.last_oidx[1]]['col'] == 1
assert c.order_records[c.last_oidx[2]]['col'] == 2
assert c.log_records[c.last_lidx[0]]['col'] == 0
assert c.log_records[c.last_lidx[1]]['col'] == 1
assert c.log_records[c.last_lidx[2]]['col'] == 2
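    # Record debt and free cash from post-order callbacks and check them against
    # pf.cash(free=True), with and without grouping/cash sharing.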
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_free_cash(self, test_row_wise, test_flexible):
if test_flexible:
def order_func(c, size):
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, nb.order_nb(
size[c.i, col],
nb.get_col_elem_nb(c, col, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
return -1, nb.order_nothing_nb()
else:
def order_func(c, size):
return nb.order_nb(
size[c.i, c.col],
nb.get_elem_nb(c, c.close),
fees=0.01,
fixed_fees=1.,
slippage=0.01
)
def post_order_func(c, debt, free_cash):
debt[c.i, c.col] = c.debt_now
if c.cash_sharing:
free_cash[c.i, c.group] = c.free_cash_now
else:
free_cash[c.i, c.col] = c.free_cash_now
size = np.array([
[5, -5, 5],
[5, -5, -10],
[-5, 5, 10],
[-5, 5, -10],
[-5, 5, 10]
])
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[93.8995, 94.0005, 93.8995],
[82.6985, 83.00150000000001, 92.70150000000001],
[96.39999999999999, 81.55000000000001, 80.8985],
[115.002, 74.998, 79.5025],
[89.0045, 48.49550000000001, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty(price_wide.shape, dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide.vbt.wrapper.wrap(price_wide.values[::-1]),
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 24.75, 0.0],
[0.0, 44.55, 19.8],
[0.0, 22.275, 0.0],
[0.0, 0.0, 9.9],
[4.95, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[73.4975, 74.0025, 73.4975],
[52.0955, 53.00449999999999, 72.1015],
[65.797, 81.25299999999999, 80.0985],
[74.598, 114.60199999999998, 78.9005],
[68.5985, 108.50149999999998, 87.49949999999998]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
debt = np.empty(price_wide.shape, dtype=np.float_)
free_cash = np.empty((price_wide.shape[0], 2), dtype=np.float_)
pf = vbt.Portfolio.from_order_func(
price_wide,
order_func, size,
post_order_func_nb=post_order_func,
post_order_args=(debt, free_cash,),
row_wise=test_row_wise,
use_numba=False,
group_by=[0, 0, 1],
cash_sharing=True,
flexible=test_flexible
)
np.testing.assert_array_equal(
debt,
np.array([
[0.0, 4.95, 0.0],
[0.0, 14.850000000000001, 9.9],
[0.0, 7.425000000000001, 0.0],
[0.0, 0.0, 19.8],
[24.75, 0.0, 0.0]
])
)
np.testing.assert_array_equal(
free_cash,
np.array([
[87.9, 93.8995],
[65.70000000000002, 92.70150000000001],
[77.95000000000002, 80.8985],
[90.00000000000001, 79.5025],
[37.500000000000014, 67.0975]
])
)
np.testing.assert_almost_equal(
free_cash,
pf.cash(free=True).values
)
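    # init_cash can be set per column, to np.inf, or to InitCashMode.Auto/AutoAlign;
    # the auto modes must produce the same orders as an unlimited budget.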
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_init_cash(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=[1., 10., np.inf], flexible=test_flexible)
if test_row_wise:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 10.0, 1.0, 0.0, 0),
(2, 2, 0, 10.0, 1.0, 0.0, 0), (3, 0, 1, 10.0, 2.0, 0.0, 1),
(4, 1, 1, 10.0, 2.0, 0.0, 1), (5, 2, 1, 10.0, 2.0, 0.0, 1),
(6, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 2, 2, 10.0, 3.0, 0.0, 0), (9, 0, 3, 10.0, 4.0, 0.0, 1),
(10, 1, 3, 10.0, 4.0, 0.0, 1), (11, 2, 3, 10.0, 4.0, 0.0, 1),
(12, 0, 4, 8.0, 5.0, 0.0, 0), (13, 1, 4, 8.0, 5.0, 0.0, 0),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 10.0, 2.0, 0.0, 1),
(2, 0, 2, 6.666666666666667, 3.0, 0.0, 0), (3, 0, 3, 10.0, 4.0, 0.0, 1),
(4, 0, 4, 8.0, 5.0, 0.0, 0), (5, 1, 0, 10.0, 1.0, 0.0, 0),
(6, 1, 1, 10.0, 2.0, 0.0, 1), (7, 1, 2, 6.666666666666667, 3.0, 0.0, 0),
(8, 1, 3, 10.0, 4.0, 0.0, 1), (9, 1, 4, 8.0, 5.0, 0.0, 0),
(10, 2, 0, 10.0, 1.0, 0.0, 0), (11, 2, 1, 10.0, 2.0, 0.0, 1),
(12, 2, 2, 10.0, 3.0, 0.0, 0), (13, 2, 3, 10.0, 4.0, 0.0, 1),
(14, 2, 4, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
assert type(pf._init_cash) == np.ndarray
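        # InitCashMode.Auto and InitCashMode.AutoAlign should reproduce the orders
        # of a simulation that starts with infinite cash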
base_pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=np.inf, flexible=test_flexible)
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.Auto, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.Auto
pf = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(10.), row_wise=test_row_wise,
init_cash=InitCashMode.AutoAlign, flexible=test_flexible)
record_arrays_close(
pf.order_records,
base_pf.orders.values
)
assert pf._init_cash == InitCashMode.AutoAlign
def test_func_calls(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
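        # Every callback bumps the shared counter call_i and records its call number,
        # so the assertions below pin down the exact invocation order of the
        # sim/group/segment/order/post-order hooks. sub_arg is a template that
        # evaluates to target_shape[0] * target_shape[1] == 15 for this input.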
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 56
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [56]
assert list(pre_group_lst) == [2, 34]
assert list(post_group_lst) == [33, 55]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 35, 39, 43, 47, 51]
assert list(post_segment_lst) == [8, 14, 20, 26, 32, 38, 42, 46, 50, 54]
assert list(order_lst) == [4, 6, 10, 12, 16, 18, 22, 24, 28, 30, 36, 40, 44, 48, 52]
assert list(post_order_lst) == [5, 7, 11, 13, 17, 19, 23, 25, 29, 31, 37, 41, 45, 49, 53]
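        # Second run: deactivate some segments via segment_mask but still invoke the
        # segment hooks for inactive segments (call_pre_segment/call_post_segment=True)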
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 38
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [38]
assert list(pre_group_lst) == [2, 22]
assert list(post_group_lst) == [21, 37]
assert list(pre_segment_lst) == [3, 5, 7, 13, 19, 23, 25, 29, 31, 35]
assert list(post_segment_lst) == [4, 6, 12, 18, 20, 24, 28, 30, 34, 36]
assert list(order_lst) == [8, 10, 14, 16, 26, 32]
assert list(post_order_lst) == [9, 11, 15, 17, 27, 33]
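        # Third run: with call_pre_segment/call_post_segment=False the segment hooks
        # run only for active segments, so the total call count drops further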
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, template_mapping=dict(np=np)
)
assert call_i[0] == 26
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [26]
assert list(pre_group_lst) == [2, 16]
assert list(post_group_lst) == [15, 25]
assert list(pre_segment_lst) == [3, 9, 17, 21]
assert list(post_segment_lst) == [8, 14, 20, 24]
assert list(order_lst) == [4, 6, 10, 12, 18, 22]
assert list(post_order_lst) == [5, 7, 11, 13, 19, 23]
def test_func_calls_flexible(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_group_func_nb(c, call_i, pre_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_group_lst.append(call_i[0])
return (call_i,)
@njit
def post_group_func_nb(c, call_i, post_group_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_group_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
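        # Same counting scheme as test_func_calls, but with a flexible order function:
        # it is called once per column in the group plus a final call that returns
        # col == -1 to end the segment, hence the higher call counts below.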
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 66
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [66]
assert list(pre_group_lst) == [2, 39]
assert list(post_group_lst) == [38, 65]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 40, 45, 50, 55, 60]
assert list(post_segment_lst) == [9, 16, 23, 30, 37, 44, 49, 54, 59, 64]
assert list(order_lst) == [
4, 6, 8, 11, 13, 15, 18, 20, 22, 25, 27, 29, 32, 34,
36, 41, 43, 46, 48, 51, 53, 56, 58, 61, 63
]
assert list(post_order_lst) == [5, 7, 12, 14, 19, 21, 26, 28, 33, 35, 42, 47, 52, 57, 62]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 42
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [42]
assert list(pre_group_lst) == [2, 24]
assert list(post_group_lst) == [23, 41]
assert list(pre_segment_lst) == [3, 5, 7, 14, 21, 25, 27, 32, 34, 39]
assert list(post_segment_lst) == [4, 6, 13, 20, 22, 26, 31, 33, 38, 40]
assert list(order_lst) == [8, 10, 12, 15, 17, 19, 28, 30, 35, 37]
assert list(post_order_lst) == [9, 11, 16, 18, 29, 36]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_group_lst = List.empty_list(typeof(0))
post_group_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_group_func_nb=pre_group_func_nb, pre_group_args=(pre_group_lst, sub_arg),
post_group_func_nb=post_group_func_nb, post_group_args=(post_group_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=False, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 30
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [30]
assert list(pre_group_lst) == [2, 18]
assert list(post_group_lst) == [17, 29]
assert list(pre_segment_lst) == [3, 10, 19, 24]
assert list(post_segment_lst) == [9, 16, 23, 28]
assert list(order_lst) == [4, 6, 8, 11, 13, 15, 20, 22, 25, 27]
assert list(post_order_lst) == [5, 7, 12, 14, 21, 26]
def test_func_calls_row_wise(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst):
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst):
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst):
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst):
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst):
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst):
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(c, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst):
call_i[0] += 1
post_order_lst.append(call_i[0])
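        # Row-wise variant: rows are processed one after another, so the
        # pre_row/post_row hooks take the place of the pre_group/post_group hooks above.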
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 62
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [62]
assert list(pre_row_lst) == [2, 14, 26, 38, 50]
assert list(post_row_lst) == [13, 25, 37, 49, 61]
assert list(pre_segment_lst) == [3, 9, 15, 21, 27, 33, 39, 45, 51, 57]
assert list(post_segment_lst) == [8, 12, 20, 24, 32, 36, 44, 48, 56, 60]
assert list(order_lst) == [4, 6, 10, 16, 18, 22, 28, 30, 34, 40, 42, 46, 52, 54, 58]
assert list(post_order_lst) == [5, 7, 11, 17, 19, 23, 29, 31, 35, 41, 43, 47, 53, 55, 59]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 44
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [44]
assert list(pre_row_lst) == [2, 8, 16, 26, 38]
assert list(post_row_lst) == [7, 15, 25, 37, 43]
assert list(pre_segment_lst) == [3, 5, 9, 11, 17, 23, 27, 33, 39, 41]
assert list(post_segment_lst) == [4, 6, 10, 14, 22, 24, 32, 36, 40, 42]
assert list(order_lst) == [12, 18, 20, 28, 30, 34]
assert list(post_order_lst) == [13, 19, 21, 29, 31, 35]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst,),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst,),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst,),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst,),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst,),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, template_mapping=dict(np=np)
)
assert call_i[0] == 32
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [32]
assert list(pre_row_lst) == [2, 4, 10, 18, 30]
assert list(post_row_lst) == [3, 9, 17, 29, 31]
assert list(pre_segment_lst) == [5, 11, 19, 25]
assert list(post_segment_lst) == [8, 16, 24, 28]
assert list(order_lst) == [6, 12, 14, 20, 22, 26]
assert list(post_order_lst) == [7, 13, 15, 21, 23, 27]
def test_func_calls_row_wise_flexible(self):
@njit
def pre_sim_func_nb(c, call_i, pre_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_sim_lst.append(call_i[0])
return (call_i,)
@njit
def post_sim_func_nb(c, call_i, post_sim_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_sim_lst.append(call_i[0])
return (call_i,)
@njit
def pre_row_func_nb(c, call_i, pre_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_row_lst.append(call_i[0])
return (call_i,)
@njit
def post_row_func_nb(c, call_i, post_row_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_row_lst.append(call_i[0])
return (call_i,)
@njit
def pre_segment_func_nb(c, call_i, pre_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
pre_segment_lst.append(call_i[0])
return (call_i,)
@njit
def post_segment_func_nb(c, call_i, post_segment_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_segment_lst.append(call_i[0])
return (call_i,)
@njit
def flex_order_func_nb(c, call_i, order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
order_lst.append(call_i[0])
col = c.from_col + c.call_idx
if c.call_idx < c.group_len:
return col, NoOrder
return -1, NoOrder
@njit
def post_order_func_nb(c, call_i, post_order_lst, sub_arg):
if sub_arg != 15:
raise ValueError
call_i[0] += 1
post_order_lst.append(call_i[0])
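        # Row-wise + flexible variant: combines per-row processing with the flexible
        # order function, which is called group_len + 1 times per active segment.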
sub_arg = vbt.RepEval('np.prod([target_shape[0], target_shape[1]])')
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 72
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [72]
assert list(pre_row_lst) == [2, 16, 30, 44, 58]
assert list(post_row_lst) == [15, 29, 43, 57, 71]
assert list(pre_segment_lst) == [3, 10, 17, 24, 31, 38, 45, 52, 59, 66]
assert list(post_segment_lst) == [9, 14, 23, 28, 37, 42, 51, 56, 65, 70]
assert list(order_lst) == [
4, 6, 8, 11, 13, 18, 20, 22, 25, 27, 32, 34, 36,
39, 41, 46, 48, 50, 53, 55, 60, 62, 64, 67, 69
]
assert list(post_order_lst) == [5, 7, 12, 19, 21, 26, 33, 35, 40, 47, 49, 54, 61, 63, 68]
segment_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=True, call_post_segment=True,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 48
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [48]
assert list(pre_row_lst) == [2, 8, 17, 28, 42]
assert list(post_row_lst) == [7, 16, 27, 41, 47]
assert list(pre_segment_lst) == [3, 5, 9, 11, 18, 25, 29, 36, 43, 45]
assert list(post_segment_lst) == [4, 6, 10, 15, 24, 26, 35, 40, 44, 46]
assert list(order_lst) == [12, 14, 19, 21, 23, 30, 32, 34, 37, 39]
assert list(post_order_lst) == [13, 20, 22, 31, 33, 38]
call_i = np.array([0])
pre_sim_lst = List.empty_list(typeof(0))
post_sim_lst = List.empty_list(typeof(0))
pre_row_lst = List.empty_list(typeof(0))
post_row_lst = List.empty_list(typeof(0))
pre_segment_lst = List.empty_list(typeof(0))
post_segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
post_order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, flex_order_func_nb, order_lst, sub_arg,
group_by=np.array([0, 0, 1]),
pre_sim_func_nb=pre_sim_func_nb, pre_sim_args=(call_i, pre_sim_lst, sub_arg),
post_sim_func_nb=post_sim_func_nb, post_sim_args=(call_i, post_sim_lst, sub_arg),
pre_row_func_nb=pre_row_func_nb, pre_row_args=(pre_row_lst, sub_arg),
post_row_func_nb=post_row_func_nb, post_row_args=(post_row_lst, sub_arg),
pre_segment_func_nb=pre_segment_func_nb, pre_segment_args=(pre_segment_lst, sub_arg),
post_segment_func_nb=post_segment_func_nb, post_segment_args=(post_segment_lst, sub_arg),
post_order_func_nb=post_order_func_nb, post_order_args=(post_order_lst, sub_arg),
segment_mask=segment_mask, call_pre_segment=False, call_post_segment=False,
row_wise=True, flexible=True, template_mapping=dict(np=np)
)
assert call_i[0] == 36
assert list(pre_sim_lst) == [1]
assert list(post_sim_lst) == [36]
assert list(pre_row_lst) == [2, 4, 11, 20, 34]
assert list(post_row_lst) == [3, 10, 19, 33, 35]
assert list(pre_segment_lst) == [5, 12, 21, 28]
assert list(post_segment_lst) == [9, 18, 27, 32]
assert list(order_lst) == [6, 8, 13, 15, 17, 22, 24, 26, 29, 31]
assert list(post_order_lst) == [7, 14, 16, 23, 25, 30]
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_orders(self, test_row_wise, test_flexible):
order_func = flex_order_func_nb if test_flexible else order_func_nb
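        # 3 columns x 5 rows produce 15 order records; capping max_orders below that must raise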
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=15, flexible=test_flexible)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_orders=14, flexible=test_flexible)
@pytest.mark.parametrize("test_row_wise", [False, True])
@pytest.mark.parametrize("test_flexible", [False, True])
def test_max_logs(self, test_row_wise, test_flexible):
log_order_func = log_flex_order_func_nb if test_flexible else log_order_func_nb
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, flexible=test_flexible)
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=15, flexible=test_flexible)
with pytest.raises(Exception):
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func, np.asarray(np.inf),
row_wise=test_row_wise, max_logs=14, flexible=test_flexible)
# ############# Portfolio ############# #
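# Fixtures for TestPortfolio: a price frame with NaNs to exercise missing data,
# mixed positive/negative order sizes, and one direction per column.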
price_na = pd.DataFrame({
'a': [np.nan, 2., 3., 4., 5.],
'b': [1., 2., np.nan, 4., 5.],
'c': [1., 2., 3., 4., np.nan]
}, index=price.index)
order_size_new = pd.Series([1., 0.1, -1., -0.1, 1.])
directions = ['longonly', 'shortonly', 'both']
group_by = pd.Index(['first', 'first', 'second'], name='group')
pf = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=None,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # independent
pf_grouped = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=False,
init_cash=[100., 100., 100.], freq='1D', attach_call_seq=True
) # grouped
pf_shared = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='amount', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=True,
init_cash=[200., 100.], freq='1D', attach_call_seq=True
) # shared
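# pf trades each column independently, pf_grouped groups 'a' and 'b' under 'first'
# without sharing cash, and pf_shared shares a single 200 balance across that group.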
class TestPortfolio:
def test_config(self, tmp_path):
pf2 = pf.copy()
pf2._metrics = pf2._metrics.copy()
pf2.metrics['hello'] = 'world'
pf2._subplots = pf2.subplots.copy()
pf2.subplots['hello'] = 'world'
assert vbt.Portfolio.loads(pf2['a'].dumps()) == pf2['a']
assert vbt.Portfolio.loads(pf2.dumps()) == pf2
pf2.save(tmp_path / 'pf')
assert vbt.Portfolio.load(tmp_path / 'pf') == pf2
def test_wrapper(self):
pd.testing.assert_index_equal(
pf.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
price_na.columns
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.grouper.group_by is None
assert pf.wrapper.grouper.allow_enable
assert pf.wrapper.grouper.allow_disable
assert pf.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_grouped.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_grouped.wrapper.columns,
price_na.columns
)
assert pf_grouped.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_grouped.wrapper.grouper.group_by,
group_by
)
assert pf_grouped.wrapper.grouper.allow_enable
assert pf_grouped.wrapper.grouper.allow_disable
assert pf_grouped.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
pf_shared.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
pf_shared.wrapper.columns,
price_na.columns
)
assert pf_shared.wrapper.ndim == 2
pd.testing.assert_index_equal(
pf_shared.wrapper.grouper.group_by,
group_by
)
assert not pf_shared.wrapper.grouper.allow_enable
assert pf_shared.wrapper.grouper.allow_disable
assert not pf_shared.wrapper.grouper.allow_modify
def test_indexing(self):
assert pf['a'].wrapper == pf.wrapper['a']
assert pf['a'].orders == pf.orders['a']
assert pf['a'].logs == pf.logs['a']
assert pf['a'].init_cash == pf.init_cash['a']
pd.testing.assert_series_equal(pf['a'].call_seq, pf.call_seq['a'])
assert pf['c'].wrapper == pf.wrapper['c']
assert pf['c'].orders == pf.orders['c']
assert pf['c'].logs == pf.logs['c']
assert pf['c'].init_cash == pf.init_cash['c']
pd.testing.assert_series_equal(pf['c'].call_seq, pf.call_seq['c'])
assert pf[['c']].wrapper == pf.wrapper[['c']]
assert pf[['c']].orders == pf.orders[['c']]
assert pf[['c']].logs == pf.logs[['c']]
pd.testing.assert_series_equal(pf[['c']].init_cash, pf.init_cash[['c']])
pd.testing.assert_frame_equal(pf[['c']].call_seq, pf.call_seq[['c']])
assert pf_grouped['first'].wrapper == pf_grouped.wrapper['first']
assert pf_grouped['first'].orders == pf_grouped.orders['first']
assert pf_grouped['first'].logs == pf_grouped.logs['first']
assert pf_grouped['first'].init_cash == pf_grouped.init_cash['first']
pd.testing.assert_frame_equal(pf_grouped['first'].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped[['first']].wrapper == pf_grouped.wrapper[['first']]
assert pf_grouped[['first']].orders == pf_grouped.orders[['first']]
assert pf_grouped[['first']].logs == pf_grouped.logs[['first']]
pd.testing.assert_series_equal(
pf_grouped[['first']].init_cash,
pf_grouped.init_cash[['first']])
pd.testing.assert_frame_equal(pf_grouped[['first']].call_seq, pf_grouped.call_seq[['a', 'b']])
assert pf_grouped['second'].wrapper == pf_grouped.wrapper['second']
assert pf_grouped['second'].orders == pf_grouped.orders['second']
assert pf_grouped['second'].logs == pf_grouped.logs['second']
assert pf_grouped['second'].init_cash == pf_grouped.init_cash['second']
pd.testing.assert_series_equal(pf_grouped['second'].call_seq, pf_grouped.call_seq['c'])
assert pf_grouped[['second']].wrapper == pf_grouped.wrapper[['second']]
assert pf_grouped[['second']].orders == pf_grouped.orders[['second']]
assert pf_grouped[['second']].logs == pf_grouped.logs[['second']]
pd.testing.assert_series_equal(
pf_grouped[['second']].init_cash,
pf_grouped.init_cash[['second']])
pd.testing.assert_frame_equal(pf_grouped[['second']].call_seq, pf_grouped.call_seq[['c']])
assert pf_shared['first'].wrapper == pf_shared.wrapper['first']
assert pf_shared['first'].orders == pf_shared.orders['first']
assert pf_shared['first'].logs == pf_shared.logs['first']
assert pf_shared['first'].init_cash == pf_shared.init_cash['first']
pd.testing.assert_frame_equal(pf_shared['first'].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared[['first']].wrapper == pf_shared.wrapper[['first']]
assert pf_shared[['first']].orders == pf_shared.orders[['first']]
assert pf_shared[['first']].logs == pf_shared.logs[['first']]
pd.testing.assert_series_equal(
pf_shared[['first']].init_cash,
pf_shared.init_cash[['first']])
pd.testing.assert_frame_equal(pf_shared[['first']].call_seq, pf_shared.call_seq[['a', 'b']])
assert pf_shared['second'].wrapper == pf_shared.wrapper['second']
assert pf_shared['second'].orders == pf_shared.orders['second']
assert pf_shared['second'].logs == pf_shared.logs['second']
assert pf_shared['second'].init_cash == pf_shared.init_cash['second']
pd.testing.assert_series_equal(pf_shared['second'].call_seq, pf_shared.call_seq['c'])
assert pf_shared[['second']].wrapper == pf_shared.wrapper[['second']]
assert pf_shared[['second']].orders == pf_shared.orders[['second']]
assert pf_shared[['second']].logs == pf_shared.logs[['second']]
pd.testing.assert_series_equal(
pf_shared[['second']].init_cash,
pf_shared.init_cash[['second']])
pd.testing.assert_frame_equal(pf_shared[['second']].call_seq, pf_shared.call_seq[['c']])
def test_regroup(self):
assert pf.regroup(None) == pf
assert pf.regroup(False) == pf
assert pf.regroup(group_by) != pf
pd.testing.assert_index_equal(pf.regroup(group_by).wrapper.grouper.group_by, group_by)
assert pf_grouped.regroup(None) == pf_grouped
assert pf_grouped.regroup(False) != pf_grouped
assert pf_grouped.regroup(False).wrapper.grouper.group_by is None
assert pf_grouped.regroup(group_by) == pf_grouped
assert pf_shared.regroup(None) == pf_shared
with pytest.raises(Exception):
_ = pf_shared.regroup(False)
assert pf_shared.regroup(group_by) == pf_shared
def test_cash_sharing(self):
assert not pf.cash_sharing
assert not pf_grouped.cash_sharing
assert pf_shared.cash_sharing
def test_call_seq(self):
pd.testing.assert_frame_equal(
pf.call_seq,
pd.DataFrame(
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_grouped.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
def test_orders(self):
record_arrays_close(
pf.orders.values,
np.array([
(0, 0, 1, 0.1, 2.02, 0.10202, 0), (1, 0, 2, 0.1, 2.9699999999999998, 0.10297, 1),
(2, 0, 4, 1.0, 5.05, 0.1505, 0), (3, 1, 0, 1.0, 0.99, 0.10990000000000001, 1),
(4, 1, 1, 0.1, 1.98, 0.10198, 1), (5, 1, 3, 0.1, 4.04, 0.10404000000000001, 0),
(6, 1, 4, 1.0, 4.95, 0.14950000000000002, 1), (7, 2, 0, 1.0, 1.01, 0.1101, 0),
(8, 2, 1, 0.1, 2.02, 0.10202, 0), (9, 2, 2, 1.0, 2.9699999999999998, 0.1297, 1),
(10, 2, 3, 0.1, 3.96, 0.10396000000000001, 1)
], dtype=order_dt)
)
result = pd.Series(
np.array([3, 4, 4]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_orders(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_orders(group_by=False).count(),
result
)
result = pd.Series(
np.array([7, 4]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_orders(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.orders.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.orders.count(),
result
)
def test_logs(self):
record_arrays_close(
pf.logs.values,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, np.nan, 100.0, 1.0, np.nan, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.0, 0.0, 0.0,
100.0, np.nan, 100.0, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(1, 0, 0, 1, 100.0, 0.0, 0.0, 100.0, 2.0, 100.0, 0.1, 2.0, 0, 0, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.69598, 0.1,
0.0, 99.69598, 2.0, 100.0, 0.1, 2.02, 0.10202, 0, 0, -1, 0),
(2, 0, 0, 2, 99.69598, 0.1, 0.0, 99.69598, 3.0, 99.99598, -1.0, 3.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 99.89001,
0.0, 0.0, 99.89001, 3.0, 99.99598, 0.1, 2.9699999999999998, 0.10297, 1, 0, -1, 1),
(3, 0, 0, 3, 99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, -0.1, 4.0,
0, 0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
99.89001, 0.0, 0.0, 99.89001, 4.0, 99.89001, np.nan, np.nan, np.nan, -1, 2, 8, -1),
(4, 0, 0, 4, 99.89001, 0.0, 0.0, 99.89001, 5.0, 99.89001, 1.0, 5.0, 0,
0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 94.68951,
1.0, 0.0, 94.68951, 5.0, 99.89001, 1.0, 5.05, 0.1505, 0, 0, -1, 2),
(5, 1, 1, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 1, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.8801, -1.0,
0.99, 98.9001, 1.0, 100.0, 1.0, 0.99, 0.10990000000000001, 1, 0, -1, 3),
(6, 1, 1, 1, 100.8801, -1.0, 0.99, 98.9001, 2.0, 98.8801, 0.1, 2.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.8801, 0.1, 1.98, 0.10198, 1, 0, -1, 4),
(7, 1, 1, 2, 100.97612, -1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999,
-1.0, np.nan, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 100.97612,
-1.1, 1.188, 98.60011999999999, 2.0, 98.77611999999999, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(8, 1, 1, 3, 100.97612, -1.1, 1.188, 98.60011999999999, 4.0, 96.57611999999999,
-0.1, 4.0, 0, 1, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
100.46808, -1.0, 1.08, 98.30807999999999, 4.0, 96.57611999999999, 0.1, 4.04,
0.10404000000000001, 0, 0, -1, 5),
(9, 1, 1, 4, 100.46808, -1.0, 1.08, 98.30807999999999, 5.0, 95.46808, 1.0, 5.0, 0, 1,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 105.26858, -2.0, 6.03,
93.20857999999998, 5.0, 95.46808, 1.0, 4.95, 0.14950000000000002, 1, 0, -1, 6),
(10, 2, 2, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, 1.0, 1.0, 0, 2, 0.01, 0.1,
0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.8799, 1.0, 0.0, 98.8799,
1.0, 100.0, 1.0, 1.01, 0.1101, 0, 0, -1, 7),
(11, 2, 2, 1, 98.8799, 1.0, 0.0, 98.8799, 2.0, 100.8799, 0.1, 2.0, 0, 2, 0.01,
0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True, 98.57588000000001, 1.1,
0.0, 98.57588000000001, 2.0, 100.8799, 0.1, 2.02, 0.10202, 0, 0, -1, 8),
(12, 2, 2, 2, 98.57588000000001, 1.1, 0.0, 98.57588000000001, 3.0, 101.87588000000001,
-1.0, 3.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001, 3.0,
101.87588000000001, 1.0, 2.9699999999999998, 0.1297, 1, 0, -1, 9),
(13, 2, 2, 3, 101.41618000000001, 0.10000000000000009, 0.0, 101.41618000000001,
4.0, 101.81618000000002, -0.1, 4.0, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0,
False, True, False, True, 101.70822000000001, 0.0, 0.0, 101.70822000000001,
4.0, 101.81618000000002, 0.1, 3.96, 0.10396000000000001, 1, 0, -1, 10),
(14, 2, 2, 4, 101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
1.0, np.nan, 0, 2, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, False, True, False, True,
101.70822000000001, 0.0, 0.0, 101.70822000000001, 4.0, 101.70822000000001,
np.nan, np.nan, np.nan, -1, 1, 1, -1)
], dtype=log_dt)
)
result = pd.Series(
np.array([5, 5, 5]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_logs(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_logs(group_by=False).count(),
result
)
result = pd.Series(
np.array([10, 5]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_logs(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.logs.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.logs.count(),
result
)
def test_entry_trades(self):
record_arrays_close(
pf.entry_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0, -0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 1.0, 0, 0.99, 0.10990000000000001, 4, 4.954285714285714,
0.049542857142857145, -4.12372857142857, -4.165382395382394, 1, 0, 2),
(3, 1, 0.1, 1, 1.98, 0.10198, 4, 4.954285714285714, 0.004954285714285714,
-0.4043628571428571, -2.0422366522366517, 1, 0, 2),
(4, 1, 1.0, 4, 4.95, 0.14950000000000002, 4, 4.954285714285714,
0.049542857142857145, -0.20332857142857072, -0.04107647907647893, 1, 0, 2),
(5, 2, 1.0, 0, 1.01, 0.1101, 3, 3.0599999999999996, 0.21241818181818184,
1.727481818181818, 1.71037803780378, 0, 1, 3),
(6, 2, 0.1, 1, 2.02, 0.10202, 3, 3.0599999999999996, 0.021241818181818185,
-0.019261818181818203, -0.09535553555355546, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 3, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_entry_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_entry_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([5, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_entry_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.entry_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.entry_trades.count(),
result
)
def test_exit_trades(self):
record_arrays_close(
pf.exit_trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 0.1, 0, 1.0799999999999998, 0.019261818181818182,
3, 4.04, 0.10404000000000001, -0.4193018181818182, -3.882424242424243, 1, 1, 2),
(3, 1, 2.0, 0, 3.015, 0.3421181818181819, 4, 5.0, 0.0,
-4.312118181818182, -0.7151108095884214, 1, 0, 2),
(4, 2, 1.0, 0, 1.1018181818181818, 0.19283636363636364, 2,
2.9699999999999998, 0.1297, 1.5456454545454543, 1.4028135313531351, 0, 1, 3),
(5, 2, 0.10000000000000009, 0, 1.1018181818181818, 0.019283636363636378,
3, 3.96, 0.10396000000000001, 0.1625745454545457, 1.4755115511551162, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 2, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_exit_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_exit_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([4, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_exit_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.exit_trades.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.exit_trades.count(),
result
)
def test_positions(self):
record_arrays_close(
pf.positions.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998,
0.10297, -0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 2.1, 0, 2.9228571428571426, 0.36138000000000003, 4, 4.954285714285714,
0.10404000000000001, -4.731420000000001, -0.7708406647116326, 1, 0, 2),
(3, 2, 1.1, 0, 1.1018181818181818, 0.21212000000000003, 3,
3.06, 0.23366000000000003, 1.7082200000000003, 1.4094224422442245, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_positions(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_positions(group_by=False).count(),
result
)
result = pd.Series(
np.array([3, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_positions(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.positions.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.positions.count(),
result
)
def test_drawdowns(self):
record_arrays_close(
pf.drawdowns.values,
np.array([
(0, 0, 0, 1, 4, 4, 100.0, 99.68951, 99.68951, 0),
(1, 1, 0, 1, 4, 4, 99.8801, 95.26858, 95.26858, 0),
(2, 2, 2, 3, 3, 4, 101.71618000000001, 101.70822000000001, 101.70822000000001, 0)
], dtype=drawdown_dt)
)
result = pd.Series(
np.array([1, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
pf.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.get_drawdowns(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
pf_shared.get_drawdowns(group_by=False).count(),
result
)
result = pd.Series(
np.array([1, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
pf.get_drawdowns(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
pf_grouped.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
pf_shared.drawdowns.count(),
result
)
def test_close(self):
pd.testing.assert_frame_equal(pf.close, price_na)
pd.testing.assert_frame_equal(pf_grouped.close, price_na)
pd.testing.assert_frame_equal(pf_shared.close, price_na)
def test_get_filled_close(self):
pd.testing.assert_frame_equal(
pf.get_filled_close(),
price_na.ffill().bfill()
)
def test_asset_flow(self):
pd.testing.assert_frame_equal(
pf.asset_flow(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 0.1],
[-0.1, 0., -1.],
[0., 0., -0.1],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_flow(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 0.1, 0.],
[0., 0., 0.],
[0., -0.1, 0.],
[0., 1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -0.1, 0.1],
[-0.1, 0., -1.],
[0., 0.1, -0.1],
[1., -1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_flow(),
result
)
def test_assets(self):
pd.testing.assert_frame_equal(
pf.assets(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 1.1],
[0., 0., 0.1],
[0., 0., 0.],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.assets(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 1.1, 0.],
[0., 1.1, 0.],
[0., 1., 0.],
[0., 2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -1.1, 1.1],
[0., -1.1, 0.1],
[0., -1., 0.],
[1., -2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.assets(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.assets(),
result
)
pd.testing.assert_frame_equal(
pf_shared.assets(),
result
)
def test_position_mask(self):
pd.testing.assert_frame_equal(
pf.position_mask(direction='longonly'),
pd.DataFrame(
np.array([
[False, False, True],
[True, False, True],
[False, False, True],
[False, False, False],
[True, False, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.position_mask(direction='shortonly'),
pd.DataFrame(
np.array([
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[False, True, True],
[True, True, True],
[False, True, True],
[False, True, False],
[True, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[True, True],
[True, True],
[True, True],
[True, False],
[True, False]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.position_mask(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.position_mask(),
result
)
pd.testing.assert_frame_equal(
pf_shared.position_mask(),
result
)
def test_position_coverage(self):
pd.testing.assert_series_equal(
pf.position_coverage(direction='longonly'),
pd.Series(np.array([0.4, 0., 0.6]), index=price_na.columns).rename('position_coverage')
)
pd.testing.assert_series_equal(
pf.position_coverage(direction='shortonly'),
pd.Series(np.array([0., 1., 0.]), index=price_na.columns).rename('position_coverage')
)
result = pd.Series(np.array([0.4, 1., 0.6]), index=price_na.columns).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(group_by=False),
result
)
result = pd.Series(
np.array([0.7, 0.6]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('position_coverage')
pd.testing.assert_series_equal(
pf.position_coverage(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.position_coverage(),
result
)
pd.testing.assert_series_equal(
pf_shared.position_coverage(),
result
)
def test_cash_flow(self):
pd.testing.assert_frame_equal(
pf.cash_flow(free=True),
pd.DataFrame(
np.array([
[0.0, -1.0998999999999999, -1.1201],
[-0.30402, -0.2999800000000002, -0.3040200000000002],
[0.19402999999999998, 0.0, 2.8402999999999996],
[0.0, -0.2920400000000002, 0.29204000000000035],
[-5.2005, -5.0995, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., 0.8801, -1.1201],
[-0.30402, 0.09602, -0.30402],
[0.19403, 0., 2.8403],
[0., -0.50804, 0.29204],
[-5.2005, 4.8005, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0.8801, -1.1201],
[-0.208, -0.30402],
[0.19403, 2.8403],
[-0.50804, 0.29204],
[-0.4, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash_flow(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash_flow(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash_flow(),
result
)
def test_init_cash(self):
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_grouped.get_init_cash(group_by=False),
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
pf_shared.get_init_cash(group_by=False),
pd.Series(np.array([200., 200., 100.]), index=price_na.columns).rename('init_cash')
)
result = pd.Series(
np.array([200., 100.]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
pd.testing.assert_series_equal(
pf.get_init_cash(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.init_cash,
result
)
pd.testing.assert_series_equal(
pf_shared.init_cash,
result
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=None).init_cash,
pd.Series(
np.array([14000., 12000., 10000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=None).init_cash,
pd.Series(
np.array([14000., 14000., 14000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
def test_cash(self):
pd.testing.assert_frame_equal(
pf.cash(free=True),
pd.DataFrame(
np.array([
[100.0, 98.9001, 98.8799],
[99.69598, 98.60011999999999, 98.57588000000001],
[99.89001, 98.60011999999999, 101.41618000000001],
[99.89001, 98.30807999999999, 101.70822000000001],
[94.68951, 93.20857999999998, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[100., 100.8801, 98.8799],
[99.69598, 100.97612, 98.57588],
[99.89001, 100.97612, 101.41618],
[99.89001, 100.46808, 101.70822],
[94.68951, 105.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.cash(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False),
pd.DataFrame(
np.array([
[200., 200.8801, 98.8799],
[199.69598, 200.97612, 98.57588],
[199.89001, 200.97612, 101.41618],
[199.89001, 200.46808, 101.70822],
[194.68951, 205.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.cash(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[200.8801, 200.8801, 98.8799],
[200.6721, 200.97612, 98.57588000000001],
[200.86613, 200.6721, 101.41618000000001],
[200.35809, 200.35809, 101.70822000000001],
[199.95809, 205.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200.8801, 98.8799],
[200.6721, 98.57588],
[200.86613, 101.41618],
[200.35809, 101.70822],
[199.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.cash(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.cash(),
result
)
pd.testing.assert_frame_equal(
pf_shared.cash(),
result
)
def test_asset_value(self):
pd.testing.assert_frame_equal(
pf.asset_value(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.2, 0., 2.2],
[0., 0., 0.3],
[0., 0., 0.],
[5., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.asset_value(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 2.2, 0.],
[0., 2.2, 0.],
[0., 4., 0.],
[0., 10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.2, -2.2, 2.2],
[0., -2.2, 0.3],
[0., -4., 0.],
[5., -10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-1., 1.],
[-2., 2.2],
[-2.2, 0.3],
[-4., 0.],
[-5., 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_value(),
result
)
def test_gross_exposure(self):
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 0.01001202],
[0.00200208, 0., 0.02183062],
[0., 0., 0.00294938],
[0., 0., 0.],
[0.05015573, 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf.gross_exposure(direction='shortonly'),
pd.DataFrame(
np.array([
[0.0, 0.01000999998999, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.021825370842812494, 0.0],
[0.0, 0.03909759620159034, 0.0],
[0.0, 0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0.0, -0.010214494162927312, 0.010012024441354066],
[0.00200208256628545, -0.022821548354919067, 0.021830620581035857],
[0.0, -0.022821548354919067, 0.002949383274126105],
[0.0, -0.04241418126633477, 0.0],
[0.050155728521486365, -0.12017991413866216, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.00505305454620791, 0.010012024441354066],
[0.0010005203706447724, -0.011201622483733716, 0.021830620581035857],
[0.0, -0.011201622483733716, 0.002949383274126105],
[0.0, -0.020585865497718882, 0.0],
[0.025038871596209537, -0.0545825965137659, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.00505305454620791, 0.010012024441354066],
[-0.010188689433972452, 0.021830620581035857],
[-0.0112078992458765, 0.002949383274126105],
[-0.02059752492931316, 0.0],
[-0.027337628293439265, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.gross_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.gross_exposure(),
result
)
def test_net_exposure(self):
result = pd.DataFrame(
np.array([
[0.0, -0.01000999998999, 0.010012024441354066],
[0.00200208256628545, -0.021825370842812494, 0.021830620581035857],
[0.0, -0.021825370842812494, 0.002949383274126105],
[0.0, -0.03909759620159034, 0.0],
[0.050155728521486365, -0.09689116931945001, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.005002498748124688, 0.010012024441354066],
[0.0010005203706447724, -0.010956168751293576, 0.021830620581035857],
[0.0, -0.010956168751293576, 0.002949383274126105],
[0.0, -0.019771825228137207, 0.0],
[0.025038871596209537, -0.049210520540028384, 0.0]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.005002498748124688, 0.010012024441354066],
[-0.009965205542937988, 0.021830620581035857],
[-0.010962173376438594, 0.002949383274126105],
[-0.019782580537729116, 0.0],
[-0.0246106361476199, 0.0]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.net_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.net_exposure(),
result
)
pd.testing.assert_frame_equal(
pf_shared.net_exposure(),
result
)
def test_value(self):
result = pd.DataFrame(
np.array([
[100., 99.8801, 99.8799],
[99.89598, 98.77612, 100.77588],
[99.89001, 98.77612, 101.71618],
[99.89001, 96.46808, 101.70822],
[99.68951, 95.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False),
pd.DataFrame(
np.array([
[200., 199.8801, 99.8799],
[199.89598, 198.77612, 100.77588],
[199.89001, 198.77612, 101.71618],
[199.89001, 196.46808, 101.70822],
[199.68951, 195.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.value(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[199.8801, 199.8801, 99.8799],
[198.6721, 198.77612000000002, 100.77588000000002],
[198.66613, 198.6721, 101.71618000000001],
[196.35809, 196.35809, 101.70822000000001],
[194.95809, 195.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[199.8801, 99.8799],
[198.6721, 100.77588],
[198.66613, 101.71618],
[196.35809, 101.70822],
[194.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.value(),
result
)
def test_total_profit(self):
result = pd.Series(
np.array([-0.31049, -4.73142, 1.70822]),
index=price_na.columns
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(group_by=False),
result
)
result = pd.Series(
np.array([-5.04191, 1.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_profit')
pd.testing.assert_series_equal(
pf.total_profit(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_profit(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_profit(),
result
)
def test_final_value(self):
result = pd.Series(
np.array([99.68951, 95.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(group_by=False),
pd.Series(
np.array([199.68951, 195.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
)
result = pd.Series(
np.array([194.95809, 101.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('final_value')
pd.testing.assert_series_equal(
pf.final_value(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.final_value(),
result
)
pd.testing.assert_series_equal(
pf_shared.final_value(),
result
)
def test_total_return(self):
result = pd.Series(
np.array([-0.0031049, -0.0473142, 0.0170822]),
index=price_na.columns
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(group_by=False),
pd.Series(
np.array([-0.00155245, -0.0236571, 0.0170822]),
index=price_na.columns
).rename('total_return')
)
result = pd.Series(
np.array([-0.02520955, 0.0170822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_return')
pd.testing.assert_series_equal(
pf.total_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_return(),
result
)
def test_returns(self):
result = pd.DataFrame(
np.array([
[0.00000000e+00, -1.19900000e-03, -1.20100000e-03],
[-1.04020000e-03, -1.10530526e-02, 8.97057366e-03],
[-5.97621646e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.023366376407576966, -7.82569695e-05],
[-2.00720773e-03, -1.24341648e-02, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False),
pd.DataFrame(
np.array([
[0.00000000e+00, -5.99500000e-04, -1.20100000e-03],
[-5.20100000e-04, -5.52321117e-03, 8.97057366e-03],
[-2.98655331e-05, 0.0, 9.33060570e-03],
[0.00000000e+00, -0.011611253907159497, -7.82569695e-05],
[-1.00305163e-03, -6.10531746e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
pf_shared.returns(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[0.0, -0.0005995000000000062, -1.20100000e-03],
[-0.0005233022960706736, -0.005523211165093367, 8.97057366e-03],
[-3.0049513746473233e-05, 0.0, 9.33060570e-03],
[0.0, -0.011617682390048093, -7.82569695e-05],
[-0.0010273695869600474, -0.0061087373583639994, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-5.99500000e-04, -1.20100000e-03],
[-6.04362315e-03, 8.97057366e-03],
[-3.0049513746473233e-05, 9.33060570e-03],
[-0.011617682390048093, -7.82569695e-05],
[-7.12983101e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.returns(),
result
)
def test_asset_returns(self):
result = pd.DataFrame(
np.array([
[0., -np.inf, -np.inf],
[-np.inf, -1.10398, 0.89598],
[-0.02985, 0.0, 0.42740909],
[0., -1.0491090909090908, -0.02653333],
[-np.inf, -0.299875, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-np.inf, -np.inf],
[-1.208, 0.89598],
[-0.0029850000000000154, 0.42740909],
[-1.0491090909090908, -0.02653333],
[-0.35, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.asset_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.asset_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.asset_returns(),
result
)
def test_benchmark_value(self):
result = pd.DataFrame(
np.array([
[100., 100., 100.],
[100., 200., 200.],
[150., 200., 300.],
[200., 400., 400.],
[250., 500., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(group_by=False),
pd.DataFrame(
np.array([
[200., 200., 100.],
[200., 400., 200.],
[300., 400., 300.],
[400., 800., 400.],
[500., 1000., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200., 100.],
[300., 200.],
[350., 300.],
[600., 400.],
[750., 400.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_value(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_value(),
result
)
def test_benchmark_returns(self):
result = pd.DataFrame(
np.array([
[0., 0., 0.],
[0., 1., 1.],
[0.5, 0., 0.5],
[0.33333333, 1., 0.33333333],
[0.25, 0.25, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0., 0.],
[0.5, 1.],
[0.16666667, 0.5],
[0.71428571, 0.33333333],
[0.25, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
pf.benchmark_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
pf_grouped.benchmark_returns(),
result
)
pd.testing.assert_frame_equal(
pf_shared.benchmark_returns(),
result
)
def test_total_benchmark_return(self):
result = pd.Series(
np.array([1.5, 4., 3.]),
index=price_na.columns
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(group_by=False),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(group_by=False),
result
)
result = pd.Series(
np.array([2.75, 3.]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_benchmark_return')
pd.testing.assert_series_equal(
pf.total_benchmark_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
pf_grouped.total_benchmark_return(),
result
)
pd.testing.assert_series_equal(
pf_shared.total_benchmark_return(),
result
)
def test_return_method(self):
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(),
pd.DataFrame(
np.array([
[-0.000599499999999975, -0.0012009999999998966],
[-0.006639499999999909, 0.007758800000000177],
[-0.006669349999999907, 0.017161800000000005],
[-0.01820955000000002, 0.017082199999999936],
[-0.025209550000000136, 0.017082199999999936]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
)
pd.testing.assert_frame_equal(
pf_shared.cumulative_returns(group_by=False),
pd.DataFrame(
np.array([
[0.0, -0.000599499999999975, -0.0012009999999998966],
[-0.0005201000000001343, -0.006119399999999886, 0.007758800000000177],
[-0.0005499500000001323, -0.006119399999999886, 0.017161800000000005],
[-0.0005499500000001323, -0.017659599999999886, 0.017082199999999936],
[-0.0015524500000001495, -0.023657099999999875, 0.017082199999999936]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(risk_free=0.01),
pd.Series(
np.array([-59.62258787402645, -23.91718815937344]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(year_freq='365D'),
pd.Series(
np.array([-20.095906945591288, 12.345065267401496]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.sharpe_ratio(group_by=False),
pd.Series(
np.array([-13.30950646054953, -19.278625117344564, 12.345065267401496]),
index=price_na.columns
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
pf_shared.information_ratio(group_by=False),
pd.Series(
np.array([-0.9988561334618041, -0.8809478746008806, -0.884780642352239]),
index=price_na.columns
).rename('information_ratio')
)
with pytest.raises(Exception):
_ = pf_shared.information_ratio(pf_shared.benchmark_returns(group_by=False) * 2)
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Start Value', 'End Value',
'Total Return [%]', 'Benchmark Return [%]', 'Max Gross Exposure [%]',
'Total Fees Paid', 'Max Drawdown [%]', 'Max Drawdown Duration',
'Total Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Best Trade [%]', 'Worst Trade [%]',
'Avg Winning Trade [%]', 'Avg Losing Trade [%]',
'Avg Winning Trade Duration', 'Avg Losing Trade Duration',
'Profit Factor', 'Expectancy', 'Sharpe Ratio', 'Calmar Ratio',
'Omega Ratio', 'Sortino Ratio'
], dtype='object')
pd.testing.assert_series_equal(
pf.stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 98.88877000000001, -1.11123, 283.3333333333333,
2.05906183131983, 0.42223000000000005, 1.6451238489727062, pd.Timedelta('3 days 08:00:00'),
2.0, 1.3333333333333333, 0.6666666666666666, -1.5042060606060605, 33.333333333333336,
-98.38058805880588, -100.8038553855386, 143.91625412541256, -221.34645964596464,
pd.Timedelta('2 days 12:00:00'), pd.Timedelta('2 days 00:00:00'), np.inf, 0.10827272727272726,
-6.751008013903537, 10378.930331014584, 4.768700318817701, 31.599760994679134
]),
index=stats_index,
name='agg_func_mean')
)
pd.testing.assert_series_equal(
pf.stats(column='a'),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.10999000000000003, -13.30804491478906, -65.40868619923044, 0.0, -11.738864633265454
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(freq='10 days', year_freq='200 days')),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('50 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('40 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('10 days 00:00:00'), 0.0, -0.10999000000000003,
-3.1151776875290866, -3.981409131683691, 0.0, -2.7478603669149457
]),
index=stats_index,
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(trade_type='positions')),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
2, 1, 1, -0.20049999999999982, 0.0, -54.450495049504966, -54.450495049504966,
np.nan, -54.450495049504966, pd.NaT, pd.Timedelta('1 days 00:00:00'), 0.0,
-0.10999000000000003, -13.30804491478906, -65.40868619923044, 0.0, -11.738864633265454
]),
index=pd.Index([
'Start', 'End', 'Period', 'Start Value', 'End Value',
'Total Return [%]', 'Benchmark Return [%]', 'Max Gross Exposure [%]',
'Total Fees Paid', 'Max Drawdown [%]', 'Max Drawdown Duration',
'Total Trades', 'Total Closed Trades', 'Total Open Trades',
'Open Trade PnL', 'Win Rate [%]', 'Best Trade [%]',
'Worst Trade [%]', 'Avg Winning Trade [%]',
'Avg Losing Trade [%]', 'Avg Winning Trade Duration',
'Avg Losing Trade Duration', 'Profit Factor', 'Expectancy',
'Sharpe Ratio', 'Calmar Ratio', 'Omega Ratio', 'Sortino Ratio'
], dtype='object'),
name='a')
)
pd.testing.assert_series_equal(
pf.stats(column='a', settings=dict(required_return=0.1, risk_free=0.01)),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, 99.68951, -0.3104899999999997, 150.0,
5.015572852148637, 0.35549, 0.3104900000000015, | pd.Timedelta('4 days 00:00:00') | pandas.Timedelta |
# Standard libraries
import sys
from typing import Optional
import threading
# Third party libraries
import discord
from discord.ext import commands
import pandas as pd
# Local dependencies
from util.vars import config
from util.db import get_db
def get_guild(bot: commands.Bot) -> discord.Guild:
"""
Returns the guild / server the bot is currently connected to.
Parameters
----------
    bot : commands.Bot
The bot object.
Returns
-------
discord.Guild
The guild / server the bot is currently connected to.
"""
return discord.utils.get(
bot.guilds,
# Return the debug server if -test is used as an argument
name=config["DEBUG"]["GUILD_NAME"]
if len(sys.argv) > 1 and sys.argv[1] == "-test"
else config["DISCORD"]["GUILD_NAME"],
)
def get_channel(bot: commands.Bot, channel_name: str) -> discord.TextChannel:
"""
Returns the discord.TextChannel object of the channel with the given name.
Parameters
----------
bot : commands.Bot
The bot object.
channel_name : str
The name of the channel.
Returns
-------
discord.TextChannel
The discord.TextChannel object of the channel with the given name.
"""
return discord.utils.get(
bot.get_all_channels(),
guild__name=config["DEBUG"]["GUILD_NAME"]
if len(sys.argv) > 1 and sys.argv[1] == "-test"
else config["DISCORD"]["GUILD_NAME"],
name=channel_name,
)
def get_emoji(bot: commands.Bot, emoji: str) -> discord.Emoji:
"""
Returns the custom emoji with the given name.
Parameters
----------
bot : commands.Bot
The bot object.
emoji : str
The name of the emoji.
Returns
-------
discord.Emoji
The custom emoji with the given name.
"""
guild = get_guild(bot)
return discord.utils.get(guild.emojis, name=emoji)
async def get_user(bot: commands.Bot, user_id: int) -> discord.User:
"""
Gets the discord.User object of the user with the given id.
Parameters
----------
bot : commands.Bot
The bot object.
user_id : int
The id of the user.
Returns
-------
discord.User
The discord.User object of the user with the given id.
"""
return await bot.fetch_user(user_id)
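# --- Hedged usage sketch (not part of the original module) ---
# The helpers above are typically called from a cog or event handler once the
# bot is ready. The bot instance, channel name, emoji name and user id below
# are illustrative assumptions, not values defined elsewhere in this project.
async def _example_helper_usage(bot: commands.Bot) -> None:
    guild = get_guild(bot)                      # configured guild (or debug guild with -test)
    channel = get_channel(bot, "general")       # text channel looked up by name
    emoji = get_emoji(bot, "rocket")            # custom emoji registered on that guild
    user = await get_user(bot, 123456789012345678)
    print(guild, channel, emoji, user)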
assets_db = | pd.DataFrame() | pandas.DataFrame |
# TO DO
# 1. Fair probability
# 2. Hedge opportunities
# 3. Datapane map
# 4. Change since prior poll
# Import modules
import json
import requests
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import pandas as pd
pd.set_option('display.max_rows', None) #print all rows without truncating
pd.options.mode.chained_assignment = None #hide SettingWithCopyWarning
import numpy as np
import datetime
import os
import zipfile #Economist
import urllib.request #Economist
# Pull in market data from PredictIt's API
Predictit_URL = "https://www.predictit.org/api/marketdata/all/"
Predictit_response = requests.get(Predictit_URL)
jsondata = Predictit_response.json()
# Replace null values with zero
def dict_clean(items):
result = {}
for key, value in items:
if value is None:
value = 0
result[key] = value
return result
dict_str = json.dumps(jsondata)
jsondata = json.loads(dict_str, object_pairs_hook=dict_clean)
# Market data by contract/price in dataframe
data = []
for p in jsondata['markets']:
for k in p['contracts']:
data.append([p['id'],p['name'],k['id'],k['name'],k['bestBuyYesCost'],k['bestBuyNoCost'],k['bestSellYesCost'],k['bestSellNoCost']])
# Pandas dataframe named 'predictit_df'
predictit_df = pd.DataFrame(data)
# Update dataframe column names
predictit_df.columns=['Market_ID','Market_Name','Contract_ID','Contract_Name','PredictIt_Yes','bestBuyNoCost','BestSellYesCost','BestSellNoCost']
# Filter PredicitIt dataframe to presidential state markets/contracts
predictit_df = predictit_df[predictit_df['Market_Name'].str.contains("Which party will win") & predictit_df['Market_Name'].str.contains("2020 presidential election?")]
# Fix annoying typo (double space) in congressional district market names
predictit_df['Market_Name'] = predictit_df['Market_Name'].str.replace('in the  2020','in the 2020')
# Split Market_Name column into state name column
start_string = "Which party will win"
end_string = "in the 2020 presidential election?"
predictit_df['a'], predictit_df['state'] = predictit_df['Market_Name'].str.split(start_string, 1).str
predictit_df['state'], predictit_df['b'] = predictit_df['state'].str.split(end_string, 1).str
del predictit_df['a']
del predictit_df['b']
# Create answer column from contract names
predictit_df['answer'] = predictit_df['Contract_Name'].str.replace('Republican','Trump').str.replace('Democratic','Biden')
# Strip trailing/leading whitespaces in answer and state columns
predictit_df['state'] = predictit_df['state'].str.strip()
predictit_df['answer'] = predictit_df['answer'].str.strip()
# Pull in polling data from 538
pres_polling = pd.read_csv('https://projects.fivethirtyeight.com/polls-page/president_polls.csv')
pres_polling = pres_polling.dropna(subset=['state'])
# Drop extraneous columns
pres_polling = pres_polling.drop(['pollster_id', 'sponsor_ids','sponsors','display_name', 'pollster_rating_id', 'pollster_rating_name', 'fte_grade', 'sample_size', 'population', 'population_full', 'methodology', 'seat_number', 'seat_name', 'start_date', 'sponsor_candidate', 'internal', 'partisan', 'tracking', 'nationwide_batch', 'ranked_choice_reallocated', 'notes', 'url'], axis=1)
# Standardize congressional district names in 538 with PredictIt
pres_polling['state'] = pres_polling['state'].str.replace('Maine CD-1','ME-01')
pres_polling['state'] = pres_polling['state'].str.replace('Maine CD-2','ME-02')
pres_polling['state'] = pres_polling['state'].str.replace('Nebraska CD-2','NE-02')
# Filter to most recent poll for Biden & Trump
# create a count column for 'question_id' to work around "Delaware problem": multiple matchups in same survey
pres_polling = pres_polling.loc[pres_polling['pollster'] != 'SurveyMonkey'] # filter out SurveyMonkey polls
pres_polling['created_at'] = pd.to_datetime(pres_polling['created_at']) #convert 'created_at' to datetime
recent_pres_polling = pres_polling[pres_polling['answer'].isin(['Biden', 'Trump'])]
recent_pres_polling['Count'] = recent_pres_polling.groupby('question_id')['question_id'].transform('count')
recent_pres_polling = recent_pres_polling[(recent_pres_polling.Count > 1)]
recent_pres_polling = recent_pres_polling.sort_values(by=['question_id'], ascending=False).drop_duplicates(['state', 'candidate_name'], keep='first')
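# Hedged mini-example (toy data, not real polls) of the "Delaware problem"
# filter above: a question_id that reports only one of Biden/Trump gets
# Count == 1 and is dropped, while a head-to-head matchup (Count == 2) is kept.
def _demo_delaware_filter() -> pd.DataFrame:
    toy = pd.DataFrame({
        'question_id': [1, 1, 2],
        'answer': ['Biden', 'Trump', 'Biden'],
        'pct': [50.0, 45.0, 55.0],
    })
    toy['Count'] = toy.groupby('question_id')['question_id'].transform('count')
    return toy[toy.Count > 1]  # question_id 2 is removed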
# Rename 538 'pct' column to '538_latest_poll'
recent_pres_polling = recent_pres_polling.rename({'pct': '538_latest_poll'}, axis=1)
# Rename 538 'end_date' column to '538_poll_date'
recent_pres_polling = recent_pres_polling.rename({'end_date': '538_poll_date'}, axis=1)
# Pull in polling data from 538 polling averages
pres_poll_avg = pd.read_csv('https://projects.fivethirtyeight.com/2020-general-data/presidential_poll_averages_2020.csv')
# Drop extraneous columns
pres_poll_avg = pres_poll_avg.drop(['cycle'], axis=1)
# Standardize congressional district names in 538 polling averages with PredictIt
pres_poll_avg['state'] = pres_poll_avg['state'].str.replace('Maine CD-1','ME-01')
pres_poll_avg['state'] = pres_poll_avg['state'].str.replace('Maine CD-2','ME-02')
pres_poll_avg['state'] = pres_poll_avg['state'].str.replace('Nebraska CD-2','NE-02')
# Standardize candidate names and column name
pres_poll_avg = pres_poll_avg.replace({'candidate_name' : { '<NAME>.' : 'Biden', '<NAME>' : 'Trump'}})
pres_poll_avg['answer'] = pres_poll_avg['candidate_name']
# Filter to most recent poll for Biden & Trump
pres_poll_avg['modeldate'] = pd.to_datetime(pres_poll_avg['modeldate']) #convert 'modeldate' to datetime
pres_poll_avg = pres_poll_avg.sort_values(by=['modeldate']).drop_duplicates(['state', 'candidate_name'], keep='last')
pres_poll_avg = pres_poll_avg[pres_poll_avg['answer'].isin(['Biden', 'Trump'])]
# Round pct_estimate and pct_trend_adjusted to 2 decimal places
pres_poll_avg['pct_estimate'] = pres_poll_avg['pct_estimate'].round(2)
pres_poll_avg['pct_trend_adjusted'] = pres_poll_avg['pct_trend_adjusted'].round(2)
# Merge 538 poll and 538 poll averages dataframes together
recent_pres_polling = pd.merge(recent_pres_polling, pres_poll_avg, on=['state', 'answer'], how='left')
# Pull in most recent state-level model data from 538
pres_model = | pd.read_csv('https://projects.fivethirtyeight.com/2020-general-data/presidential_state_toplines_2020.csv') | pandas.read_csv |
from daily_clifile_editor import compute_breakpoint
import pandas as pd
import subprocess
import numpy as np
import matplotlib.pyplot as plt
# Jun 11 2015
precip = [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02,
0.02,
0.02,
0.02,
0.02,
0.02,
0.09,
0.09,
0.12,
0.09,
0.09,
0.14,
0.14,
0.14,
0.09,
0.36,
1.34,
1.34,
1.34,
0.89,
0.38,
0.38,
0.12,
0.09,
0.09,
0.14,
0.14,
0.17,
0.15,
0.15,
0.30,
0.27,
0.27,
0.60,
0.60,
0.60,
0.27,
0.27,
0.20,
0.20,
0.21,
0.20,
0.20,
0.29,
0.35,
0.35,
0.23,
0.09,
0.06,
0.06,
0.05,
0.05,
0.05,
0.03,
0.03,
0.06,
0.06,
0.06,
0.08,
0.08,
0.11,
0.14,
0.14,
0.06,
0.06,
0.06,
0.09,
0.09,
0.12,
0.15,
0.15,
0.14,
0.14,
0.14,
0.11,
0.14,
0.11,
0.11,
0.11,
0.14,
0.14,
0.20,
0.29,
0.29,
0.23,
0.20,
0.17,
0.17,
0.17,
0.14,
0.12,
0.12,
0.11,
0.18,
0.18,
0.21,
0.20,
0.21,
0.20,
0.20,
0.26,
0.21,
0.05,
0.05,
0.08,
0.08,
0.08,
0.08,
0.11,
0.11,
0.12,
0.12,
0.12,
0.21,
0.21,
0.20,
0.18,
0.18,
0.14,
0.14,
0.14,
0.15,
0.15,
0.15,
0.15,
0.15,
0.14,
0.14,
0.14,
0.12,
0.12,
0.09,
0.09,
0.09,
0.05,
0.03,
0.06,
0.06,
0.06,
0.06,
0.06,
0.12,
0.12,
0.15,
0.17,
0.17,
0.14,
0.14,
0.09,
0.09,
0.14,
0.14,
0.17,
0.17,
0.15,
0.12,
0.12,
0.15,
0.15,
0.26,
0.29,
0.38,
0.38,
0.50,
0.42,
0.35,
0.35,
0.50,
0.50,
0.42,
0.42,
0.45,
0.44,
0.44,
0.50,
0.56,
0.57,
0.53,
0.53,
0.36,
0.24,
0.24,
0.09,
0.09,
0.08,
0.09,
0.09,
0.06,
0.03,
0.05,
0.11,
0.11,
0.20,
0.41,
0.41,
0.14,
0.14,
0.12,
0.11,
0.11,
0.09,
0.09,
0.09,
0.11,
0.11,
0.12,
0.14,
0.14,
0.06,
0.06,
0.06,
0.03,
0.03,
0.11,
0.15,
0.15,
0.12,
0.12,
0.12,
0.06,
0.06,
0.20,
0.20,
0.20,
0.12,
0.12,
1.35,
2.68,
2.68,
3.62,
3.62,
2.46,
0.38,
0.38,
1.05,
1.05,
1.04,
0.59,
0.59,
0.86,
0.90,
0.90,
0.51,
0.51,
0.41,
0.23,
0.23,
0.59,
0.59,
1.41,
2.19,
2.19,
2.16,
1.41,
1.41,
1.02,
1.02,
1.02,
1.04,
1.04,
1.71,
1.71,
1.71,
0.92,
0.92,
0.92,
0.72,
0.72,
0.63,
0.29,
0.29,
0.03,
0.03,
0.02,
0.02,
0.02,
0.02,
0.02,
0.02,
0.02,
0,
0,
0,
0,
0,
0,
0,
0,
0.02,
0.02,
0.02,
0.05,
0.05,
0.03,
0.02,
0.02,
0.05,
0.05,
0.03,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03,
0.03,
0.03,
0.02,
0.02,
0.02,
0.02,
0.02,
0,
0,
0.03,
0.03,
0.03,
0.05,
0.24,
0.24,
1.01,
0.92,
0.24,
0.45,
0.45,
1.34,
1.34,
1.34,
1.34,
1.34,
2.06,
2.06,
1.94,
0,
0.02,
0.02,
0.02,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.11,
0.11,
0.24,
0.24,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05,
0.06,
0.06,
0.03,
0.03,
0.03,
0.02,
0.02,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
]
def get_results():
for line in open("/i/0/env/10240010/0109/102400100109_44.env"):
tokens = line.strip().split()
if tokens[0] == "11" and tokens[1] == "6" and tokens[2] == "9":
return dict(
runoff=float(tokens[4]),
loss=float(tokens[5]),
delivery=float(tokens[12]),
)
def get_maxrate(bpdata):
t = []
r = []
for line in bpdata:
tokens = line.split()
if len(tokens) != 2:
continue
t.append(float(tokens[0]))
r.append(float(tokens[1]))
maxr = 0
for i in range(1, len(t)):
dt = t[i] - t[i - 1]
dr = r[i] - r[i - 1]
rate = dr / dt
if rate > maxr:
maxr = rate
return maxr
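# Hedged illustration (assumed breakpoint format: "<time> <cumulative depth>"
# per line, as produced by compute_breakpoint): the maximum rate is simply the
# steepest segment of the cumulative curve.
def _demo_maxrate() -> float:
    bp = ["0 0.0", "0.5 0.2", "1.0 1.2"]  # second segment: (1.2 - 0.2) / 0.5 = 2.0
    return get_maxrate(bp)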
def edit(bpdata):
o = open("/i/0/cli/095x041/094.86x040.84.cli").read()
pos1 = o.find("11\t6\t2015")
pos2 = o.find("12\t6\t2015")
out = open("/i/0/cli/095x041/094.86x040.84.cli", "w")
newdata = (
"11\t6\t2015\t%(points)s\t25.0\t20.0\t269\t4.2\t0\t20.0\n" "%(bp)s\n"
) % dict(points=len(bpdata), bp=("\n".join(bpdata)))
out.write(o[:pos1] + newdata + o[pos2:])
out.close()
def run():
print(
"%2s %2s %2s %6s %6s %6s %6s"
% ("I", "A", "SZ", "MAXR", "RUNOF", "LOSS", "DELIV")
)
rows = []
for intensityThres in range(1, 21):
for accumThres in range(1, 21):
bpdata = compute_breakpoint(
precip,
accumThreshold=accumThres,
intensityThreshold=intensityThres,
)
edit(bpdata)
maxr = get_maxrate(bpdata)
cmd = "~/bin/wepp < /i/0/run/10240010/0109/102400100109_44.run"
subprocess.call(
cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
res = get_results()
print(
("%2i %2i %2i %6.2f %6.2f %6.3f %6.3f")
% (
intensityThres,
accumThres,
len(bpdata),
maxr,
res["runoff"],
res["loss"],
res["delivery"],
)
)
rows.append(
dict(
intensitythres=intensityThres,
accumthres=accumThres,
points=len(bpdata),
maxrate=maxr,
runoff=res["runoff"],
loss=res["loss"],
delivery=res["delivery"],
)
)
df = pd.DataFrame(rows)
df.to_pickle("exercise.pickle")
def plot():
df = | pd.read_pickle("exercise.pickle") | pandas.read_pickle |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class DataProcessor:
def __init__(self, data_path):
self.orig_data = pd.read_csv(data_path)
self.data = self.orig_data
self.scaled_features = {}
self.train_features = None
self.train_targets = None
self.test_features = None
self.test_targets = None
self.test_data = None
self.val_features = None
self.val_targets = None
def show_data(self, plot_by_dteday=False):
        print(self.data.head())
        if plot_by_dteday:
self.data[:24*10].plot(x='dteday', y='cnt', title='Data for the first 10 days')
plt.show()
def virtualize(self):
# Add virtualized data columns
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(self.data[each], prefix=each, drop_first=False)
self.data = | pd.concat([self.data, dummies], axis=1) | pandas.concat |
import multiprocessing as mp
import numpy as np
import pandas as pd
def _get_ids(vol, bl, co):
"""Fetch block and extract IDs.
Parameters
----------
vol : CloudVolume
Volume to query.
bl : list-like
Coordinates defining the block:
left, right, top, bottom, z1, z2
co : numpy array
x/y/z coordinates WITHIN block
of segment IDs to fetch.
"""
# Unpack block indices
l, r, t, b, z1, z2 = bl
# Subset block to relevant parts (i.e. those containing
# requested coordinates) to save memory
mn = co.min(axis=0)
mx = co.max(axis=0) + 1
l, r = l + mn[0], l + mx[0]
t, b = t + mn[1], t + mx[1]
z1, z2 = z1 + mn[2], z1 + mx[2]
# Offset coordinates too
co -= mn
# Get the block
chunk = vol[l:r, t:b, z1:z2]
# Get the IDs out of the block
co_id = chunk[co[:, 0], co[:, 1], co[:, 2]]
return co_id
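# Toy illustration (no CloudVolume required) of the cropping/offset logic in
# _get_ids above: the block is cut down to the bounding box of the requested
# coordinates, and the coordinates are shifted so they index the cropped chunk.
# The numbers are arbitrary assumptions for demonstration.
def _demo_offset() -> np.ndarray:
    co = np.array([[5, 6, 1], [7, 9, 2]])
    mn = co.min(axis=0)      # [5, 6, 1] becomes the crop origin
    return co - mn           # [[0, 0, 0], [2, 3, 1]] index into the cropped chunk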
def get_multiple_ids(x, vol, max_workers=mp.cpu_count() - 5):
"""Return multiple segment IDs using cloudvolume.
Parameters
----------
x : numpy array
Array with x/y/z coordinates to fetch
segmentation IDs for.
vol : cloudvolume.CloudVolume
"""
# Make sure x is array
if not isinstance(x, np.ndarray):
x = np.array(x)
if not max_workers:
max_workers = 1
# Hard coded block size
blocksize = np.array([128, 128, 32])
# Make bins to fit with blocksize
xbins = np.arange(0, np.max(x) + blocksize[0], blocksize[0])
ybins = np.arange(0, np.max(x) + blocksize[1], blocksize[1])
zbins = np.arange(0, np.max(x) + blocksize[2], blocksize[2])
# Sort data into bins
cbin = | pd.DataFrame(x) | pandas.DataFrame |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_io.ipynb (unless otherwise specified).
__all__ = ['dicom_dataframe', 'get_plane', 'is_axial', 'is_sagittal', 'is_coronal', 'is_fat_suppressed', 'load_mat',
'load_h5']
# Cell
from fastscript import call_parse, Param, bool_arg
from scipy import ndimage
import h5py
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pydicom
import scipy.io as spio
# Cell
def _get_dcm_paths_from_folderpath(path_to_dicom_dir, dicom_extension='DCM'):
"""
Returns a list of paths to dicom files within a directory.
Attributes:
path_to_dicom_dir (str): path to folder containing dicoms.
dicom_extension (str): case insensitive str with dicom extension.
Returns:
dcm_paths_list (list): list of paths to dicom files.
"""
if not path_to_dicom_dir.endswith(('/')):
path_to_dicom_dir = f"{path_to_dicom_dir}/"
filenames_list = os.listdir(path_to_dicom_dir)
dcm_paths_list = [os.path.join(path_to_dicom_dir, name) for name in filenames_list if str.lower(dicom_extension) in str.lower(name)]
return(dcm_paths_list)
# Cell
def _read_dicom_from_file(dicom_file_path):
"""
Reads dicom using pydicom.dcmread.
Attributes:
dicom_file_path (str): path to file of interest
Returns:
dcm (FileDataset): result from pydicom.dcmread()
"""
try:
return(pydicom.dcmread(dicom_file_path))
except Exception as error:
print(error)
# Cell
def _get_tag_from_loaded_dicom(dcm, tag):
"""
Get tag from loaded pydicom data structure.
Attributes:
dcm (FileDataset): dicom slice, result from pydicom.dcmread()
tag (str): key of interest in the dicom (e.g. "SOPClassUID")
Returns:
        content (str): the content of the dicom key
"""
try:
content = dcm[tag].value
return(content)
except Exception as e:
print(str(tag), " tag was not found! Skipping...", e)
return None
# Cell
def _get_normal_from_dicom_slice(dcm):
"""
Get normal vector from a dicom slice.
Attributes:
dcm (FileDataset): dicom slice, result from pydicom.dcmread()
Returns:
        normal_vector (arr): numpy array containing the slice's normal vector
"""
cosines = _get_tag_from_loaded_dicom(dcm,'ImageOrientationPatient')
cosines = [float(i) for i in cosines]
normal = np.empty(3)
    normal[0] = cosines[1]*cosines[5] - cosines[2]*cosines[4]
    normal[1] = cosines[2]*cosines[3] - cosines[0]*cosines[5]
    normal[2] = cosines[0]*cosines[4] - cosines[1]*cosines[3]
return(np.array(normal))
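# Cell
# Hedged sanity check (assumed-equivalent formulation, not used elsewhere in
# this module): the hand-written cross product above should agree with
# np.cross applied to the row and column direction cosines.
def _normal_via_numpy(dcm):
    cosines = [float(i) for i in _get_tag_from_loaded_dicom(dcm, 'ImageOrientationPatient')]
    return np.cross(cosines[:3], cosines[3:])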
# Cell
def _get_dicom_slice_IPP_along_normal(dcm):
"""
Get dicom slice ImagePositionPatient (IPP) in the direction of the slice's normal vector
Attributes:
dcm (FileDataset): dicom slice, result from pydicom.dcmread()
Returns:
slice_ipp (double): ImagePositionPatient value along slice's normal vector
"""
IPP = _get_tag_from_loaded_dicom(dcm, 'ImagePositionPatient')
IPP = [float(i) for i in IPP]
slice_ipp = np.inner(_get_normal_from_dicom_slice(dcm), IPP)
return(slice_ipp)
# Cell
def _get_distance_between_two_dicom_slices(dcm1, dcm2):
"""
Get distance between two dicom slices along the normal vector
Attributes:
dcm1 (FileDataset): dicom slice, result from pydicom.dcmread()
dcm2 (FileDataset): dicom slice, result from pydicom.dcmread()
Returns:
        distance (float): absolute distance between the two slices along the normal vector
"""
slice_position_1 = _get_dicom_slice_IPP_along_normal(dcm1)
slice_position_2 = _get_dicom_slice_IPP_along_normal(dcm2)
distance = np.abs(slice_position_2 - slice_position_1)
return(distance)
# Cell
class dicom_dataframe:
"""
Class for sorting, selecting and loading portions of dicom data.
Objects of this class are pandas dataframes with at least one main column ('DS')
that holds the results of pydicom.read() for each file read during initialization.
Users can populate the dataframe with new columns using DICOM tags to facilitate
subsequent filtering, sorting and data loading.
"""
def __init__(self, path_to_dicom_dir, dicom_extension='dcm', read_single_random_dcm=False):
"""
Read dicom files from folder and add them to a pandas dataframe.
Attributes:
path_to_dicom_dir (str): path to directory containing dicom files
dicom_extension (str): cases insensitive string with dicom file extension
read_single_random_dcm (bool): whether or not to return a single, random dicom file
Returns:
updates self.dataframe with new columns.
"""
if not path_to_dicom_dir.endswith(('/')):
path_to_dicom_dir = f"{path_to_dicom_dir}/"
df = | pd.DataFrame() | pandas.DataFrame |
import shutil
import sys
from argparse import ArgumentParser
from collections import Counter
from pathlib import Path
from zipfile import ZipFile
import numpy as np
import pandas as pd
import requests
from src.config import CONTEXT_SIZE, COVERAGE, DATA_DIR, TEXT8_URL, VOCAB_SIZE
from src.utils.logger import get_logger
logger = get_logger(__name__)
def download_data(url=TEXT8_URL, dest_dir=DATA_DIR):
# prepare destination
dest = Path(dest_dir) / Path(url).name
dest.parent.mkdir(parents=True, exist_ok=True)
    # download zip
if not dest.exists():
logger.info("downloading file: %s.", url)
r = requests.get(url, stream=True)
with dest.open("wb") as f:
shutil.copyfileobj(r.raw, f)
logger.info("file downloaded: %s.", dest)
# extract zip
if not Path(dest_dir, "text8").exists():
with dest.open("rb") as f, ZipFile(f, "r") as zf:
zf.extractall(dest_dir)
logger.info("file extracted.")
def load_data(src_dir=DATA_DIR):
file_path = Path(src_dir, "text8")
with open(file_path) as f:
text8 = f.read()
logger.info("file loaded: %s.", file_path)
return text8
def process_data(text8, vocab_size=VOCAB_SIZE, coverage=COVERAGE, context_size=CONTEXT_SIZE):
text8_tokens = text8.split()
# create vocab
df_vocab = create_vocabulary(text8_tokens, vocab_size, coverage)
vocab_size, _ = df_vocab.shape
logger.info("vocab created, size: %s.", vocab_size)
# compute interaction
df_interaction = create_interaction_dataframe(text8_tokens, df_vocab, context_size)
df_interaction = create_glove_dataframe(df_interaction)
return {"vocabulary": df_vocab, "interaction": df_interaction}
def create_vocabulary(text_tokens, vocab_size=VOCAB_SIZE, coverage=COVERAGE):
tokens_counter = Counter(text_tokens)
# find cumulative proportion of token counts
counts = np.sort(list(tokens_counter.values()))[::-1]
total = np.sum(counts)
counts_cumprop = np.cumsum(counts) / total
# get count with defined coverage of total tokens
count_cutoff = counts[np.searchsorted(counts_cumprop, coverage)]
logger.info("count cufoff: %s; token coverage: %s.", count_cutoff, coverage)
# get vocab and counts
vocab = [token for token, count in tokens_counter.most_common(vocab_size) if count >= count_cutoff]
vocab_counts = [tokens_counter[token] for token in vocab]
unk_count = total - np.sum(vocab_counts)
df_vocab = pd.DataFrame({"token": ["<UNK>"] + vocab, "count": [unk_count] + vocab_counts})
df_vocab["proportion"] = df_vocab["count"] / total
df_vocab = df_vocab.sort_values("count", ascending=False).reset_index(drop=True)
return df_vocab
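# Hedged toy example (hypothetical tokens) of the coverage cutoff above:
# frequent tokens are kept until the requested share of all occurrences is
# covered, and anything rarer is folded into the <UNK> row.
def _demo_vocabulary() -> pd.DataFrame:
    tokens = ["the"] * 6 + ["cat"] * 3 + ["sat"]
    return create_vocabulary(tokens, vocab_size=10, coverage=0.75)  # 'sat' -> <UNK>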
def create_interaction_dataframe(text_tokens, df_vocab, context_size=CONTEXT_SIZE):
token2id = {token: i for i, token in enumerate(df_vocab["token"])}
token_ids = (token2id.get(token, 0) for token in text_tokens)
df = pd.DataFrame(list(enumerate(token_ids)), columns=["position", "token_id"])
# cross join by position for right context only
df_concat = pd.concat([df.set_index(df["position"] + i + 1) for i in range(context_size)])
df_co = df_concat.join(df, how="inner", lsuffix="_row", rsuffix="_col")
df_co = df_co.loc[(df_co["token_id_row"] != df_co["token_id_col"]) &
(df_co["position_row"] < df_co["position_col"]), :]
df_co = df_co.assign(**{"value": 1 / (df_co["position_col"] - df_co["position_row"])})
# aggregate interactions
df_agg = (df_co.groupby(["token_id_row", "token_id_col"])["value"]
.agg(["count", "sum"])
.reset_index()
.rename(columns={"token_id_row": "row_token_id", "token_id_col": "col_token_id", "sum": "value"}))
df_agg = df_agg.loc[(df_agg["count"] != 0) & (df_agg["value"] != 0), :]
# union swap row and col since symmetric
dfs_agg = [df_agg, df_agg.rename(columns={"row_token_id": "col_token_id", "col_token_id": "row_token_id"})]
df_agg = ( | pd.concat(dfs_agg, sort=False) | pandas.concat |
import copy
import os
import warnings
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pathlib import Path
from tqdm import tqdm
from scipy import stats
from typing import Tuple, Dict, Union
from scipy.spatial.distance import cdist
from sklearn.model_selection import KFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.decomposition import PCA
from sklearn.metrics import f1_score, mean_squared_error, jaccard_score
from sklearn.exceptions import ConvergenceWarning
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.linear_model import Lasso, Ridge, ElasticNet, LogisticRegression
from dython.nominal import compute_associations, numerical_encoding
from .viz import *
from .metrics import *
from .notebook import visualize_notebook, isnotebook, EvaluationResult
from .utils import dict_to_df
class TableEvaluator:
"""
Class for evaluating synthetic data. It is given the real and fake data and allows the user to easily evaluate data with the `evaluate` method.
Additional evaluations can be done with the different methods of evaluate and the visual evaluation method.
"""
def __init__(self, real: pd.DataFrame, fake: pd.DataFrame, cat_cols=None, unique_thresh=0, metric='pearsonr',
verbose=False, n_samples=None, name: str = None, seed=1337):
"""
:param real: Real dataset (pd.DataFrame)
:param fake: Synthetic dataset (pd.DataFrame)
:param unique_thresh: Threshold for automatic evaluation if column is numeric
:param cat_cols: The columns that are to be evaluated as discrete. If passed, unique_thresh is ignored.
        :param metric: the metric to use for evaluating linear relations. Pearson's r by default, but supports all models in scipy.stats
:param verbose: Whether to print verbose output
:param n_samples: Number of samples to evaluate. If none, it will take the minimal length of both datasets and cut the larger one off to make sure they
are the same length.
:param name: Name of the TableEvaluator. Used in some plotting functions like `viz.plot_correlation_comparison` to indicate your model.
"""
self.name = name
self.unique_thresh = unique_thresh
self.real = real.copy()
self.fake = fake.copy()
self.comparison_metric = getattr(stats, metric)
self.verbose = verbose
self.random_seed = seed
# Make sure columns and their order are the same.
if len(real.columns) == len(fake.columns):
fake = fake[real.columns.tolist()]
assert real.columns.tolist() == fake.columns.tolist(), 'Columns in real and fake dataframe are not the same'
if cat_cols is None:
real = real.infer_objects()
fake = fake.infer_objects()
self.numerical_columns = [column for column in real._get_numeric_data().columns if
len(real[column].unique()) > unique_thresh]
self.categorical_columns = [column for column in real.columns if column not in self.numerical_columns]
else:
self.categorical_columns = cat_cols
self.numerical_columns = [column for column in real.columns if column not in cat_cols]
# Make sure the number of samples is equal in both datasets.
if n_samples is None:
self.n_samples = min(len(self.real), len(self.fake))
elif len(fake) >= n_samples and len(real) >= n_samples:
self.n_samples = n_samples
else:
raise Exception(f'Make sure n_samples < len(fake/real). len(real): {len(real)}, len(fake): {len(fake)}')
self.real = self.real.sample(self.n_samples)
self.fake = self.fake.sample(self.n_samples)
assert len(self.real) == len(self.fake), f'len(real) != len(fake)'
self.real.loc[:, self.categorical_columns] = self.real.loc[:, self.categorical_columns].fillna('[NAN]').astype(
str)
self.fake.loc[:, self.categorical_columns] = self.fake.loc[:, self.categorical_columns].fillna('[NAN]').astype(
str)
self.real.loc[:, self.numerical_columns] = self.real.loc[:, self.numerical_columns].fillna(
self.real[self.numerical_columns].mean())
self.fake.loc[:, self.numerical_columns] = self.fake.loc[:, self.numerical_columns].fillna(
self.fake[self.numerical_columns].mean())
def plot_mean_std(self, fname=None):
"""
Class wrapper function for plotting the mean and std using `viz.plot_mean_std`.
:param fname: If not none, saves the plot with this file name.
"""
plot_mean_std(self.real, self.fake, fname=fname)
def plot_cumsums(self, nr_cols=4, fname=None):
"""
Plot the cumulative sums for all columns in the real and fake dataset. Height of each row scales with the length of the labels. Each plot contains the
values of a real columns and the corresponding fake column.
:param fname: If not none, saves the plot with this file name.
"""
nr_charts = len(self.real.columns)
nr_rows = max(1, nr_charts // nr_cols)
nr_rows = nr_rows + 1 if nr_charts % nr_cols != 0 else nr_rows
max_len = 0
# Increase the length of plots if the labels are long
if not self.real.select_dtypes(include=['object']).empty:
lengths = []
for d in self.real.select_dtypes(include=['object']):
lengths.append(max([len(x.strip()) for x in self.real[d].unique().tolist()]))
max_len = max(lengths)
row_height = 6 + (max_len // 30)
fig, ax = plt.subplots(nr_rows, nr_cols, figsize=(16, row_height * nr_rows))
fig.suptitle('Cumulative Sums per feature', fontsize=16)
axes = ax.flatten()
for i, col in enumerate(self.real.columns):
r = self.real[col]
f = self.fake.iloc[:, self.real.columns.tolist().index(col)]
cdf(r, f, col, 'Cumsum', ax=axes[i])
plt.tight_layout(rect=[0, 0.02, 1, 0.98])
if fname is not None:
plt.savefig(fname)
plt.show()
def plot_distributions(self, nr_cols=3, fname=None):
"""
Plot the distribution plots for all columns in the real and fake dataset. Height of each row of plots scales with the length of the labels. Each plot
contains the values of a real columns and the corresponding fake column.
:param fname: If not none, saves the plot with this file name.
"""
nr_charts = len(self.real.columns)
nr_rows = max(1, nr_charts // nr_cols)
nr_rows = nr_rows + 1 if nr_charts % nr_cols != 0 else nr_rows
max_len = 0
# Increase the length of plots if the labels are long
if not self.real.select_dtypes(include=['object']).empty:
lengths = []
for d in self.real.select_dtypes(include=['object']):
lengths.append(max([len(x.strip()) for x in self.real[d].unique().tolist()]))
max_len = max(lengths)
row_height = 6 + (max_len // 30)
fig, ax = plt.subplots(nr_rows, nr_cols, figsize=(16, row_height * nr_rows))
fig.suptitle('Distribution per feature', fontsize=16)
axes = ax.flatten()
for i, col in enumerate(self.real.columns):
if col not in self.categorical_columns:
plot_df = pd.DataFrame({col: self.real[col].append(self.fake[col]), 'kind': ['real'] * self.n_samples + ['fake'] * self.n_samples})
fig = sns.histplot(plot_df, x=col, hue='kind', ax=axes[i], stat='probability', legend=True)
axes[i].set_autoscaley_on(True)
else:
real = self.real.copy()
fake = self.fake.copy()
real['kind'] = 'Real'
fake['kind'] = 'Fake'
concat = pd.concat([fake, real])
palette = sns.color_palette(
[(0.8666666666666667, 0.5176470588235295, 0.3215686274509804),
(0.2980392156862745, 0.4470588235294118, 0.6901960784313725)])
x, y, hue = col, "proportion", "kind"
ax = (concat[x]
.groupby(concat[hue])
.value_counts(normalize=True)
.rename(y)
.reset_index()
.pipe((sns.barplot, "data"), x=x, y=y, hue=hue, ax=axes[i], saturation=0.8, palette=palette))
ax.set_xticklabels(axes[i].get_xticklabels(), rotation='vertical')
plt.tight_layout(rect=[0, 0.02, 1, 0.98])
if fname is not None:
plt.savefig(fname)
plt.show()
def plot_correlation_difference(self, plot_diff=True, fname=None, **kwargs):
"""
Plot the association matrices for each table and, if chosen, the difference between them.
:param plot_diff: whether to plot the difference
:param fname: If not none, saves the plot with this file name.
:param kwargs: kwargs for sns.heatmap
"""
plot_correlation_difference(self.real, self.fake, cat_cols=self.categorical_columns, plot_diff=plot_diff, fname=fname,
**kwargs)
def correlation_distance(self, how: str = 'euclidean') -> float:
"""
Calculate distance between correlation matrices with certain metric.
:param how: metric to measure distance. Choose from [``euclidean``, ``mae``, ``rmse``].
:return: distance between the association matrices in the chosen evaluation metric. Default: Euclidean
"""
from scipy.spatial.distance import cosine
if how == 'euclidean':
distance_func = euclidean_distance
elif how == 'mae':
distance_func = mean_absolute_error
elif how == 'rmse':
distance_func = rmse
elif how == 'cosine':
def custom_cosine(a, b):
return cosine(a.reshape(-1), b.reshape(-1))
distance_func = custom_cosine
else:
            raise ValueError('`how` parameter must be in [euclidean, mae, rmse, cosine]')
real_corr = compute_associations(self.real, nominal_columns=self.categorical_columns, theil_u=True)
fake_corr = compute_associations(self.fake, nominal_columns=self.categorical_columns, theil_u=True)
return distance_func(
real_corr.values,
fake_corr.values
)
def plot_pca(self, fname=None):
"""
Plot the first two components of a PCA of real and fake data.
:param fname: If not none, saves the plot with this file name.
"""
real, fake = self.convert_numerical()
pca_r = PCA(n_components=2)
pca_f = PCA(n_components=2)
real_t = pca_r.fit_transform(real)
fake_t = pca_f.fit_transform(fake)
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
fig.suptitle('First two components of PCA', fontsize=16)
sns.scatterplot(ax=ax[0], x=real_t[:, 0], y=real_t[:, 1])
sns.scatterplot(ax=ax[1], x=fake_t[:, 0], y=fake_t[:, 1])
ax[0].set_title('Real data')
ax[1].set_title('Fake data')
if fname is not None:
plt.savefig(fname)
plt.show()
def get_copies(self, return_len: bool = False) -> Union[pd.DataFrame, int]:
"""
Check whether any real values occur in the fake data.
:param return_len: whether to return the length of the copied rows or not.
:return: Dataframe containing the duplicates if return_len=False, else integer indicating the number of copied rows.
"""
real_hashes = self.real.apply(lambda x: hash(tuple(x)), axis=1)
fake_hashes = self.fake.apply(lambda x: hash(tuple(x)), axis=1)
dup_idxs = fake_hashes.isin(real_hashes.values)
        dup_idxs = dup_idxs[dup_idxs].sort_index().index.tolist()
if self.verbose:
print(f'Nr copied columns: {len(dup_idxs)}')
copies = self.fake.loc[dup_idxs, :]
if return_len:
return len(copies)
else:
return copies
def get_duplicates(self, return_values: bool = False) -> Tuple[Union[pd.DataFrame, int], Union[pd.DataFrame, int]]:
"""
Return duplicates within each dataset.
:param return_values: whether to return the duplicate values in the datasets. If false, the lengths are returned.
:return: dataframe with duplicates or the length of those dataframes if return_values=False.
"""
real_duplicates = self.real[self.real.duplicated(keep=False)]
fake_duplicates = self.fake[self.fake.duplicated(keep=False)]
if return_values:
return real_duplicates, fake_duplicates
else:
return len(real_duplicates), len(fake_duplicates)
def pca_correlation(self, lingress=False):
"""
        Calculate the relation between PCA explained variance values. Due to some very large numbers, the recent
        implementation uses MAPE(log) instead of regressions like Pearson's r.
:param lingress: whether to use a linear regression, in this case Pearson's.
:return: the correlation coefficient if lingress=True, otherwise 1 - MAPE(log(real), log(fake))
"""
self.pca_r = PCA(n_components=5)
self.pca_f = PCA(n_components=5)
real, fake = self.convert_numerical()
self.pca_r.fit(real)
self.pca_f.fit(fake)
if self.verbose:
results = pd.DataFrame({'real': self.pca_r.explained_variance_, 'fake': self.pca_f.explained_variance_})
print(f'\nTop 5 PCA components:')
print(results.to_string())
if lingress:
corr, p, _ = self.comparison_metric(self.pca_r.explained_variance_, self.pca_f.explained_variance_)
return corr
else:
pca_error = mean_absolute_percentage_error(self.pca_r.explained_variance_, self.pca_f.explained_variance_)
return 1 - pca_error
def fit_estimators(self):
"""
Fit self.r_estimators and self.f_estimators to real and fake data, respectively.
"""
if self.verbose:
print(f'\nFitting real')
for i, c in enumerate(self.r_estimators):
if self.verbose:
print(f'{i + 1}: {type(c).__name__}')
c.fit(self.real_x_train, self.real_y_train)
if self.verbose:
print(f'\nFitting fake')
for i, c in enumerate(self.f_estimators):
if self.verbose:
print(f'{i + 1}: {type(c).__name__}')
c.fit(self.fake_x_train, self.fake_y_train)
def score_estimators(self):
"""
Get F1 scores of self.r_estimators and self.f_estimators on the fake and real data, respectively.
:return: dataframe with the results for each estimator on each data test set.
"""
if self.target_type == 'class':
rows = []
for r_classifier, f_classifier, estimator_name in zip(self.r_estimators, self.f_estimators,
self.estimator_names):
for dataset, target, dataset_name in zip([self.real_x_test, self.fake_x_test],
[self.real_y_test, self.fake_y_test], ['real', 'fake']):
predictions_classifier_real = r_classifier.predict(dataset)
predictions_classifier_fake = f_classifier.predict(dataset)
f1_r = f1_score(target, predictions_classifier_real, average='micro')
f1_f = f1_score(target, predictions_classifier_fake, average='micro')
jac_sim = jaccard_score(predictions_classifier_real, predictions_classifier_fake, average='micro')
row = {'index': f'{estimator_name}_{dataset_name}', 'f1_real': f1_r, 'f1_fake': f1_f,
'jaccard_similarity': jac_sim}
rows.append(row)
results = pd.DataFrame(rows).set_index('index')
elif self.target_type == 'regr':
r2r = [rmse(self.real_y_test, clf.predict(self.real_x_test)) for clf in self.r_estimators]
f2f = [rmse(self.fake_y_test, clf.predict(self.fake_x_test)) for clf in self.f_estimators]
# Calculate test set accuracies on the other dataset
r2f = [rmse(self.fake_y_test, clf.predict(self.fake_x_test)) for clf in self.r_estimators]
f2r = [rmse(self.real_y_test, clf.predict(self.real_x_test)) for clf in self.f_estimators]
index = [f'real_data_{classifier}' for classifier in self.estimator_names] + \
[f'fake_data_{classifier}' for classifier in self.estimator_names]
results = | pd.DataFrame({'real': r2r + f2r, 'fake': r2f + f2f}, index=index) | pandas.DataFrame |
import pytest
from pymanda import ChoiceData, DiscreteChoice
import pandas as pd
import numpy as np
@pytest.fixture
def psa_data():
'''create data for psa analysis'''
#create corporations series
corps = ['x' for x in range(50)]
corps += ['y' for x in range(25)]
corps += ['z' for x in range(25)]
#create choice series
#corp x
choices = ['a' for x in range(30)]
choices += ['b' for x in range(20)]
#corp y
choices += ['c' for x in range(20)]
choices += ['d' for x in range(5)]
#corp z
choices += ['e' for x in range(25)]
# create zips
#corp x
zips = [1 for x in range(20)] #in 75 psa
zips += [2 for x in range(10)] #in 75 psa
zips += [3 for x in range(8)] #in 75 psa
zips += [4 for x in range(7)] #in 90 psa
zips += [5 for x in range(3)] # in 90 psa
zips += [6 for x in range(2)] # out of psa
#corp y
zips += [7 for x in range(10)] #in 75 psa
zips += [8 for x in range(9)] #in 75 psa
zips += [3 for x in range(4)] #in 90 psa
zips += [5 for x in range(2)] #out of psa
#corp z
zips += [7 for x in range(10)] #in 75 psa
zips += [10 for x in range(9)] #in 75 psa
zips += [3 for x in range(4)] #in 90 psa
zips += [9 for x in range(1)] #out of psa
zips += ["" for x in range(1)] #out of psa
psa_data = pd.DataFrame({'corporation': corps,
'choice' : choices,
"geography": zips})
return psa_data
@pytest.fixture()
def onechoice_data():
choices = ['a' for x in range(100)]
zips = [1 for x in range(20)]
zips += [2 for x in range(20)]
zips += [3 for x in range(20)]
zips += [4 for x in range(20)]
zips += [5 for x in range(20)]
wght = [1.5 for x in range(20)]
wght += [1 for x in range(20)]
wght += [.75 for x in range(20)]
wght += [.5 for x in range(20)]
wght += [.25 for x in range(20)]
onechoice_data = pd.DataFrame({'choice': choices,
'geography': zips,
'weight' : wght})
return onechoice_data
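# The fixtures above feed ChoiceData in the tests below, e.g.
# ChoiceData(psa_data, 'choice', corp_var='corporation', geog_var='geography')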
## Tests for ChoiceData Initialization
def test_BadInput():
'''Test for error catching bad input'''
with pytest.raises(TypeError):
ChoiceData(["bad", "input"])
def test_AllMissing():
'''test for empty dataframe'''
df_empty = pd.DataFrame({'corporation': [],
'choice' : [],
"geography": []})
with pytest.raises(ValueError):
ChoiceData(df_empty, 'choice', corp_var='corporation', geog_var='geography')
def test_ChoiceMissing(psa_data):
'''test for an observation missing choice'''
df_miss = pd.DataFrame({'corporation': [""],
'choice' : [""],
"geography": [""]})
df_miss = | pd.concat([df_miss, psa_data]) | pandas.concat |
# coding: utf-8
import pandas as pd
from collections import defaultdict
def main(args):
clustering = | pd.read_table(args.clustering_file, sep=',', names=['contig_id', 'cluster_id'], index_col=0) | pandas.read_table |
# this will be the main program for inspecting TESS light curves for stellar rotation
# Import relevant modules
#%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#import matplotlib.cm as cm
#import matplotlib
import matplotlib.gridspec as gridspec
#from astropy.visualization import astropy_mpl_style
from glob import glob
from astropy.io import fits
import warnings
warnings.filterwarnings('ignore')
#from astroquery.vizier import Vizier
#from astropy.coordinates import SkyCoord
#import astropy.units as u
from astropy.timeseries import LombScargle
import os
import sys
############# If you move these programs you will need to update these directories and names #############
sys.path.append('/content/gdrive/My Drive/')
from tess_check import myDir as myDir
import time
####################################################################################
# Load sheet
def load_sheet(project_name):
from google.colab import auth
auth.authenticate_user()
import gspread
from oauth2client.client import GoogleCredentials
gc = gspread.authorize(GoogleCredentials.get_application_default())
dir_project = myDir.project_dir(project_name)
sheet_id_file = os.path.join(dir_project,f"{project_name}.txt")
f = open(sheet_id_file,"r")
doc_id = f.readline()
f.close()
sheet = gc.open_by_key(doc_id)
return sheet
def list_to_string(list,delim=' '):
list_as_string = list[0]
for i in range(len(list)-1):
id = i+1
list_as_string += (delim + list[id])
return list_as_string
# Auto run
def tesscheck_auto(project_name, tess_cycle=1, redo=False):
#cluster = 'ComaBer'
user = 'Auto'
target_table = get_prot_table('Auto',project_name)
target_data_val = target_table.get_all_values()
target_data = pd.DataFrame.from_records(target_data_val[1:],columns=target_data_val[0])
if tess_cycle == 1:
cycle_sectors = ['1','2','3','4','5','6','7','8','9','10','11','12','13']
if tess_cycle == 2:
cycle_sectors = ['14', '15', '16','17','18','19','20','21','22','23','24','25','26']
print('Assembling target list...')
#star_list = stars_todo(target_table)
star_list = stars_todo(target_data)
number_stars = len(star_list)
print(str(number_stars)+' stars to analyze')
if number_stars == 0:
print('no stars remaining.')
else:
for i in range(number_stars):
if star_list[i][0] != 'n':
star = make_star(target_data,star_list[i])
print(str(i)+' '+star_list[i])
tstar = initiate_star(star,project_name,user=user)
tstar['which_sectors'] = cycle_sectors
display_tess_lite_v2(tstar, save = False, noplot = True)
update_prot_table(target_table, tstar)
return
####################################################################################
# Main program
def tesscheck_run_v1():
    cluster = 'NGC_7092'
    # Identify the user
    user = tess_user(cluster)
    # Load the Prot table
    prot_table = get_prot_table(user, cluster)
# rows = prot_table.get_all_values()
# print(rows)
file = glob('/content/gdrive/My Drive/Tables/'+cluster+'-Catalog.csv')
clu = pd.read_csv(file[0])
gmag = clu['GMAG']
bprp = clu['BP_RP']
RA = clu['RA_ICRS']
Dec = clu['DE_ICRS']
# star = clu.iloc[121]
star = clu.iloc[276]
tstar = initiate_star(star,cluster,user=user)
#display_tess_lite(tstar, save = True)
display_tess_lite_v2(tstar, save = False, noplot = True)
    update_prot_table(prot_table, tstar)
return tstar
####################################################################################
# Identify the User
def tess_user(project_name):
#dir_project = project_dir(project_name)
status = read_status(project_name)
#file_status = glob(dir_project + 'Status.txt')
#file_open = open(file_status[0], "r")
#lines = file_open.readlines()
#user_line = lines[8]
users = status['Users'].split(' ')
#users = users[1:-1]
number_of_users = len(users)
print('Which user? Press...')
for i in range(number_of_users):
print(' '+str(i+1)+' for '+users[i])
print(' '+str(i+2)+' for Other')
# while loop
# val = input("Enter number for your name: ")
val = input()
user = ''
#user_found = 0
if val.isdigit() == False:
print('No user selected.')
user = 'None'
return user
else:
# is the number in the range?
if (float(val) > (number_of_users+1)) | (float(val) == 0):
print('Out of bounds')
user = 'Noone'
return user
if (float(val) <= (number_of_users)) & (float(val) != 0):
id = int(val)
user = users[id-1]
print(user + ' is logged in.')
add_user_to_sheet(project_name, user)
return user
if float(val) == (number_of_users+1):
print('Other selected. Need to make sheet and update Status.txt')
print('Other: What name?')
other_name = input()
other_name = other_name.replace(" ", "")
other_name = other_name.lower()
other_name = other_name.capitalize()
#make_status(project_name, add_user=other_name, add_step = '', change_auto='', reset=False):
user = other_name # prompt for it
iu = np.where(np.array(users) == user)
if np.size(iu) > 0:
print('User already exists, logging in.')
add_user_to_sheet(project_name, user)
return user
else:
make_status(project_name, add_user=user, add_step = '', change_auto='', reset=False)
add_user_to_sheet(project_name, user)
print(user + ' profile is created and user is logged in.')
return user
####################################################################################
def get_prot_table(user,project_name):
sheet_name = project_name
from google.colab import auth
auth.authenticate_user()
import gspread
from oauth2client.client import GoogleCredentials
gc = gspread.authorize(GoogleCredentials.get_application_default())
# load the table
worksheet = load_sheet(sheet_name)
if user == 'Auto':
table = worksheet.worksheet('Targets')
else:
table = worksheet.worksheet(user)
return table
####################################################################################
# Functions to locate a star's TESS data
def find_ffi(star, cluster):
# homedir = os.path.expanduser("~")
dir_ffi = myDir.project_dir(cluster)+'FFI/'
RA = str(star["RA_ICRS"])[:7]
DE = str(star["DE_ICRS"])[:7]
file_ffi = glob(dir_ffi+"*"+RA+"*"+DE+"*/*.fits")
if len(file_ffi) == 0:
file_ffi = glob(dir_ffi+"*"+RA+"*"+DE+"*.fits")
return(file_ffi)
def find_ffi_coord(ra, dec, cluster):
dir_ffi = myDir.project_dir(cluster)+'FFI/'
RA = str(ra)[:7]
DE = str(dec)[:7]
file_ffi = glob(dir_ffi+"*"+RA+"*"+DE+"*/*.fits")
if len(file_ffi) == 0:
file_ffi = glob(dir_ffi+"*"+RA+"*"+DE+"*.fits")
return(file_ffi)
def find_sap(star, cluster):
dir_sap = myDir.project_dir(cluster)+'SAP/'
if star["DR2NAME"][0] == 'G':
star_name = star["DR2NAME"][9:]
else:
star_name = star["DR2NAME"]
file_sap = glob(dir_sap + "*" + star_name + "*.csv")
return(file_sap)
def find_cpm(star, cluster):
dir_cpm = myDir.project_dir(cluster)+'CPM/'
if star["DR2NAME"][0] == 'G':
star_name = star["DR2NAME"][9:]
else:
star_name = star["DR2NAME"]
file_cpm = glob(dir_cpm + "*" + star_name + "*.csv")
return(file_cpm)
def load_cpm_fromfile(file):
lc = pd.read_csv(file)
return lc
def load_cpm(star):
lc = pd.read_csv(star['file_cpm'][0])
return lc
def load_sap(star):
lc = pd.read_csv(star['file_sap'][0])
return lc
def load_ffi_fromfile(file):
ffi_data = fits.open(file)
images = ffi_data[1].data
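    # use frame 100 as a representative cutout; fall back to frame 500 if it is blank (all zeros)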
image = images[100]['Flux']
if np.max(image) == 0:
image = images[500]['Flux']
return image
def load_ffi(star):
ffi_data = fits.open(star['file_ffi'][0])
images = ffi_data[1].data
image = images[100]['Flux']
if np.max(image) == 0:
image = images[500]['Flux']
return image
def load_ffis(star):
ffi_data = fits.open(star['file_ffi'][0])
images = ffi_data[1].data
image = images['Flux']
return image
####################################################################################
def make_star(target_data, dr2_now):
star = {'DR2NAME': '',
'RA_ICRS': 0.,
'DE_ICRS': 0.,
'GMAG': 0.,
'BP_RP': 0.}
iloc = np.where(dr2_now == target_data['DR2Name'])
id = iloc[0][0]
star['DR2NAME'] = dr2_now
star['RA_ICRS'] = target_data['RA'][id]
star['DE_ICRS'] = target_data['Dec'][id]
star['GMAG'] = target_data['Gmag'][id]
star['BP_RP'] = target_data['BP_RP'][id]
return star
####################################################################################
def initiate_star(star, cluster, user='NONE', blank=False):
star_data = {'User' : user,
'Source': '',
'RA':0.,'Dec':0.,
'Gmag':0., 'gbr':0.,
#'Cluster':star['Cluster'],
'Cluster':cluster,
# data files
'file_ffi':'', 'file_cpm':'', 'file_sap':'', 'file_cdips':'',
'exist_ffi': 0, 'exist_cpm':0, 'exist_cdips':0, 'exist_sap':0,
'SAP_line':False,
'number_sectors':0,
'which_sectors':[''],
'sector_list':[''],
# lc arrays
# LC option and Period results
'which_LC':'CPM', # default is CPM
'Prot_LS':0., 'is_it_double':0, # the Lomb-Scargle period and if it should be doubled
'Power_LS':0., # the Lomb-Scargle period and if it should be doubled
'Prot_final':0.,
'Amplitude':0.,
'Multi':0, # if it is multi, then set this to multi=1
'Flares':0,
'Notes':'', # anything noteworthy about this star?
'LC_Quality':1, # 1 = modulate, 0 is flat, -1 is garbage
'LC_Action':'', #
# Plotting options
'x_min':0.0,'x_max':0.0, # time range
'y_min':0.0,'y_max':0.0, # flux range, will be calculated during first iteration, and then adjusted by user
# LS options
'pmin':0.1,'pmax':30., # default period range for LS analysis
'pxlog':0
}
if blank == True:
return star_data
star_data['Source'] = star["DR2NAME"]
star_data['RA'] = star["RA_ICRS"]
star_data['Dec'] = star["DE_ICRS"]
star_data['Gmag'] = star["GMAG"]
star_data['gbr'] = star["BP_RP"]
# Once the blank data dictionary is created, test/load data into it
exist = np.array([0,0,0,0])
file_ffi = find_ffi(star,cluster)
file_cpm = find_cpm(star,cluster)
file_sap = find_sap(star,cluster)
file_cdips = ''
#file_cdips = find_cdips(star)
if len(file_ffi) > 0:
exist[0] = 1
# star_data['ffi_image'] = load_ffi(file_ffi)
star_data['file_ffi'] = file_ffi
star_data['exist_ffi'] = 1
if len(file_cpm) > 0:
exist[1] = 1
#lc_cpm = load_cpm(file_cpm)
star_data['file_cpm'] = file_cpm
star_data['exist_cpm'] = 1
if len(file_sap) > 0:
exist[2] = 1
#lc_cpm = load_cpm(file_cpm)
star_data['file_sap'] = file_sap
star_data['exist_sap'] = 1
if len(file_cdips) > 0:
exist[3] = 1
#lc_cdips = load_cdips(file_cdips)
star_data['file_cdips'] = file_cdips
star_data['exist_cdips'] = 1
if exist.sum() == 0:
print('No data for this star')
return star_data
else:
return star_data
####################################################################################
# modified version of the display program. needs to be copied into tesscheck.py
def display_tess_lite_v2(tstar,save = False,noplot = False):
time_1 = time.time()
# plotting defaults
axis_fontsize = 16
import matplotlib.pylab as pylab
params = {'axes.labelsize': 16,'axes.titlesize': 16,'xtick.labelsize': 14,'ytick.labelsize': 14}
pylab.rcParams.update(params)
#cpm data for star
if tstar['exist_cpm'] == 0:
return
if tstar['which_LC'] == 'CPM':
lc_cpm = load_cpm(tstar)
# if tstar['which_sectors'] != 'All':
# ifin = np.where((np.isfinite(lc_cpm['flux']) == True) & (lc_cpm['sector'] == int(tstar['which_sectors'])))
# if np.size(ifin) == 0:
# print('sector not available, reverting back to All')
# ifin = np.where(np.isfinite(lc_cpm['flux']) == True)
# tstar['which_sectors'] = 'All'
time_all = lc_cpm['time']
flux_all = lc_cpm['flux']
sector_all = lc_cpm['sector']
lc_title = 'TESS Light Curve (Calibrated with Causal Pixel Modeling)'
if tstar['which_LC'] == 'SAP':
lc_sap = load_sap(tstar)
time_all = lc_sap['time']
flux_all = lc_sap['flux']
flux_all /= np.nanmedian(flux_all)
flux_all -= 1
sector_all = lc_sap['sector']
lc_title = 'TESS Light Curve (Extracted with Simple Aperture Photometry)'
# what if we say just load the whle thing, whether SAP or CPM, then we handle the sectors...
ifin = np.where(np.isfinite(flux_all)) #find infs
unique_sectors = np.unique(sector_all).astype(int) #all the sectors for each data point from table
unique_sectors = list(map(str,unique_sectors)) #unique instances
# save into
tstar['sector_list'] = unique_sectors #unique sectors saved as a list
length_all = len(flux_all)
use_these = np.zeros(length_all)
length_which_sectors = len(tstar['which_sectors']) #tstars['which_sectors'] is blank until this runs once,
if tstar['which_sectors'] != ['']: #skips this on first run when it's blank
for index_sectors in range(length_which_sectors):
id_sector_match = np.where((float(tstar['which_sectors'][index_sectors]) == sector_all) & (np.isfinite(flux_all) == True))
if len(id_sector_match[0]) > 0:
use_these[id_sector_match[0]] = 1
ifin = np.where(use_these == 1)
if len(ifin[0]) == 0:
ifin = np.where(np.isfinite(flux_all) == True)
print('all points ruled out, reverting to all points')
use_these = np.zeros(length_all)
use_these[ifin[0]] = 1
if float(tstar['y_max'])>0:
# print('trimming')
#ifin = np.where((flux>float(tstar['y_min'])) & (flux<float(tstar['y_max'])))
iyra = np.where(abs(100*flux_all)>float(tstar['y_max']))
if len(iyra[0]) > 0:
use_these[iyra[0]] = 0
if ((tstar['x_min'] != 0) | (tstar['x_max'] != 0)):
ixra = np.where((time_all-min(time_all) < float(tstar['x_min'])) | (time_all-min(time_all) > float(tstar['x_max'])))
if len(ixra[0]) > 0:
use_these[ixra[0]] = 0
# ifin = np.where(np.isfinite(flux_all) == True)
# use_these = np.zeros(length_all)
# use_these[ifin[0]] = 1
iuse = np.where(use_these == 1)
if len(iuse[0]) == 0:
print('what happened?')
times = time_all[iuse[0]]
flux = flux_all[iuse[0]]
sectors = sector_all[iuse[0]]
sectors_used = np.unique(sectors).astype(int)
sectors_used = list(map(str,sectors_used))
if (tstar['which_LC'] == 'SAP') & (tstar['SAP_line'] == True):
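        # optionally remove a linear trend (first-order polynomial fit) from the SAP flux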
slope, intercept = np.polyfit(times, flux, 1)
sap_line = slope * times + intercept
flux -= sap_line
#Periodogram Setup
Pmin = tstar['pmin']
Pmax = tstar['pmax']
Fmin = 1/Pmax
Fmax = 1/Pmin
# freq_cpm, pow_cpm = LombScargle(lc_cpm["time"], lc_cpm["flux"]).autopower(minimum_frequency=Fmin,maximum_frequency=Fmax)
# naf= np.array(1/freq_cpm)
# nap= np.array(pow_cpm)
# maX = np.argmax(nap)
# period = (naf[maX])
time_2 = time.time()
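    # evaluate the Lomb-Scargle periodogram on a log-spaced period grid between pmin and pmax;
    # the adopted rotation period is the grid period with the highest power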
periods_cpm = np.logspace(np.log10(Pmin),np.log10(Pmax),10000)
freq_cpm = 1/periods_cpm
pow_cpm = LombScargle(times, flux).power(freq_cpm)
period = periods_cpm[np.argmax(pow_cpm)]
tstar['Prot_LS'] = period
tstar['Power_LS'] = np.max(pow_cpm)
# Amplitude measurement
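    # defined as the 5th-to-95th percentile range of the relative flux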
perc05 = np.percentile(flux,5)
perc95 = np.percentile(flux,95)
amp = float(perc95-perc05)
tstar['Amplitude'] = amp
# check if double
# read which_LC, then store in that period.
# store these in star, update Prot_final
mdub = float(1.0)
if tstar['is_it_double'] == 1:
mdub = float(2.0)
period_final = float(tstar['Prot_LS']*mdub)
tstar['Prot_final'] = period_final
#Figure creation
if noplot == False:
panel = plt.figure(constrained_layout=True, figsize= (16,11))
gs = gridspec.GridSpec(100, 100)
#cpm lightcurve
# how many sectors?
#all_sectors = lc_cpm['sector'].unique().astype(int)
# unique_sectors = sector_all.unique().astype(int)
# all_sectors = sectors # I'm pretty sure I reran CPM so that this isn't an issue anymore. Bad sectors arent in the CPM file.
n_sec = len(sectors_used) # this should probably be number used
n_all_sec = len(unique_sectors)
tstar['number_sectors'] = n_sec
primary_colors = ['b','r']
color_options = []
for icol in range(n_sec):
color_options.append(primary_colors[icol%2])
n_obs = len(sectors)
colors = np.repeat('r', n_obs)
for i in range(n_sec):
id = np.where(np.array(sectors) == float(sectors_used[i]))
colors[id] = color_options[i]
tmin = np.min(times)
if noplot == False:
cpmlight = panel.add_subplot(gs[0:40, 0:100])
cpmlight.set_title(lc_title)
cpmlight.scatter(times-tmin,flux*100,c = colors,s=15)
cpmlight.set_xlabel('Day of Observation')
cpmlight.set_ylabel('Percent Change in Brightness')
#find midpoint in time array to place amplitude
amp_time = np.mean(times-tmin)
#plot amplitude
cpmlight.plot([amp_time,amp_time],[-amp*100/2,amp*100/2],c='purple')
if float(tstar['y_max'])>0:
cpmlight.set_ylim(float(tstar['y_min']),float(tstar['y_max']))
# if ((float(tstar['x_min']) != 0) | (float(tstar['x_max']) != 0)):
# cpmlight.set_xlim(float(tstar['x_min'])-0.5,float(tstar['x_max'])+0.5)
#Mark adding a text for each sector as it is plotted
bot, top = cpmlight.get_ylim() #find the upper limit for us to put text
for x in sectors.index:
if x == sectors.index.min():
cpmlight.text(times[x]-tmin, top*.9, str(int(sectors[x]))) #put sector number for first sector
cur_sector = sectors[x]
else:
if sectors[x]!=cur_sector:
cpmlight.text(times[x]-tmin, top*.9, str(int(sectors[x]))) #put sector number for each subsequent sector
cur_sector = sectors[x]
# Phased light curve
#cpm_phase = panel.add_subplot(gs[55:, :40])
if noplot == False:
cpm_phase = panel.add_subplot(gs[55:, 65:])
cpm_phase.set_title('Phased Light Curve')
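        # fold the time series on the adopted period so successive rotations overlap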
cpm_phase.scatter(times%period_final,flux*100,c=colors,s=7)
cpm_phase.set_xlabel('Day in Rotation Cycle')
#cpm_phase.set_ylabel('Percent Change in Brightness')
if float(tstar['y_max'])>0:
cpm_phase.set_ylim(float(tstar['y_min']),float(tstar['y_max']))
#cpm periodogram
if noplot == False:
#cpmper = panel.add_subplot(gs[55:,32:60])
cpmper = panel.add_subplot(gs[55:,34:60])
cpmper.set_title('Periodogram')
cpmper.plot(1/freq_cpm, pow_cpm, color = 'black')
cpmper.set_xlabel('Period (days)')
cpmper.set_ylabel('Power')
if tstar['Power_LS']<0.1:
cpmper.set_yscale('log')
cpmper.set_ylim(0.001,1)
if tstar['pxlog'] == 1:
cpmper.set_xscale('log')
# cpmper.plot([tstar['Prot_final'],tstar['Prot_final']],[0,1],c='red')
cpmper.plot(tstar['Prot_final'],0.95,marker='v',markerfacecolor='red',markersize=20,markeredgecolor="black")
# print(cpmlight.get_xlim())
# print('First panels: '+str(time.time()-time_3))
# First panels: 0.05 seconds
#FFI image
time_4 = time.time()
if noplot == False:
# if (tstar['which_sectors'] == 'All'):
ffi_image = load_ffi_fromfile(tstar['file_ffi'][0])
if (n_all_sec > 1) & (ffi_test(ffi_image) == 0):
print('switching sector')
ffi_image = load_ffi_fromfile(tstar['file_ffi'][1])
if (tstar['which_sectors'] != 'All') & (np.size(tstar['file_ffi'])>1):
if tstar['which_sectors'] == '15':
ffi_image = load_ffi_fromfile(tstar['file_ffi'][0])
if tstar['which_sectors'] == '16':
ffi_image = load_ffi_fromfile(tstar['file_ffi'][1])
ffimage = panel.add_subplot(gs[55:, 0:25])
ffimage.set_title('TESS Cutout Image')
color_map = plt.cm.get_cmap('gray')
reversed_color_map = color_map.reversed()
ffi_mod = np.clip(ffi_image-np.min(ffi_image),0,1000)
ffimage.imshow(ffi_mod,origin = 'lower',cmap=reversed_color_map)
ffimage.plot([15,17],[20,20],color='red')
ffimage.plot([23,25],[20,20],color='red')
ffimage.plot([20,20],[15,17],color='red')
ffimage.plot([20,20],[23,25],color='red')
ffimage.set_xlabel('Pixels')
ffimage.set_ylabel('Pixels')
if save == True:
# dir_panels = '/content/gdrive/My Drive/TESS/'+tstar['Cluster']+'/Panels/'
#dir_panels = '/content/gdrive/My Drive/Plots/Panels_Final/'
dir_panels = myDir.project_dir(tstar['Cluster'])+'Panels/'
end = '-User='+tstar['User']+'.png'
if tstar['Source'][0] == 'G':
dr2 = tstar['Source'][9:]
else:
dr2 = tstar['Source']
file_save = dir_panels+'GaiaDR2_'+dr2+end
panel.savefig(file_save, dpi=300, bbox_inches='tight', pad_inches=0.25, transparent=False)
# print('Display time:' + str(time.time() - time_1))
return tstar
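####################################################################################
# Minimal sketch of the period-finding step used in display_tess_lite_v2, kept here
# for reference only (nothing in the pipeline calls it). It assumes `times` and `flux`
# are 1-D arrays with non-finite values already removed.
def measure_prot_ls(times, flux, pmin=0.1, pmax=30.0, ngrid=10000):
    # log-spaced period grid, same convention as display_tess_lite_v2
    periods = np.logspace(np.log10(pmin), np.log10(pmax), ngrid)
    power = LombScargle(times, flux).power(1/periods)
    best = np.argmax(power)
    return periods[best], power[best]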
####################################################################################
def update_prot_table(table, tstar):
user = tstar['User']
# (1) where is the star in the table?
cell = table.find(tstar['Source'])
# print("Found something at R%sC%s" % (cell.row, cell.col))
row_number = cell.row
if user == 'Auto':
columns = ['Prot','Prot_LS', 'Power_LS', 'TESS_Data']
n_col = len(columns)
cols = []
for column in columns:
cell = table.find(column)
cols.append(cell.col)
cell_list = [table.cell(row_number,cols[0]),table.cell(row_number,cols[1]),table.cell(row_number,cols[2]),table.cell(row_number,cols[3])]
cell_list[1].value = tstar['Prot_LS']
cell_list[2].value = tstar['Power_LS']
if (tstar['exist_ffi'] == 0) or (tstar['exist_cpm'] == 0):
cell_list[0].value = '-1'
cell_list[3].value = 'No'
else:
cell_list[0].value = ''
cell_list[3].value = 'Yes'
table.update_cells(cell_list)
#added amplitude column, and sector_list
if user != 'Auto':
columns = ['Prot_Final','Prot_LS', 'Power_LS', 'Single_Double', 'Multi', 'Quality', 'LC_Source', 'Class', 'Notes', 'Amp','Sectors_Used','Flares']
n_col = len(columns)
cols = [2,3,4,5,6,7,8,9,10,11,12,13]
# for column in columns:
# cell = table.find(column)
# cols.append(cell.col)
cell_range = 'B'+str(row_number)+':M'+str(row_number)
cell_list = table.range(cell_range)
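        # encode the review decision in Prot_Final: accepted periods ('Publish'/'Good') as-is,
        # 'Follow up' as a negative period, 'Flat' as 99, 'Garbage' as -99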
if tstar['LC_Action'] == 'Publish':
cell_list[0].value = tstar['Prot_final']
if tstar['LC_Action'] == 'Good':
cell_list[0].value = tstar['Prot_final']
if tstar['LC_Action'] == 'Follow up':
cell_list[0].value = -tstar['Prot_final']
if tstar['LC_Action'] == 'Flat':
cell_list[0].value = 99
if tstar['LC_Action'] == 'Garbage':
cell_list[0].value = -99
cell_list[1].value = tstar['Prot_LS']
cell_list[2].value = tstar['Power_LS']
cell_list[3].value = tstar['is_it_double']
cell_list[4].value = tstar['Multi']
cell_list[5].value = tstar['LC_Quality']
cell_list[6].value = tstar['which_LC']
cell_list[7].value = tstar['LC_Action']
cell_list[8].value = tstar['Notes']
cell_list[9].value = tstar['Amplitude']
cell_list[10].value = str(tstar['which_sectors'])
cell_list[11].value = tstar['Flares']
table.update_cells(cell_list)
####################################################################################
def stars_todo(table):
n_stars = len(table)
not_done = np.zeros(n_stars, dtype=int)
for i in range(n_stars):
if len(table['Prot_LS'][i]) == 0:
not_done[i] = 1
ido = np.where(not_done == 1)
dr2_list = table['DR2Name'].to_numpy()
star_list = dr2_list[ido[0]]
return star_list
def stars_todo_split(table, user):
n_stars = len(table)
not_done = np.zeros(n_stars, dtype=int)
for i in range(n_stars):
if len(table['Prot_LS'][i]) == 0:
not_done[i] = 1
ido = np.where(not_done == 1)
list = ido[0]
if user == 'Angeli':
these = np.where(ido[0] < 202)
list = ido[0][these]
if user == 'Isabella':
these = np.where((ido[0] > 191) & (ido[0] < 379))
list = ido[0][these]
if user == 'Linus':
these = np.where(ido[0] > 373)
list = ido[0][these]
dr2_list = table['DR2Name'].to_numpy()
star_list = dr2_list[list]
return star_list
####################################################################################
def stars_nodata():
target_table = get_prot_table('Auto')
target_data = target_table.get_all_records()
dr2_list = target_table.col_values(1)
dr2 = np.array(dr2_list[1:])
num = np.size(dr2)
# load their table
user_table = get_prot_table('Jason')
# Identify columns
# Locate Prot_LS column
cell = user_table.find('Prot_LS')
col_prot = cell.col
# Locate Power LS column
cell = user_table.find('Power_LS')
col_pow = cell.col
# Loop over targets
for i in range(num):
row_number = i+2
# Does target have TESS data, according to target table?
val = target_data[i]['Prot_LS']
if val == 0:
user_table.update_cell(row_number,col_prot,0)
user_table.update_cell(row_number,col_pow,0)
####################################################################################
def stars_nodata_new(Team):
target_table = get_prot_table('Auto')
target_data = target_table.get_all_records()
dr2_list = target_table.col_values(1)
dr2 = np.array(dr2_list[1:])
num = np.size(dr2)
# load their table
user_table = get_prot_table('Jason')
# Identify columns
# Locate Prot_LS column
cell = user_table.find('Prot_LS')
col_prot = cell.col
# Locate Power LS column
cell = user_table.find('Power_LS')
col_pow = cell.col
# Loop over targets
for i in range(num):
row_number = i+2
# Does target have TESS data, according to target table?
val = target_data[i]['Prot_LS']
if val == 0:
user_table.update_cell(row_number,col_prot,0)
user_table.update_cell(row_number,col_pow,0)
####################################################################################
def ffi_test(ffi):
shape = np.shape(ffi)
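    # a cutout is usable only if its central pixel is finite (not NaN/inf)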
val = ffi[int(shape[0]/2),int(shape[1]/2)]
if np.isfinite(val):
good_or_bad = 1
else:
good_or_bad = 0
return good_or_bad
####################################################################################
def tess_inspect_not_working(tstar):
import ipywidgets as widgets
from ipywidgets import interactive
from IPython.display import display
import matplotlib.pylab as pylab
lc_cpm = load_cpm(tstar)
all_sectors = lc_cpm['sector'].unique().astype(int)
sectors = sector.unique().astype(int)
thing_widget = widgets.SelectMultiple(
options=sectors,
value=sectors,
#rows=10,
description='Sectors',
disabled=False
)
interactive_plot = interactive(idisplay_tess, thing=thing_widget,double=False)
output = interactive_plot.children[-1]
interactive_plot
idisplay()
return
####################################################################################
####################################################################################
def update_panelname_v1(tstar, locate=False):
import os
from glob import glob
dir_panels = '/content/gdrive/My Drive/Projects/'+tstar['Cluster']+'/Panels/'
name = dir_panels + '*'+tstar['Source'][9:]+'*'
file = glob(name)
if locate == True:
return np.size(file)
else:
end = '-User='+tstar['User']+'-Review='+tstar['LC_Action']+'.png'
new_file = dir_panels + 'GaiaDR2_'+str(tstar["Source"])[9:]+end
os.rename(file[0],new_file)
def update_panelname(tstar, locate=False):
import os
from glob import glob
dir_panels = myDir.project_dir(tstar['Cluster'])+'Panels/'
if tstar['Source'][0] == 'G':
dr2 = tstar['Source'][9:]
else:
dr2 = tstar['Source']
name = dir_panels + '*'+dr2+'*'+tstar['User']+'*'
file = glob(name)
if locate == True:
return np.size(file)
else:
end = '-User='+tstar['User']+'-Review='+tstar['LC_Action']+'.png'
new_file = dir_panels + 'GaiaDR2_'+dr2+end
os.rename(file[0],new_file)
#############################################################################
def prot_show_v1(project_name, user, gbr, clusters=False):
# gbr = target_data['BP_RP'].to_numpy(dtype=float)
fig1, ax1 = plt.subplots(figsize=(7,6))
ax1.tick_params(axis='both', which='major', labelsize=15)
aw = 1.5
ax1.spines['top'].set_linewidth(aw)
ax1.spines['left'].set_linewidth(aw)
ax1.spines['right'].set_linewidth(aw)
ax1.spines['bottom'].set_linewidth(aw)
prot_table_now = get_prot_table(user,project_name)
prot_data_val_now = prot_table_now.get_all_values()
prot_data_now = pd.DataFrame.from_records(prot_data_val_now[1:],columns=prot_data_val_now[0])
pnow = prot_data_now['Prot_Final'].to_numpy()
uu = np.where((pnow != '') & (pnow != '-1') & (gbr != 'nan'))
prot_now = pnow[uu[0]]
color = gbr[uu[0]].astype(float)
ax1.set_xlim(0.4,2.5)
ax1.set_xlabel('BP - RP (mag)',fontsize=20)
ax1.set_ylim(0,20)
ax1.set_ylabel('Rotation Period (days)',fontsize=20)
if clusters == True:
file = glob('/content/gdrive/My Drive/Tables/gyro_clusters_draft-2020April08.csv')
clus = pd.read_csv(file[0])
indicesPl = np.where((clus["CLUSTER"] == "Pleiades") & (clus['BENCH'] == 1))
indicesPr = np.where((clus["CLUSTER"] == "Praesepe") & (clus['BENCH'] == 1))
#indicesNGC = np.where((Cluster == "NGC_6811") & (clus['BENCH'] == 1))
pleiades = clus.iloc[indicesPl]
praesepe = clus.iloc[indicesPr]
#NGC6811 = clus.iloc[indicesNGC]
plt.plot(pleiades["BP_RP"]-0.415*0.12, pleiades["PROT"], markerfacecolor = 'blue', markeredgecolor='black', label = '120 Myr Pleiades',markersize=10,alpha=0.7,linestyle='',marker='.')
plt.plot(praesepe["BP_RP"]-0.415*0.035, praesepe["PROT"], markerfacecolor = 'cyan', markeredgecolor='black', label = '670 Myr Praesepe',markersize=10,alpha=0.7,linestyle='',marker='.')
ax1.plot(color, np.array(prot_now,dtype=float),markerfacecolor='red',markeredgecolor='black',marker='*',markersize=20,linestyle='')
plt.show()
def prot_show(project_name, user, gbr, clusters=False, pcut=0.0):
# gbr = target_data['BP_RP'].to_numpy(dtype=float)
fig1, ax1 = plt.subplots(figsize=(15,9))
ax1.tick_params(axis='both', which='major', labelsize=15)
aw = 1.5
ax1.spines['top'].set_linewidth(aw)
ax1.spines['left'].set_linewidth(aw)
ax1.spines['right'].set_linewidth(aw)
ax1.spines['bottom'].set_linewidth(aw)
prot_table_now = get_prot_table(user,project_name)
prot_data_val_now = prot_table_now.get_all_values()
prot_data_now = pd.DataFrame.from_records(prot_data_val_now[1:],columns=prot_data_val_now[0])
pnow = prot_data_now['Prot_Final'].to_numpy()
qnow = prot_data_now['Quality'].to_numpy()
cnow = prot_data_now['Class'].to_numpy()
uu = np.where((pnow != '') & (pnow != '-1') & (gbr != 'nan') & (qnow != '-1') & (cnow == 'Accept'))
# uu = np.where((pnow != '') & (pnow != '-1') & (gbr != 'nan'))
prot_now = pnow[uu[0]]
color = gbr[uu[0]].astype(float)
power_now = prot_data_now['Power_LS'].to_numpy()
vv = np.where(power_now[uu[0]].astype(float)>pcut)
ax1.set_xlim(0.4,3.5)
ax1.set_xlabel('BP - RP (mag)',fontsize=20)
ax1.set_ylim(0,25)
ax1.set_ylabel('Rotation Period (days)',fontsize=20)
if clusters == True:
file = glob('/content/gdrive/My Drive/Tables/gyro_clusters_draft-2020April08.csv')
clus = pd.read_csv(file[0])
indicesPl = np.where((clus["CLUSTER"] == "Pleiades") & (clus['BENCH'] == 1))
indicesPr = np.where((clus["CLUSTER"] == "Praesepe") & (clus['BENCH'] == 1))
#indicesNGC = np.where((Cluster == "NGC_6811") & (clus['BENCH'] == 1))
pleiades = clus.iloc[indicesPl]
praesepe = clus.iloc[indicesPr]
#NGC6811 = clus.iloc[indicesNGC]
ax1.plot(pleiades["BP_RP"]-0.415*0.12, pleiades["PROT"], markerfacecolor = 'blue', markeredgecolor='black', label = '120 Myr Pleiades',markersize=10,alpha=0.7,linestyle='',marker='.')
ax1.plot(praesepe["BP_RP"]-0.415*0.035, praesepe["PROT"], markerfacecolor = 'cyan', markeredgecolor='black', label = '670 Myr Praesepe',markersize=10,alpha=0.7,linestyle='',marker='.')
ax1.plot(color[vv[0]], np.array(prot_now[vv[0]],dtype=float),markerfacecolor='red',markeredgecolor='black',marker='*',markersize=15,linestyle='')
print(len(vv[0]))
# ax1.scatter([1.2215], [11.6770],s=3000,c='green')
# ax1.plot([1.2375,1.2375],[0,20],c='green')
# ax1.plot([0.5,2.5],[11.677,11.677],c='green')
plt.show()
#############################################################################
def make_status(project_name, add_user='', add_step = '', change_auto='', reset=False):
# directory
dir_project = myDir.project_dir(project_name)
    # check whether a usable status file already exists
    file_status = glob(dir_project + "Status.txt")
    new_file = 1
    if (np.size(file_status) == 1) and (reset == False):
bsize = os.path.getsize(file_status[0]) #size in bytes
if bsize < 40:
print('remove the file')
os.remove(file_status[0])
else:
new_file = 0
status = read_status(project_name)
if (new_file == 1) | (reset == True):
status = {'Project':project_name,
'Users':'Jason Team_Member',
'Steps':'Status_Initialized',
'Auto': 'No'}
if len(add_user) > 0:
status['Users'] += ' '+add_user
if len(add_step) > 0:
status['Steps'] += ' '+add_step
if len(change_auto) > 0:
status['Auto'] = change_auto
lines = []
# 1: project title
lines.append('Project: '+project_name+"\n")
# 2: Users
lines.append('Users: '+status['Users']+"\n")
# 3: Steps
lines.append('Steps: '+status['Steps']+"\n")
# 4: Has tesscheck_auto been run?
lines.append('tesscheck_auto: '+status['Auto']+"")
# 5: Which sectors?
# 6: Number of repeat sectors?
# Create the file
fo = open(dir_project + "Status.txt", "w")
fo.writelines(lines)
fo.close()
######################################
def read_status(project_name):
dir_project = myDir.project_dir(project_name)
# ensure the file doesnt already exist
file_status = glob(dir_project + "Status.txt")
if np.size(file_status) == 0:
make_status(project_name)
#print('no file')
        return read_status(project_name)
bsize = os.path.getsize(file_status[0]) #size in bytes
if (bsize < 40):
os.remove(file_status[0])
make_status(project_name)
#print('no file')
        return read_status(project_name)
fo = open(file_status[0], "r")
lines_current = fo.readlines()
fo.close()
# 2 Users
c_users_line = lines_current[1][0:-1]
c_users_split = c_users_line.split(sep=' ')
users_current = c_users_split[1:]
# 3 Steps
c_steps_line = lines_current[2][0:-1]
c_steps_split = c_steps_line.split(sep=' ')
steps_current = c_steps_split[1:]
# 4 Auto
c_line = lines_current[3]
c_split = c_line.split(sep=' ')
auto_current = c_split[1:]
status = {'Project':project_name,
'Users':list_to_string(users_current),
'Steps':list_to_string(steps_current),
'Auto':auto_current[0]}
return status
#############################################################################
def add_user_to_sheet(project_name, user):
from google.colab import auth
auth.authenticate_user()
import gspread
from oauth2client.client import GoogleCredentials
gc = gspread.authorize(GoogleCredentials.get_application_default())
sheet = load_sheet(project_name)
sheet_list = sheet.worksheets()
target_table = get_prot_table('Auto',project_name)
target_data_val = target_table.get_all_values()
target_data = pd.DataFrame.from_records(target_data_val[1:],columns=target_data_val[0])
dr2_list = target_data['DR2Name'].to_numpy()
number_of_stars = len(dr2_list)
sheet_exists = 0
for isheet in sheet_list:
if isheet.title == user:
sheet_exists = 1
if sheet_exists == 1:
print('This user has a sheet')
else:
print('Making sheet for new user...')
new_sheet = sheet.add_worksheet(title=user,rows=number_of_stars+1,cols=10)
columns = ['DR2Name', 'Prot_Final', 'Prot_LS', 'Power_LS', 'Single_Double', 'Multi', 'Quality', 'LC_Source', 'Class', 'Notes','Amp','Sectors_Used','Flares']
cell_range = 'A1:M1'
cell_list = new_sheet.range(cell_range)
i=0
for cell in cell_list:
cell.value = columns[i]
i+=1
new_sheet.update_cells(cell_list)
cell_range = 'A2:A'+str(number_of_stars+1)
cell_list = new_sheet.range(cell_range)
i=0
for cell in cell_list:
cell.value = target_data['DR2Name'][i]
i+=1
new_sheet.update_cells(cell_list)
cell_range = 'B2:B'+str(number_of_stars+1)
cell_list = new_sheet.range(cell_range)
i=0
for cell in cell_list:
if target_data['Prot'][i] == '-1':
cell.value = target_data['Prot'][i]
i+=1
new_sheet.update_cells(cell_list)
cell_range = 'C2:C'+str(number_of_stars+1)
cell_list = new_sheet.range(cell_range)
i=0
for cell in cell_list:
if target_data['Prot_LS'][i] == '0':
cell.value = target_data['Prot_LS'][i]
i+=1
new_sheet.update_cells(cell_list)
cell_range = 'D2:D'+str(number_of_stars+1)
cell_list = new_sheet.range(cell_range)
i=0
for cell in cell_list:
if target_data['Power_LS'][i] == '0':
cell.value = target_data['Power_LS'][i]
i+=1
new_sheet.update_cells(cell_list)
# print('Setting up a new user took '+str(time.time()-start_time)+' seconds')
def prot_auto_show(project_name, clusters=False, pcut=0.0, av=0.0):
fig1, ax1 = plt.subplots(figsize=(15,9))
ax1.tick_params(axis='both', which='major', labelsize=15)
aw = 1.5
ax1.spines['top'].set_linewidth(aw)
ax1.spines['left'].set_linewidth(aw)
ax1.spines['right'].set_linewidth(aw)
ax1.spines['bottom'].set_linewidth(aw)
worksheet = load_sheet(project_name)
target_table = worksheet.worksheet('Targets')
target_data_val = target_table.get_all_values()
target_data = pd.DataFrame.from_records(target_data_val[1:],columns=target_data_val[0])
gbr = target_data['BP_RP'].to_numpy(dtype=float)
pnow = target_data['Prot_LS'].to_numpy()
uu = np.where((pnow != '') & (pnow != '-1') & (gbr != 'nan'))
prot_now = pnow[uu[0]]
color = gbr[uu[0]].astype(float) - 0.415*av
power_now = target_data['Power_LS'].to_numpy()
vv = np.where(power_now[uu[0]].astype(float)>pcut)
ax1.set_xlim(0.4,3.5)
ax1.set_xlabel('$(BP - RP)_0$ (mag)',fontsize=20)
ax1.set_ylim(0,25)
ax1.set_ylabel('Rotation Period (days)',fontsize=20)
if clusters == True:
file = glob('/content/gdrive/My Drive/Tables/gyro_clusters_draft-2020April08.csv')
clus = | pd.read_csv(file[0]) | pandas.read_csv |
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_run_if_out_of_bounds():
algo = algos.RunIfOutOfBounds(0.5)
dts = pd.date_range('2010-01-01', periods=3)
s = bt.Strategy('s')
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.temp['selected'] = ['c1', 'c2']
s.temp['weights'] = {'c1': .5, 'c2':.5}
s.update(dts[0])
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c1']._weight = 0.5
s.children['c2']._weight = 0.5
assert not algo(s)
s.children['c1']._weight = 0.25
s.children['c2']._weight = 0.75
assert not algo(s)
s.children['c1']._weight = 0.24
s.children['c2']._weight = 0.76
assert algo(s)
s.children['c1']._weight = 0.75
s.children['c2']._weight = 0.25
assert not algo(s)
s.children['c1']._weight = 0.76
s.children['c2']._weight = 0.24
assert algo(s)
def test_run_after_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDate('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert algo(target)
def test_run_after_days():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDays(3)
assert not algo(target)
assert not algo(target)
assert not algo(target)
assert algo(target)
def test_set_notional():
algo = algos.SetNotional('notional')
s = bt.FixedIncomeStrategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
notional = pd.Series(index=dts[:2], data=[1e6, 5e6])
s.setup( data, notional = notional )
s.update(dts[0])
assert algo(s)
assert s.temp['notional_value'] == 1e6
s.update(dts[1])
assert algo(s)
assert s.temp['notional_value'] == 5e6
s.update(dts[2])
assert not algo(s)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
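    # a flat $1 commission per trade means only 9 shares at $100 fit the $1000 budget,
    # leaving $99 of capital and a net strategy value of $999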
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_rebalance_updatecount():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.use_integer_positions(False)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4','c5'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
assert s.value == 1000
assert s.capital == 0
# Update is called once when each weighted security is created (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[1])
s.temp['weights'] = {'c1': 0.5, 'c2':0.5}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[2])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (2)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 6
def test_rebalance_fixedincome():
algo = algos.Rebalance()
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
s = bt.FixedIncomeStrategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
coupons = pd.DataFrame(index=dts, columns=['c2'], data=0)
s.setup(data, coupons=coupons)
s.update(dts[0])
s.temp['notional_value'] = 1000
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000
c1 = s['c1']
assert c1.value == 1000
assert c1.notional_value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000*100
c2 = s['c2']
assert c1.value == 0
assert c1.notional_value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000*100
assert c2.notional_value == 1000
assert c2.position == 1000
assert c2.weight == 1.
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectAll(include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectAll(include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly_n_none():
algo = algos.SelectRandomly(n=None) # Behaves like SelectAll
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectRandomly(n=None, include_no_data=True)
assert algo2(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectRandomly(n=None, include_negative=True)
assert algo3(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[0]] = np.nan
data['c2'][dts[0]] = 95
data['c3'][dts[0]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectRandomly(n=1)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
random.seed(1000)
algo = algos.SelectRandomly(n=1, include_negative=True)
assert algo(s)
assert s.temp.pop('selected') == ['c3']
random.seed(1009)
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c1']
random.seed(1009)
# If selected already set, it will further filter it
s.temp['selected'] = ['c2']
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
def test_select_these():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
algo = algos.SelectThese( ['c1'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
# make sure don't keep nan
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectThese( ['c1', 'c2'], include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectThese(['c1', 'c2'], include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where_all():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
s.setup(data, where = where)
s.update(dts[0])
algo = algos.SelectWhere('where')
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
    # make sure securities with NaN prices are not kept
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
    # if include_no_data is specified, then 2 are selected
algo2 = algos.SelectWhere('where', include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectWhere('where', include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere('where')
s.setup(data, where=where)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_where_legacy():
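    # Legacy signature: the boolean DataFrame is passed to SelectWhere directly,
    # rather than the name of a frame supplied via s.setup(...).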
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere(where)
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_regex():
s = bt.Strategy('s')
algo = algos.SelectRegex( 'c1' )
s.temp['selected'] = ['a1', 'c1', 'c2', 'c11', 'cc1']
assert algo( s )
assert s.temp['selected'] == ['c1', 'c11', 'cc1']
algo = algos.SelectRegex( '^c1$' )
assert algo( s )
assert s.temp['selected'] == ['c1']
def test_resolve_on_the_run():
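    # ResolveOnTheRun swaps generic tickers in temp['selected'] (here 'c') for the
    # current on-the-run security from the 'on_the_run' frame, while leaving
    # concrete tickers such as 'b1' untouched.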
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'b1'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c2'][dts[2]] = -5
on_the_run = pd.DataFrame(index=dts, columns=['c'], data='c1')
on_the_run.loc[dts[2], 'c'] = 'c2'
s.setup(data, on_the_run = on_the_run)
s.update(dts[0])
s.temp['selected'] = ['c', 'b1']
algo = algos.ResolveOnTheRun( 'on_the_run' )
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
    # make sure securities with NaN prices are not kept
s.update(dts[1])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
    # if include_no_data is specified, then 2 are selected
algo2 = algos.ResolveOnTheRun('on_the_run', include_no_data=True)
s.temp['selected'] = ['c', 'b1']
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# behavior on negative prices
s.update(dts[2])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
algo3 = algos.ResolveOnTheRun('on_the_run', include_negative=True)
s.temp['selected'] = ['c', 'b1']
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c2' in selected
assert 'b1' in selected
def test_select_types():
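    # SelectTypes picks the strategy's children by security class, honoring the
    # include_types / exclude_types tuples and further filtering any pre-existing
    # 'selected' list.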
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
c3 = bt.HedgeSecurity('c3')
c4 = bt.CouponPayingHedgeSecurity('c4')
c5 = bt.FixedIncomeSecurity('c5')
s = bt.Strategy('p', children = [c1, c2, c3, c4, c5])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4', 'c5'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
algo = algos.SelectTypes(include_types=(bt.Security, bt.HedgeSecurity), exclude_types=())
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3'])
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,), exclude_types=(bt.CouponPayingSecurity,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3', 'c5'])
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c2', 'c3'])
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_scale_weights():
s = bt.Strategy('s')
algo = algos.ScaleWeights( -0.5 )
s.temp['weights'] = {'c1': 0.5, 'c2': -0.4, 'c3':0 }
assert algo( s )
assert s.temp['weights'] == {'c1':-0.25, 'c2':0.2, 'c3':0}
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = | pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.) | pandas.DataFrame |
import numpy as np
import pandas as pd
import random
from rpy2.robjects.packages import importr
utils = importr('utils')
prodlim = importr('prodlim')
survival = importr('survival')
#KMsurv = importr('KMsurv')
#cvAUC = importr('pROC')
#utils.install_packages('pseudo')
#utils.install_packages('prodlim')
#utils.install_packages('survival')
#utils.install_packages('KMsurv')
#utils.install_packages('pROC')
import rpy2.robjects as robjects
from rpy2.robjects import r
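# The simulation helpers below rely on R's prodlim/survival packages (via rpy2)
# to compute jackknife pseudo-observations of the Kaplan-Meier survival curve.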
def sim_event_times_case1(trainset, num_samples):
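    # Case 1: sample `num_samples` images, draw Weibull-type event times whose rate
    # depends on the image label ("digits") and the first two clinical covariates,
    # and censor each observation with probability 0.3 at a uniform time before the
    # true event.  Returns long-format train/test frames with pseudo-risk targets
    # at five cutoff times, plus the covariates and censoring rates.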
train_n = int( .8 * num_samples)
test_n = int( (.2) * num_samples)
cov = np.random.standard_normal(size=(num_samples, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_samples)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatenate((treatment, cov), axis=1)
index = np.arange(len(trainset.targets))
idx_sample = np.random.choice(index, num_samples,replace=False)
digits = np.array(trainset.targets)[idx_sample]
denom = np.exp( 1.7* digits+ .6*np.cos(digits)*clinical_data[:,0]+.2*clinical_data[:,1]+.3*clinical_data[:,0] )
true_times = np.sqrt(-np.log( np.random.uniform(low=0,high=1,size=num_samples) )/ denom )
censored_times = np.random.uniform(low=0,high=true_times)
censored_indicator = np.random.binomial(n=1,p=.3,size=digits.shape[0])
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
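    # evaluation cutoffs are the 20th-60th percentiles of the true (uncensored)
    # event times; event_k flags whether the true event occurred by cutoff k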
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.sum(censored_indicator)/num_samples
cens_perc_train = np.sum(censored_indicator[:train_n])/train_n
df = np.concatenate((np.expand_dims(idx_sample,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
df = pd.DataFrame(df,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
#split data
train_clindata_all = df.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_all['time'])
train_clindata_all = train_clindata_all.iloc[order_time,:]
test_clindata_all = df.iloc[train_n:,:]
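    # fit a marginal Kaplan-Meier estimator with R's prodlim and take jackknife
    # pseudo-observations of survival at each cutoff; 1 - survival then serves as
    # the pseudo-risk regression target for the training samples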
time_r = robjects.FloatVector(train_clindata_all['time'])
event_r = robjects.BoolVector(train_clindata_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalenv["time_r"] = time_r
robjects.globalenv["event_r"] = event_r
robjects.globalenv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_all = train_clindata_all.assign(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
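    # reshape the training pseudo-risks to long format (one row per sample and
    # cutoff) and one-hot encode the time point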
long_df = pd.melt(train_clindata_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_df.rename(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymap= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_df = long_df.applymap(lambda s : mymap.get(s) if s in mymap else s)
train_val_clindata = pd.get_dummies(long_df, columns=['time_point'])
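    # build the matching long-format test frame: five rows per test sample, keeping
    # the observed time/event indicators instead of pseudo-values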
test_clindata_all = test_clindata_all.assign( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_df = pd.melt(test_clindata_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_df.rename(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_all = pd.merge(left=long_test_df, right=test_clindata_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_all.columns.tolist()
long_test_clindata = long_test_clindata_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = pd.get_dummies(long_test_clindata, columns=['time_point'])
covariates = df[['ID'] + df.columns.tolist()[8:]]
clindata = {'train_val':train_val_clindata , 'test':long_test_clindata, 'covariates': covariates,'time_train': train_clindata_all['time'], 'event_train': train_clindata_all['event'], 'slide_id_test': test_clindata_all['ID'], 'cutoff': cutoff , 'cens': cens_perc, 'cens_train': cens_perc_train}
return clindata
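# Hypothetical usage sketch (the dataset name below is an illustrative assumption,
# not part of this module): any torchvision-style dataset exposing `.targets`
# should work, e.g. MNIST.
#
#     from torchvision import datasets
#     trainset = datasets.MNIST('data', train=True, download=True)
#     clindata = sim_event_times_case1(trainset, num_samples=1000)
#     print(clindata['cens'])   # overall censoring proportion (~0.3 for case 1)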
def sim_event_times_case2(trainset, num_samples):
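    # Case 2: same event-time model as case 1, but censoring times are drawn from a
    # covariate-dependent Weibull-type model, so the amount of censoring varies with
    # the clinical covariates instead of following a fixed 30% uniform scheme.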
train_n = int( .8 * num_samples)
test_n = int( (.2) * num_samples)
cov = np.random.standard_normal(size=(num_samples, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_samples)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatenate((treatment, cov), axis=1)
index = np.arange(len(trainset.targets))
idx_sample = np.random.choice(index, num_samples,replace=False)
digits = np.array(trainset.targets)[idx_sample]
denom = np.exp( 1.7* digits+ .6*np.cos(digits)*clinical_data[:,0]+.2*clinical_data[:,1]+.3*clinical_data[:,0] )
true_times = np.sqrt(-np.log( np.random.uniform(low=0,high=1,size=num_samples) )/ denom )
denom = np.exp( 1.4*clinical_data[:,0]+2.6*clinical_data[:,1] -.2*clinical_data[:,2] )*6
censored_times = np.sqrt(-np.log(np.random.uniform(low=0,high=1,size=num_samples))/denom )
censored_indicator = (true_times > censored_times)*1
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.sum(censored_indicator)/num_samples
cens_perc_train = np.sum(censored_indicator[:train_n])/train_n
df = np.concatenate((np.expand_dims(idx_sample,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
df = | pd.DataFrame(df,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) | pandas.DataFrame |