the-stack_0_2209 | from datetime import datetime
from utils.api import fetch_events
class Events:
def __init__(self):
self.items = []
def fetch(self, params):
self.items = []
        params['pageToken'] = None
        while True:
            events = fetch_events(params)
            if not events:
                break
            if events.get('items'):
                self.items.extend(events.get('items'))
            # Advance to the next page; stop once the API returns no nextPageToken
            page_token = events.get('nextPageToken')
            params['pageToken'] = page_token
            if not page_token:
                break
return self.items
    def to_csv(self, filename):
        # Open the file once and append one CSV line per non-cancelled event
        with open(filename, 'a') as f:
            for obj in self.items:
                item = EventItem(obj)
                if item.is_cancelled():
                    continue
                csv_line = '"{}","{}","{}","{}","{}"'.format(
                    item.get_summary(),
                    '1' if item.is_all_day() else '0',
                    item.get_start(),
                    item.get_end(),
                    item.get_total_minutes()
                )
                f.write(csv_line + '\n')
class EventItem:
def __init__(self, item):
self.item = item
def is_cancelled(self) -> bool:
return self.item.get('status') == 'cancelled'
def get_summary(self):
return self.item.get('summary')
def has_start(self):
return self.item.get('start') is not None
def get_start(self):
d = self.get_start_date()
if d != '':
return d
return self.get_start_datetime()
def get_start_date(self):
start = self.item.get('start')
if not start:
return ''
d = start.get('date')
if d:
return d
return ''
def get_start_datetime(self):
start = self.item.get('start')
if not start:
return ''
dt = start.get('dateTime')
if dt:
return dt
return ''
def is_all_day(self):
return self.get_start_date() != ''
def get_end(self):
d = self.get_end_date()
if d != '':
return d
return self.get_end_datetime()
def get_end_date(self):
end = self.item.get('end')
if not end:
return ''
d = end.get('date')
if d:
return d
return ''
def get_end_datetime(self):
end = self.item.get('end')
if not end:
return ''
dt = end.get('dateTime')
if dt:
return dt
return ''
    def get_total_minutes(self):
if not self.has_start() or self.is_all_day():
return 0
start = datetime.fromisoformat(self.get_start_datetime())
end = datetime.fromisoformat(self.get_end_datetime())
return (end - start).total_seconds() / 60
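# A minimal usage sketch (hypothetical parameters; fetch_events() is this project's own
# wrapper around the Google Calendar API, which expects at least a 'calendarId'):
#
#   events = Events()
#   events.fetch({'calendarId': 'primary', 'singleEvents': True})
#   events.to_csv('events.csv')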
|
the-stack_0_2211 | # globalprogramlib GUI example by PWRScript
# Import necessary libs
import tkinter
from globalprogramlib.v1 import Language, Translator
class App(tkinter.Tk):
def __init__(self, translator: Translator, *args, **kwargs) -> None:
"""
This class will take care of creating our application
This isn't the best sample, but will demonstrate the
principles of globalprogramlib using a sync GUI lib
"""
super().__init__(*args, **kwargs)
        # Master canvas (so the whole screen can be cleared easily)
self.master_canvas = tkinter.Canvas(self)
self.master_canvas.pack()
        # Make the translator instance available to the whole class
self.translator: Translator = translator
# Render app
self.render_choose_language_window()
def clear_screen(self):
"""
Deletes all widgets rendered in the Tkinter application
by destroying the canvas and replacing it
"""
self.master_canvas.destroy()
self.master_canvas = tkinter.Canvas(self)
self.master_canvas.pack()
def render_choose_language_window(self):
"""
This function is the render for our application
"""
# Ensure that screen is cleared every reload to avoid duplicate widgets
self.clear_screen()
# Creates a new label
# Displays the language pick message in current selected language in Translator
# using translator.get_translation("pwrscript.guiapp.language_picker")
tkinter.Label(
self.master_canvas,
text=self.translator.get_translation("pwrscript.guiapp.language_picker"),
).pack()
        # This will store the currently selected language in the translator
        # to be shown in the "OptionMenu" widget
language = tkinter.StringVar(self.master_canvas)
language.set(self.translator.selected_language)
tkinter.Label(
self.master_canvas,
text=self.translator.get_translation("pwrscript.guiapp.important_message"),
).pack()
tkinter.OptionMenu(
self.master_canvas,
language,
            # Here we pass all Languages in the translator to the OptionMenu as separate arguments
*self.translator.languages_loaded.values(),
command=(
                # I know this isn't beginner friendly, but I will explain everything
                #
                # I need to execute an assignment (translator.SelectedLanguageCode = «selected language code»)
                # and to re-render this «window» using self.render_choose_language_window() when the user changes the language (event)
                #
                # Unfortunately tkinter's "command" only accepts a single function with one argument (the value selected)
                #
                # This led to rendering issues and to self not being available (no access to the translator/application) when I tried
                # to implement «beginner friendly» code for "command"
                #
                # To accomplish these tasks, I needed to create a lambda (a one-line function) which accepts the argument needed
                # by the OptionMenu "command" [lang] and [self] (for getting the translator and application), which is automatically
                # bound as a default argument and therefore available when this «event» is «executed»
                #
                # To solve the assignment issue I set the SelectedLanguageCode attribute on the translator using the built-in object method
                # __setattr__, since you cannot use an assignment statement inside a lambda (the best approach in other environments is
                # translator.SelectedLanguageCode = «selected_language_code»)
                #
                # The other issue, «re-rendering», was solved as described in the 4th paragraph
                # (see the commented sketch of this pattern right after the OptionMenu call below)
                #
lambda lang, self=self: [
self.translator.__setattr__("SelectedLanguageCode", lang.code),
self.render_choose_language_window(),
]
)
).pack()
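        # For clarity, a minimal, hypothetical sketch of the same lambda/default-argument
        # callback pattern, independent of this app (the names below are made up for illustration):
        #
        #   def make_on_change(self):
        #       # self is bound as a default argument at lambda creation time;
        #       # setattr() performs the assignment that a plain "=" cannot do inside a lambda
        #       return lambda lang, self=self: [
        #           setattr(self.translator, "SelectedLanguageCode", lang.code),
        #           self.render_choose_language_window(),
        #       ]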
def BuildTranslator():
"""
    This function takes care of creating the translations dynamically at runtime, without
    needing external files, which is ideal for examples, and returns the translator object
    ready for use
"""
    # Another way to do this: persist the files in a folder and generate them at runtime if needed (example: langs)
"""
# load_translations.py
    from os.path import isfile, isdir, join
from os import mkdir
TRANSLATIONS_FOLDER = "langs"
    if not isdir(TRANSLATIONS_FOLDER):
mkdir(TRANSLATIONS_FOLDER)
if not is_file(join(TRANSLATIONS_FOLDER,"pt.json"))
with Language() as pt:
pt.name = "Português"
pt.code = "pt"
pt.version = 1 # The version needs to be always 1 in this case
pt.revision = 1 # This is what you need to touch when you need to upgrade the version of the language
pt.authors = [
"PWRScript"
] # You can add authors and contributors with their name like this or "name <email>"
pt.contributors = []
# Creating translations for our app
pt.set_translation("pwrscript.guiapp.language_picker", "Escolha o seu idioma:")
# Saving the translation to a file
pt.to_file(join(TRANSLATIONS_FOLDER,"pt.json"))
        # When the context ends, the language is always cleaned to ensure that it doesn't hold on to system resources, so it will look like a newly
        # instanced Language() and can be cleaned up at any moment by the garbage collector
# This object can be clean since it won't be used again
del pt
if not is_file(join(TRANSLATIONS_FOLDER,"en.json"))
with Language() as en:
en.name = "English"
en.code = "en"
en.version = 1 # The version needs to be always 1 in this case
en.revision = 1 # This is what you need to touch when you need to upgrade the version of the language
en.authors = [
"PWRScript"
] # You can add authors and contributors with their name like this or "name <email>"
en.contributors = []
# Creating translations for our app
en.set_translation("pwrscript.guiapp.language_picker", "Pick your language:")
# Saving the translation to a file
en.to_file(join(TRANSLATIONS_FOLDER,"en.json"))
del en
translator = Translator()
translator.load_directory(TRANSLATIONS_FOLDER)
translator.DefaultLanguageCode = "en"
translator.SelectedLanguageCode = "en"
"""
# PT Language instantiation
pt = Language()
# Add language information
pt.name = "Português"
pt.code = "pt"
pt.version = 1 # The version needs to be always 1 in this case
pt.revision = 1 # This is what you need to touch when you need to upgrade the version of the language
pt.authors = [
"PWRScript"
] # You can add authors and contributors with their name like this or "name <email>"
pt.contributors = []
# Creating translations for our app
pt.set_translation("pwrscript.guiapp.language_picker", "Escolha o seu idioma:")
pt.set_translation("pwrscript.guiapp.important_message", "Funcionando em Português")
# EN Language instantiation
en = Language()
# Add language information
en.name = "English"
en.code = "en"
en.version = 1 # The version needs to be always 1 in this case
en.revision = 1 # This is what you need to touch when you need to upgrade the version of the language
en.authors = [
"PWRScript"
] # You can add authors and contributors with their name like this or "name <email>"
en.contributors = []
# Creating translations for our app
en.set_translation("pwrscript.guiapp.language_picker", "Pick your language:")
en.set_translation("pwrscript.guiapp.important_message", "Working in English")
# Translator creation
translator = Translator()
# Loading languages from the created Language() objects
translator.load_languages(pt, en)
    # Sets the default (fallback language used when a translation can't be found in the selected_language)
    # and the selected (first) language
    # This is obligatory since the get_translation() method needs to know what languages to use, and the codes
    # must be valid language codes in the translator (loaded languages), otherwise it won't translate anything and
    # will always return None
translator.DefaultLanguageCode = "en"
translator.SelectedLanguageCode = "en"
return translator
if __name__ == "__main__":
# Creates the translator for use with the Tkinter class app
translator = BuildTranslator()
# Instances the application class and runs the application
application = App(translator)
application.mainloop()
|
the-stack_0_2213 | import matplotlib.pyplot as plt #import the library, any procedures with plt.* come from this lib
import numpy as np #imports numpy for standard deviation
trials = []
for i in range(1,31):
trials.append(i) #sets up the X axis
#Y axis
data = [2.5105, 2.5100, 2.5103, 2.5091, 2.5101, 2.5101, 2.5103, 2.5098, 2.5098, 2.5100, 2.5090, 2.5099, 2.5101, 2.5091, 2.5100, 2.5099, 2.5089, 2.5097, 2.5099, 2.5099, 2.5099, 2.5096, 2.5099, 2.5121, 2.5094, 2.5102, 2.5090, 2.5101, 2.5089, 2.5100]
#plots the scatter with errorbars
plt.errorbar(trials, data, yerr = 0.0005, marker = '+', linestyle = '', label = "Data")
#axis labels/title
plt.xlabel("Trial Number")
plt.ylabel("Diameter of the Sphere (cm)")
plt.title("Fig. 5: Diameter of a Steel Sphere with Mean and Standard Deviation")
#mean
plt.plot([0]+trials, [2.5099]*31, c = 'red', marker = '', label = 'Mean')
#std dev
print(np.std(data))
plt.plot([0]+trials, [2.5099+np.std(data)]*31, c = 'green', marker = '', label = 'Standard Deviation')
plt.plot([0]+trials, [2.5099-np.std(data)]*31, c = 'green', marker = '')
plt.legend()#generates the legend
plt.show()#displays the plot
|
the-stack_0_2214 | from astropy.time import Time
__all__ = [
"_checkTime"
]
def _checkTime(time, arg_name):
"""
Check that 'time' is an astropy time object, if not, raise an error.
Parameters
----------
time : `~astropy.time.core.Time`
arg_name : str
Name of argument in function.
Returns
-------
None
Raises
------
    TypeError : If time is not an astropy time object.
"""
err = (
"Time ({}) has to be an `~astropy.time.core.Time` object.\n"
"Convert using:\n\n"
"from astropy.time import Time\n"
"times = Time(t_array, scale='...', format='...')"
)
if type(time) != Time:
raise TypeError(err.format(arg_name))
return
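# A minimal usage sketch (hypothetical caller code, not part of this module):
#
#   from astropy.time import Time
#   t = Time([2459000.5], format="jd", scale="tdb")
#   _checkTime(t, "times")            # passes silently
#   _checkTime([2459000.5], "times")  # raises TypeError with the message above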
|
the-stack_0_2215 | import datetime
from rest_framework import permissions, status
from rest_framework.decorators import (api_view,
authentication_classes,
permission_classes,
throttle_classes,)
from django.db.models.expressions import RawSQL
from django.db.models import FloatField
from django.utils import timezone
from rest_framework_expiring_authtoken.authentication import (
ExpiringTokenAuthentication,)
from rest_framework.response import Response
from rest_framework.throttling import UserRateThrottle, AnonRateThrottle
from accounts.permissions import HasVerifiedEmail
from base.utils import paginated_queryset, StandardResultSetPagination
from challenges.models import (
ChallengePhase,
Challenge,
ChallengePhaseSplit,
LeaderboardData,)
from challenges.utils import get_challenge_model, get_challenge_phase_model
from participants.models import (ParticipantTeam,)
from participants.utils import (
get_participant_team_id_of_user_for_a_challenge,)
from .models import Submission
from .sender import publish_submission_message
from .serializers import SubmissionSerializer
@throttle_classes([UserRateThrottle])
@api_view(['GET', 'POST'])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((ExpiringTokenAuthentication,))
def challenge_submission(request, challenge_id, challenge_phase_id):
"""API Endpoint for making a submission to a challenge"""
# check if the challenge exists or not
try:
challenge = Challenge.objects.get(pk=challenge_id)
except Challenge.DoesNotExist:
response_data = {'error': 'Challenge does not exist'}
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
# check if the challenge phase exists or not
try:
challenge_phase = ChallengePhase.objects.get(
pk=challenge_phase_id, challenge=challenge)
except ChallengePhase.DoesNotExist:
response_data = {'error': 'Challenge Phase does not exist'}
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
if request.method == 'GET':
# getting participant team object for the user for a particular challenge.
participant_team_id = get_participant_team_id_of_user_for_a_challenge(
request.user, challenge_id)
# check if participant team exists or not.
try:
ParticipantTeam.objects.get(pk=participant_team_id)
except ParticipantTeam.DoesNotExist:
response_data = {'error': 'You haven\'t participated in the challenge'}
return Response(response_data, status=status.HTTP_403_FORBIDDEN)
submission = Submission.objects.filter(participant_team=participant_team_id,
challenge_phase=challenge_phase).order_by('-submitted_at')
paginator, result_page = paginated_queryset(submission, request)
try:
serializer = SubmissionSerializer(result_page, many=True, context={'request': request})
response_data = serializer.data
return paginator.get_paginated_response(response_data)
except:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'POST':
# check if the challenge is active or not
if not challenge.is_active:
response_data = {'error': 'Challenge is not active'}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
# check if challenge phase is active
if not challenge_phase.is_active:
response_data = {
'error': 'Sorry, cannot accept submissions since challenge phase is not active'}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
# check if challenge phase is public and accepting solutions
if not challenge_phase.is_public:
response_data = {
'error': 'Sorry, cannot accept submissions since challenge phase is not public'}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
participant_team_id = get_participant_team_id_of_user_for_a_challenge(
request.user, challenge_id)
try:
participant_team = ParticipantTeam.objects.get(pk=participant_team_id)
except ParticipantTeam.DoesNotExist:
response_data = {'error': 'You haven\'t participated in the challenge'}
return Response(response_data, status=status.HTTP_403_FORBIDDEN)
serializer = SubmissionSerializer(data=request.data,
context={'participant_team': participant_team,
'challenge_phase': challenge_phase,
'request': request
})
if serializer.is_valid():
serializer.save()
response_data = serializer.data
submission = serializer.instance
# publish message in the queue
publish_submission_message(challenge_id, challenge_phase_id, submission.id)
return Response(response_data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@throttle_classes([UserRateThrottle])
@api_view(['PATCH'])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((ExpiringTokenAuthentication,))
def change_submission_data_and_visibility(request, challenge_pk, challenge_phase_pk, submission_pk):
"""
API Endpoint for updating the submission meta data
and changing submission visibility.
"""
# check if the challenge exists or not
challenge = get_challenge_model(challenge_pk)
# check if the challenge phase exists or not
challenge_phase = get_challenge_phase_model(challenge_phase_pk)
if not challenge.is_active:
response_data = {'error': 'Challenge is not active'}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
# check if challenge phase is public and accepting solutions
if not challenge_phase.is_public:
response_data = {
'error': 'Sorry, cannot accept submissions since challenge phase is not public'}
return Response(response_data, status=status.HTTP_406_NOT_ACCEPTABLE)
participant_team_pk = get_participant_team_id_of_user_for_a_challenge(
request.user, challenge_pk)
try:
participant_team = ParticipantTeam.objects.get(pk=participant_team_pk)
except ParticipantTeam.DoesNotExist:
response_data = {'error': 'You haven\'t participated in the challenge'}
return Response(response_data, status=status.HTTP_403_FORBIDDEN)
try:
submission = Submission.objects.get(participant_team=participant_team,
challenge_phase=challenge_phase,
id=submission_pk)
except Submission.DoesNotExist:
response_data = {'error': 'Submission does not exist'}
return Response(response_data, status=status.HTTP_403_FORBIDDEN)
try:
is_public = request.data['is_public']
if is_public is True:
when_made_public = datetime.datetime.now()
request.data['when_made_public'] = when_made_public
except KeyError:
pass
serializer = SubmissionSerializer(submission,
data=request.data,
context={
'participant_team': participant_team,
'challenge_phase': challenge_phase,
'request': request
},
partial=True)
if serializer.is_valid():
serializer.save()
response_data = serializer.data
return Response(response_data, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@throttle_classes([AnonRateThrottle])
@api_view(['GET'])
def leaderboard(request, challenge_phase_split_id):
"""Returns leaderboard for a corresponding Challenge Phase Split"""
# check if the challenge exists or not
try:
challenge_phase_split = ChallengePhaseSplit.objects.get(
pk=challenge_phase_split_id)
except ChallengePhaseSplit.DoesNotExist:
response_data = {'error': 'Challenge Phase Split does not exist'}
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
# Check if the Challenge Phase Split is publicly visible or not
if challenge_phase_split.visibility != ChallengePhaseSplit.PUBLIC:
response_data = {'error': 'Sorry, leaderboard is not public yet for this Challenge Phase Split!'}
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
# Get the leaderboard associated with the Challenge Phase Split
leaderboard = challenge_phase_split.leaderboard
# Get the default order by key to rank the entries on the leaderboard
try:
default_order_by = leaderboard.schema['default_order_by']
except:
response_data = {'error': 'Sorry, Default filtering key not found in leaderboard schema!'}
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
# Get all the successful submissions related to the challenge phase split
leaderboard_data = LeaderboardData.objects.filter(
challenge_phase_split=challenge_phase_split,
submission__is_public=True,
submission__is_flagged=False).order_by('created_at')
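    # Note: the RawSQL annotation below relies on the Postgres JSON '->>' operator to extract the
    # default_order_by key from the JSON 'result' column as text, coerced to a float for ranking.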
leaderboard_data = leaderboard_data.annotate(
filtering_score=RawSQL('result->>%s', (default_order_by, ), output_field=FloatField())).values(
'id', 'submission__participant_team__team_name',
'challenge_phase_split', 'result', 'filtering_score', 'leaderboard__schema', 'submission__submitted_at')
sorted_leaderboard_data = sorted(leaderboard_data, key=lambda k: float(k['filtering_score']), reverse=True)
distinct_sorted_leaderboard_data = []
team_list = []
for data in sorted_leaderboard_data:
if data['submission__participant_team__team_name'] in team_list:
continue
else:
distinct_sorted_leaderboard_data.append(data)
team_list.append(data['submission__participant_team__team_name'])
leaderboard_labels = challenge_phase_split.leaderboard.schema['labels']
for item in distinct_sorted_leaderboard_data:
item['result'] = [item['result'][index] for index in leaderboard_labels]
paginator, result_page = paginated_queryset(
distinct_sorted_leaderboard_data,
request,
pagination_class=StandardResultSetPagination())
response_data = result_page
return paginator.get_paginated_response(response_data)
@throttle_classes([UserRateThrottle])
@api_view(['GET'])
@permission_classes((permissions.IsAuthenticated, HasVerifiedEmail))
@authentication_classes((ExpiringTokenAuthentication,))
def get_remaining_submissions(request, challenge_phase_pk, challenge_pk):
get_challenge_model(challenge_pk)
challenge_phase = get_challenge_phase_model(challenge_phase_pk)
participant_team_pk = get_participant_team_id_of_user_for_a_challenge(
request.user, challenge_pk)
# Conditional check for the existence of participant team of the user.
if not participant_team_pk:
response_data = {'error': 'You haven\'t participated in the challenge'}
return Response(response_data, status=status.HTTP_403_FORBIDDEN)
max_submission_per_day = challenge_phase.max_submissions_per_day
max_submission = challenge_phase.max_submissions
submissions_done_today_count = Submission.objects.filter(
challenge_phase__challenge=challenge_pk,
challenge_phase=challenge_phase_pk,
participant_team=participant_team_pk,
submitted_at__gte=timezone.now().date()).count()
failed_submissions_count = Submission.objects.filter(
challenge_phase__challenge=challenge_pk,
challenge_phase=challenge_phase_pk,
participant_team=participant_team_pk,
status=Submission.FAILED,
submitted_at__gte=timezone.now().date()).count()
    # Check whether today's successful submission count has reached the max submissions per day.
if ((submissions_done_today_count - failed_submissions_count) >= max_submission_per_day
or (max_submission_per_day == 0)):
# Get the UTC time of the instant when the above condition is true.
date_time_now = timezone.now()
# Calculate the next day's date.
date_time_tomorrow = date_time_now.date() + datetime.timedelta(1)
utc = timezone.utc
# Get the midnight time of the day i.e. 12:00 AM of next day.
midnight = utc.localize(datetime.datetime.combine(
date_time_tomorrow, datetime.time()))
# Subtract the current time from the midnight time to get the remaining time for the next day's submissions.
remaining_time = midnight - date_time_now
# Return the remaining time with a message.
response_data = {'message': 'You have exhausted today\'s submission limit',
'remaining_time': remaining_time
}
return Response(response_data, status=status.HTTP_200_OK)
else:
# Calculate the remaining submissions for today.
remaining_submissions_today_count = (max_submission_per_day -
(submissions_done_today_count -
failed_submissions_count)
)
# calculate the remaining submissions from total submissions.
remaining_submission_count = max_submission - \
(submissions_done_today_count - failed_submissions_count)
# Return the above calculated data.
response_data = {'remaining_submissions_today_count': remaining_submissions_today_count,
'remaining_submissions': remaining_submission_count
}
return Response(response_data, status=status.HTTP_200_OK)
|
the-stack_0_2216 | from concurrent.futures import ThreadPoolExecutor, as_completed
from time import time
import boto3
from botocore import UNSIGNED
from botocore.config import Config
from botocore.exceptions import ClientError
from .start_lambda_api_integ_base import StartLambdaIntegBaseClass
class TestParallelRequests(StartLambdaIntegBaseClass):
template_path = "/testdata/invoke/template.yml"
def setUp(self):
self.url = "http://127.0.0.1:{}".format(self.port)
self.lambda_client = boto3.client('lambda',
endpoint_url=self.url,
use_ssl=False,
verify=False,
config=Config(signature_version=UNSIGNED,
read_timeout=120,
retries={'max_attempts': 0}))
def test_same_endpoint(self):
"""
        Send several requests to the same path at the same time. This is to ensure we can handle
        multiple requests at once and do not block/queue up requests
"""
number_of_requests = 10
start_time = time()
thread_pool = ThreadPoolExecutor(number_of_requests)
futures = [thread_pool.submit(self.lambda_client.invoke, FunctionName="HelloWorldSleepFunction")
for _ in range(0, number_of_requests)]
results = [r.result() for r in as_completed(futures)]
end_time = time()
self.assertEquals(len(results), 10)
self.assertGreater(end_time - start_time, 10)
self.assertLess(end_time - start_time, 20)
for result in results:
self.assertEquals(result.get("Payload").read().decode('utf-8'), '"Slept for 10s"')
class TestLambdaToLambdaInvoke(StartLambdaIntegBaseClass):
template_path = "/testdata/start_lambda/template.yml"
def setUp(self):
self.url = "http://127.0.0.1:{}".format(self.port)
self.lambda_client = boto3.client('lambda',
endpoint_url=self.url,
use_ssl=False,
verify=False,
config=Config(signature_version=UNSIGNED,
read_timeout=120,
retries={'max_attempts': 0}))
def test_local_lambda_calling_local_lambda(self):
pass
class TestLambdaServiceErrorCases(StartLambdaIntegBaseClass):
template_path = "/testdata/invoke/template.yml"
def setUp(self):
self.url = "http://127.0.0.1:{}".format(self.port)
self.lambda_client = boto3.client('lambda',
endpoint_url=self.url,
use_ssl=False,
verify=False,
config=Config(signature_version=UNSIGNED,
read_timeout=120,
retries={'max_attempts': 0}))
def test_invoke_with_non_json_data(self):
expected_error_message = "An error occurred (InvalidRequestContent) when calling the Invoke operation: " \
"Could not parse request body into json: No JSON object could be decoded"
with self.assertRaises(ClientError) as error:
self.lambda_client.invoke(FunctionName="EchoEventFunction", Payload='notat:asdfasdf')
self.assertEquals(str(error.exception), expected_error_message)
def test_invoke_with_log_type_not_None(self):
expected_error_message = "An error occurred (NotImplemented) when calling the Invoke operation: " \
"log-type: Tail is not supported. None is only supported."
with self.assertRaises(ClientError) as error:
self.lambda_client.invoke(FunctionName="EchoEventFunction", LogType="Tail")
self.assertEquals(str(error.exception), expected_error_message)
def test_invoke_with_invocation_type_not_RequestResponse(self):
expected_error_message = "An error occurred (NotImplemented) when calling the Invoke operation: " \
"invocation-type: DryRun is not supported. RequestResponse is only supported."
with self.assertRaises(ClientError) as error:
self.lambda_client.invoke(FunctionName="EchoEventFunction", InvocationType="DryRun")
self.assertEquals(str(error.exception), expected_error_message)
class TestLambdaService(StartLambdaIntegBaseClass):
template_path = "/testdata/invoke/template.yml"
def setUp(self):
self.url = "http://127.0.0.1:{}".format(self.port)
self.lambda_client = boto3.client('lambda',
endpoint_url=self.url,
use_ssl=False,
verify=False,
config=Config(signature_version=UNSIGNED,
read_timeout=120,
retries={'max_attempts': 0}))
def test_invoke_with_data(self):
response = self.lambda_client.invoke(FunctionName="EchoEventFunction", Payload='"This is json data"')
self.assertEquals(response.get("Payload").read().decode('utf-8'), '"This is json data"')
self.assertIsNone(response.get("FunctionError"))
self.assertEquals(response.get("StatusCode"), 200)
def test_invoke_with_no_data(self):
response = self.lambda_client.invoke(FunctionName="EchoEventFunction")
self.assertEquals(response.get("Payload").read().decode('utf-8'), '{}')
self.assertIsNone(response.get("FunctionError"))
self.assertEquals(response.get("StatusCode"), 200)
def test_invoke_with_log_type_None(self):
response = self.lambda_client.invoke(FunctionName="EchoEventFunction", LogType='None')
self.assertEquals(response.get("Payload").read().decode('utf-8'), '{}')
self.assertIsNone(response.get("FunctionError"))
self.assertEquals(response.get("StatusCode"), 200)
def test_invoke_with_invocation_type_RequestResponse(self):
response = self.lambda_client.invoke(FunctionName="EchoEventFunction", InvocationType='RequestResponse')
self.assertEquals(response.get("Payload").read().decode('utf-8'), '{}')
self.assertIsNone(response.get("FunctionError"))
self.assertEquals(response.get("StatusCode"), 200)
def test_lambda_function_raised_error(self):
response = self.lambda_client.invoke(FunctionName="RaiseExceptionFunction", InvocationType='RequestResponse')
self.assertEquals(response.get("Payload").read().decode('utf-8'),
'{"errorMessage": "Lambda is raising an exception", '
'"errorType": "Exception", '
'"stackTrace": [["/var/task/main.py", 43, "raise_exception", '
'"raise Exception(\\"Lambda is raising an exception\\")"]]}')
self.assertEquals(response.get("FunctionError"), 'Unhandled')
self.assertEquals(response.get("StatusCode"), 200)
def test_invoke_with_function_timeout(self):
"""
        This behavior does not match the actual Lambda Service. For functions that time out, data is returned like the
        following:
            {"errorMessage":"<timestamp> <request_id> Task timed out after 5.00 seconds"}
        For local Lambdas, however, a timeout is an interrupt on the thread that invokes the function. Since the
        invoke is on a different thread, we do not (currently) have a way to communicate this back to the caller. So
        when a timeout happens locally, we do not add FunctionError: Unhandled to the response and return an empty
        string as the data (because no data was found in stdout from the container).
"""
response = self.lambda_client.invoke(FunctionName="TimeoutFunction")
self.assertEquals(response.get("Payload").read().decode('utf-8'), '')
self.assertIsNone(response.get("FunctionError"))
self.assertEquals(response.get("StatusCode"), 200)
|
the-stack_0_2217 | import asyncio
import time
def timed(fn, *args, **kwargs):
name = fn.__name__
times = []
last = before = time.time()
duration = 0
while duration < 1.0:
if asyncio.iscoroutinefunction(fn):
asyncio.run(fn(*args, **kwargs))
else:
fn(*args, **kwargs)
now = time.time()
times.append(now - last)
last = now
duration = now - before
count = len(times)
times = list(sorted(times))
best = times[:3]
avg = sum(best) / len(best)
if avg < 0.001:
avg *= 1000000
unit = "usec"
elif avg < 0.1:
avg *= 1000
unit = "msec"
else:
unit = "sec"
print(f"{count} runs of {name} in {duration:.1f}s: {avg:.3f} {unit} per run")
return count, duration
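# A minimal usage sketch (the benchmarked function below is hypothetical):
#
#   def build_squares():
#       return [i * i for i in range(10_000)]
#
#   timed(build_squares)  # prints e.g. "1234 runs of build_squares in 1.0s: ... per run"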
|
the-stack_0_2220 | import pytest
import math
import numpy as np
from pandas import read_table, DataFrame, Series
from catboost import Pool, CatBoost, CatBoostClassifier, CatBoostRegressor, CatboostError, cv
from catboost_pytest_lib import data_file, local_canonical_file, remove_time_from_json
import yatest.common
EPS = 1e-5
TRAIN_FILE = data_file('adult', 'train_small')
TEST_FILE = data_file('adult', 'test_small')
CD_FILE = data_file('adult', 'train.cd')
NAN_TRAIN_FILE = data_file('adult_nan', 'train_small')
NAN_TEST_FILE = data_file('adult_nan', 'test_small')
NAN_CD_FILE = data_file('adult_nan', 'train.cd')
CLOUDNESS_TRAIN_FILE = data_file('cloudness_small', 'train_small')
CLOUDNESS_TEST_FILE = data_file('cloudness_small', 'test_small')
CLOUDNESS_CD_FILE = data_file('cloudness_small', 'train.cd')
QUERY_TRAIN_FILE = data_file('querywise_pool', 'train_full3')
QUERY_TEST_FILE = data_file('querywise_pool', 'test3')
QUERY_CD_FILE = data_file('querywise_pool', 'train_full3.cd')
OUTPUT_MODEL_PATH = 'model.bin'
PREDS_PATH = 'predictions.npy'
FIMP_PATH = 'feature_importance.npy'
JSON_LOG_PATH = 'catboost_training.json'
TARGET_IDX = 1
CAT_FEATURES = [0, 1, 2, 4, 6, 8, 9, 10, 11, 12, 16]
model_diff_tool = yatest.common.binary_path("catboost/tools/model_comparator/model_comparator")
def compare_canonical_models(*args, **kwargs):
return local_canonical_file(*args, diff_tool=model_diff_tool, **kwargs)
def map_cat_features(data, cat_features):
for i in range(len(data)):
for j in cat_features:
data[i][j] = str(data[i][j])
return data
def _check_shape(pool):
return np.shape(pool.get_features()) == (101, 17)
def _check_data(data1, data2):
return np.all(np.isclose(data1, data2, rtol=0.001, equal_nan=True))
def test_load_file():
assert _check_shape(Pool(TRAIN_FILE, column_description=CD_FILE))
def test_load_list():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cat_features = pool.get_cat_feature_indices()
data = map_cat_features(pool.get_features(), cat_features)
label = pool.get_label()
assert _check_shape(Pool(data, label, cat_features))
def test_load_ndarray():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cat_features = pool.get_cat_feature_indices()
data = np.array(map_cat_features(pool.get_features(), cat_features))
label = np.array(pool.get_label())
assert _check_shape(Pool(data, label, cat_features))
def test_load_df():
pool = Pool(NAN_TRAIN_FILE, column_description=NAN_CD_FILE)
data = read_table(NAN_TRAIN_FILE, header=None)
label = DataFrame(data.iloc[:, TARGET_IDX])
data.drop([TARGET_IDX], axis=1, inplace=True)
cat_features = pool.get_cat_feature_indices()
pool2 = Pool(data, label, cat_features)
assert _check_data(pool.get_features(), pool2.get_features())
assert _check_data(pool.get_label(), pool2.get_label())
def test_load_series():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
data = read_table(TRAIN_FILE, header=None)
label = Series(data.iloc[:, TARGET_IDX])
data.drop([TARGET_IDX], axis=1, inplace=True)
data = Series(list(data.values))
cat_features = pool.get_cat_feature_indices()
pool2 = Pool(data, label, cat_features)
assert _check_data(pool.get_features(), pool2.get_features())
assert _check_data(pool.get_label(), pool2.get_label())
def test_pool_cat_features():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
assert np.all(pool.get_cat_feature_indices() == CAT_FEATURES)
def test_load_generated():
pool_size = (100, 10)
data = np.round(np.random.normal(size=pool_size), decimals=3)
label = np.random.randint(2, size=pool_size[0])
pool = Pool(data, label)
assert _check_data(pool.get_features(), data)
assert _check_data(pool.get_label(), label)
def test_load_dumps():
pool_size = (100, 10)
data = np.random.randint(10, size=pool_size)
label = np.random.randint(2, size=pool_size[0])
pool1 = Pool(data, label)
lines = []
for i in range(len(data)):
line = [str(label[i])] + [str(x) for x in data[i]]
lines.append('\t'.join(line))
text = '\n'.join(lines)
with open('test_data_dumps', 'w') as f:
f.write(text)
pool2 = Pool('test_data_dumps')
assert _check_data(pool1.get_features(), pool2.get_features())
assert _check_data(pool1.get_label(), pool2.get_label())
def test_predict_regress():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({'iterations': 2, 'random_seed': 0, 'loss_function': 'RMSE'})
model.fit(train_pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_predict_sklearn_regress():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostRegressor(iterations=2, random_seed=0)
model.fit(train_pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_predict_sklearn_class():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, random_seed=0, loss_function='Logloss:border=0.5')
model.fit(train_pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_predict_class_raw():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, random_seed=0)
model.fit(train_pool)
pred = model.predict(test_pool)
np.save(PREDS_PATH, np.array(pred))
return local_canonical_file(PREDS_PATH)
def test_predict_class():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, random_seed=0)
model.fit(train_pool)
pred = model.predict(test_pool, prediction_type="Class")
np.save(PREDS_PATH, np.array(pred))
return local_canonical_file(PREDS_PATH)
def test_predict_class_proba():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, random_seed=0)
model.fit(train_pool)
pred = model.predict_proba(test_pool)
np.save(PREDS_PATH, np.array(pred))
return local_canonical_file(PREDS_PATH)
def test_no_cat_in_predict():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=2, random_seed=0)
model.fit(train_pool)
pred1 = model.predict(map_cat_features(test_pool.get_features(), train_pool.get_cat_feature_indices()))
pred2 = model.predict(Pool(map_cat_features(test_pool.get_features(), train_pool.get_cat_feature_indices()), cat_features=train_pool.get_cat_feature_indices()))
assert _check_data(pred1, pred2)
def test_save_model():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoost()
model.fit(train_pool)
model.save_model(OUTPUT_MODEL_PATH)
model2 = CatBoost(model_file=OUTPUT_MODEL_PATH)
pred1 = model.predict(test_pool)
pred2 = model2.predict(test_pool)
assert _check_data(pred1, pred2)
def test_multiclass():
pool = Pool(CLOUDNESS_TRAIN_FILE, column_description=CLOUDNESS_CD_FILE)
classifier = CatBoostClassifier(iterations=2, random_seed=0, loss_function='MultiClass', thread_count=8)
classifier.fit(pool)
classifier.save_model(OUTPUT_MODEL_PATH)
new_classifier = CatBoostClassifier()
new_classifier.load_model(OUTPUT_MODEL_PATH)
pred = new_classifier.predict_proba(pool)
np.save(PREDS_PATH, np.array(pred))
return local_canonical_file(PREDS_PATH)
def test_querywise():
train_pool = Pool(QUERY_TRAIN_FILE, column_description=QUERY_CD_FILE)
test_pool = Pool(QUERY_TEST_FILE, column_description=QUERY_CD_FILE)
model = CatBoost(params={'loss_function': 'QueryRMSE', 'random_seed': 0, 'iterations': 2, 'thread_count': 8})
model.fit(train_pool)
pred1 = model.predict(test_pool)
df = read_table(QUERY_TRAIN_FILE, delimiter='\t', header=None)
train_query_id = df.loc[:, 1]
train_target = df.loc[:, 0]
train_data = df.drop([0, 1], axis=1).astype(str)
df = read_table(QUERY_TEST_FILE, delimiter='\t', header=None)
test_data = df.drop([0, 1], axis=1).astype(str)
model.fit(train_data, train_target, query_id=train_query_id)
pred2 = model.predict(test_data)
assert _check_data(pred1, pred2)
def test_zero_baseline():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
baseline = np.zeros(pool.num_row())
pool.set_baseline(baseline)
model = CatBoostClassifier(iterations=2, random_seed=0)
model.fit(pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_ones_weight():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
weight = np.ones(pool.num_row())
pool.set_weight(weight)
model = CatBoostClassifier(iterations=2, random_seed=0)
model.fit(pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_non_ones_weight():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
weight = np.arange(1, pool.num_row()+1)
pool.set_weight(weight)
model = CatBoostClassifier(iterations=2, random_seed=0)
model.fit(pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_fit_data():
pool = Pool(CLOUDNESS_TRAIN_FILE, column_description=CLOUDNESS_CD_FILE)
eval_pool = Pool(CLOUDNESS_TEST_FILE, column_description=CLOUDNESS_CD_FILE)
base_model = CatBoostClassifier(iterations=2, random_seed=0, loss_function="MultiClass")
base_model.fit(pool)
baseline = np.array(base_model.predict(pool, prediction_type='RawFormulaVal'))
eval_baseline = np.array(base_model.predict(eval_pool, prediction_type='RawFormulaVal'))
eval_pool.set_baseline(eval_baseline)
model = CatBoostClassifier(iterations=2, random_seed=0, loss_function="MultiClass")
data = map_cat_features(pool.get_features(), pool.get_cat_feature_indices())
model.fit(data, pool.get_label(), pool.get_cat_feature_indices(), sample_weight=np.arange(1, pool.num_row()+1), baseline=baseline, use_best_model=True, eval_set=eval_pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_ntree_limit():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=100, random_seed=0)
model.fit(train_pool)
pred = model.predict_proba(test_pool, ntree_end=10)
np.save(PREDS_PATH, np.array(pred))
return local_canonical_file(PREDS_PATH)
def test_staged_predict():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=10, random_seed=0)
model.fit(train_pool)
preds = []
for pred in model.staged_predict(test_pool):
preds.append(pred)
np.save(PREDS_PATH, np.array(preds))
return local_canonical_file(PREDS_PATH)
def test_invalid_loss_base():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoost({"loss_function": "abcdef"})
model.fit(pool)
def test_invalid_loss_classifier():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(loss_function="abcdef")
model.fit(pool)
def test_invalid_loss_regressor():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostRegressor(loss_function="fee")
model.fit(pool)
def test_no_eval_set():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier()
model.fit(pool, use_best_model=True)
def test_fit_no_label():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier()
model.fit(pool.get_features())
def test_predict_without_fit():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier()
model.predict(pool)
def test_real_numbers_cat_features():
with pytest.raises(CatboostError):
data = np.random.rand(100, 10)
label = np.random.randint(2, size=100)
Pool(data, label, [1, 2])
def test_wrong_ctr_for_classification():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(ctr_description=['Borders:TargetBorderCount=5:TargetBorderType=Uniform'])
model.fit(pool)
def test_wrong_feature_count():
with pytest.raises(CatboostError):
data = np.random.rand(100, 10)
label = np.random.randint(2, size=100)
model = CatBoostClassifier()
model.fit(data, label)
model.predict(data[:, :-1])
def test_feature_importance_off():
with pytest.raises(CatboostError):
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0, calc_feature_importance=False)
model.fit(pool)
model.feature_importances_
def test_wrong_params_classifier():
with pytest.raises(CatboostError):
CatBoostClassifier(wrong_param=1)
def test_wrong_params_base():
with pytest.raises(CatboostError):
CatBoost({'wrong_param': 1})
def test_wrong_params_regressor():
with pytest.raises(CatboostError):
CatBoostRegressor(wrong_param=1)
def test_wrong_kwargs_base():
with pytest.raises(CatboostError):
CatBoost({'kwargs': {'wrong_param': 1}})
def test_custom_eval():
class LoglossMetric(object):
def get_final_error(self, error, weight):
return error / (weight + 1e-38)
def is_max_optimal(self):
return True
def evaluate(self, approxes, target, weight):
assert len(approxes) == 1
assert len(target) == len(approxes[0])
approx = approxes[0]
error_sum = 0.0
weight_sum = 0.0
            for i in range(len(approx)):
w = 1.0 if weight is None else weight[i]
weight_sum += w
error_sum += w * (target[i] * approx[i] - math.log(1 + math.exp(approx[i])))
return error_sum, weight_sum
train_pool = Pool(data=TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(data=TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0, use_best_model=True, eval_metric=LoglossMetric())
model.fit(train_pool, eval_set=test_pool)
pred1 = model.predict(test_pool)
model2 = CatBoostClassifier(iterations=5, random_seed=0, use_best_model=True, eval_metric="Logloss")
model2.fit(train_pool, eval_set=test_pool)
pred2 = model2.predict(test_pool)
for p1, p2 in zip(pred1, pred2):
assert abs(p1 - p2) < EPS
def test_custom_objective():
class LoglossObjective(object):
def calc_ders_range(self, approxes, targets, weights):
assert len(approxes) == len(targets)
if weights is not None:
assert len(weights) == len(approxes)
exponents = []
            for index in range(len(approxes)):
exponents.append(math.exp(approxes[index]))
result = []
for index in xrange(len(targets)):
p = exponents[index] / (1 + exponents[index])
der1 = (1 - p) if targets[index] > 0.0 else -p
der2 = -p * (1 - p)
if weights is not None:
der1 *= weights[index]
der2 *= weights[index]
result.append((der1, der2))
return result
train_pool = Pool(data=TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(data=TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0, use_best_model=True,
loss_function=LoglossObjective(), eval_metric="Logloss",
# Leaf estimation method and gradient iteration are set to match
# defaults for Logloss.
leaf_estimation_method="Newton", leaf_estimation_iterations=10)
model.fit(train_pool, eval_set=test_pool)
pred1 = model.predict(test_pool, prediction_type='RawFormulaVal')
model2 = CatBoostClassifier(iterations=5, random_seed=0, use_best_model=True, loss_function="Logloss")
model2.fit(train_pool, eval_set=test_pool)
pred2 = model2.predict(test_pool, prediction_type='RawFormulaVal')
for p1, p2 in zip(pred1, pred2):
assert abs(p1 - p2) < EPS
def test_pool_after_fit():
pool1 = Pool(TRAIN_FILE, column_description=CD_FILE)
pool2 = Pool(TRAIN_FILE, column_description=CD_FILE)
assert _check_data(pool1.get_features(), pool2.get_features())
model = CatBoostClassifier(iterations=5, random_seed=0)
model.fit(pool2)
assert _check_data(pool1.get_features(), pool2.get_features())
def test_priors():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0, has_time=True, ctr_description=["Borders:Prior=0:Prior=0.6:Prior=1:Prior=5", "Counter:Prior=0:Prior=0.6:Prior=1:Prior=5"])
model.fit(pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_ignored_features():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model1 = CatBoostClassifier(iterations=5, random_seed=0, ignored_features=[1, 2, 3])
model2 = CatBoostClassifier(iterations=5, random_seed=0)
model1.fit(train_pool)
model2.fit(train_pool)
predictions1 = model1.predict(test_pool)
predictions2 = model2.predict(test_pool)
assert not _check_data(predictions1, predictions2)
model1.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_class_weights():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0, class_weights=[1, 2])
model.fit(pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_classification_ctr():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0, ctr_description=['Borders', 'Counter'])
model.fit(pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_regression_ctr():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostRegressor(iterations=5, random_seed=0, ctr_description=['Borders:TargetBorderCount=5:TargetBorderType=Uniform', 'Counter'])
model.fit(pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_copy_model():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model1 = CatBoostRegressor(iterations=5, random_seed=0)
model1.fit(pool)
model2 = model1.copy()
predictions1 = model1.predict(pool)
predictions2 = model2.predict(pool)
assert _check_data(predictions1, predictions2)
model2.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_cv():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
results = cv(pool, {"iterations": 5, "random_seed": 0, "loss_function": "Logloss"})
assert isinstance(results, dict)
assert "Logloss_train_avg" in results
prev_value = results["Logloss_train_avg"][0]
for value in results["Logloss_train_avg"][1:]:
assert value < prev_value
prev_value = value
def test_cv_query():
pool = Pool(QUERY_TRAIN_FILE, column_description=QUERY_CD_FILE)
results = cv(pool, {"iterations": 5, "random_seed": 0, "loss_function": "QueryRMSE"})
assert isinstance(results, dict)
assert "QueryRMSE_train_avg" in results
prev_value = results["QueryRMSE_train_avg"][0]
for value in results["QueryRMSE_train_avg"][1:]:
assert value < prev_value
prev_value = value
def test_feature_importance():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0)
model.fit(pool)
np.save(FIMP_PATH, np.array(model.feature_importances_))
return local_canonical_file(FIMP_PATH)
def test_interaction_feature_importance():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0)
model.fit(pool)
np.save(FIMP_PATH, np.array(model.get_feature_importance(pool, fstr_type='Interaction')))
return local_canonical_file(FIMP_PATH)
def test_doc_feature_importance():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0)
model.fit(pool)
np.save(FIMP_PATH, np.array(model.get_feature_importance(pool, fstr_type='Doc')))
return local_canonical_file(FIMP_PATH)
def test_one_doc_feature_importance():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
model = CatBoostClassifier(iterations=5, random_seed=0)
model.fit(pool)
np.save(FIMP_PATH, np.array(model.get_feature_importance(np.ones(pool.num_col(), dtype=int), 0, cat_features=pool.get_cat_feature_indices(), fstr_type='Doc')))
return local_canonical_file(FIMP_PATH)
def test_od():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(od_type='Iter', od_wait=20, random_seed=42)
model.fit(train_pool, eval_set=test_pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_clone():
estimator = CatBoostClassifier(
custom_metric="Accuracy",
loss_function="MultiClass",
iterations=400)
# This is important for sklearn.base.clone since
# it uses get_params for cloning estimator.
params = estimator.get_params()
new_estimator = CatBoostClassifier(**params)
new_params = new_estimator.get_params()
for param in params:
assert param in new_params
assert new_params[param] == params[param]
def test_different_cat_features_order():
dataset = np.array([[2, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
labels = [1.2, 3.4, 9.5, 24.5]
pool1 = Pool(dataset, labels, cat_features=[0, 1])
pool2 = Pool(dataset, labels, cat_features=[1, 0])
model = CatBoost({'learning_rate': 1, 'loss_function': 'RMSE', 'iterations': 2, 'random_seed': 42})
model.fit(pool1)
assert (model.predict(pool1) == model.predict(pool2)).all()
def test_full_history():
train_pool = Pool(TRAIN_FILE, column_description=CD_FILE)
test_pool = Pool(TEST_FILE, column_description=CD_FILE)
model = CatBoostClassifier(od_type='Iter', od_wait=20, random_seed=42, approx_on_full_history=True)
model.fit(train_pool, eval_set=test_pool)
model.save_model(OUTPUT_MODEL_PATH)
return compare_canonical_models(OUTPUT_MODEL_PATH)
def test_bad_params_in_cv():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
with pytest.warns(UserWarning):
cv(pool, {"iterations": 5, "random_seed": 0, "loss_function": "Logloss", "use_best_model": True})
def test_cv_logging():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
cv(pool, {"iterations": 5, "random_seed": 0, "loss_function": "Logloss", "json_log": JSON_LOG_PATH})
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
def test_cv_with_not_binarized_target():
train_file = data_file('adult_not_binarized', 'train_small')
cd = data_file('adult_not_binarized', 'train.cd')
pool = Pool(train_file, column_description=cd)
cv(pool, {"iterations": 5, "random_seed": 0, "loss_function": "Logloss", "json_log": JSON_LOG_PATH})
return local_canonical_file(remove_time_from_json(JSON_LOG_PATH))
|
the-stack_0_2223 | import turtle, random
rat = turtle.Turtle()
screen = turtle.Screen()
dot_distance = 75
#width = 5
height = 5
rat.penup()
screen.register_shape("NickCage.gif")
rat.shape("NickCage.gif")
def draw_a_star():
for i in range(5):
rat.pendown()
rat.forward(50)
rat.right(144)
rat.penup()
for y in range(height):
# rat.dot()
rat.right(random.randrange(0,360,1))
rat.forward(dot_distance-random.randrange(-100,100,1))
draw_a_star()
turtle.done()
|
the-stack_0_2224 | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/MedicationKnowledge
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import sys
from . import backboneelement, domainresource
class MedicationKnowledge(domainresource.DomainResource):
""" Definition of Medication Knowledge.
Information about a medication that is used to support knowledge.
"""
resource_type = "MedicationKnowledge"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.administrationGuidelines = None
""" Guidelines for administration of the medication.
List of `MedicationKnowledgeAdministrationGuidelines` items (represented as `dict` in JSON). """
self.amount = None
""" Amount of drug in package.
Type `Quantity` (represented as `dict` in JSON). """
self.associatedMedication = None
""" A medication resource that is associated with this medication.
List of `FHIRReference` items referencing `['Medication']` (represented as `dict` in JSON). """
self.code = None
""" Code that identifies this medication.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.contraindication = None
""" Potential clinical issue with or between medication(s).
List of `FHIRReference` items referencing `['DetectedIssue']` (represented as `dict` in JSON). """
self.cost = None
""" The pricing of the medication.
List of `MedicationKnowledgeCost` items (represented as `dict` in JSON). """
self.doseForm = None
""" powder | tablets | capsule +.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.drugCharacteristic = None
""" Specifies descriptive properties of the medicine.
List of `MedicationKnowledgeDrugCharacteristic` items (represented as `dict` in JSON). """
self.ingredient = None
""" Active or inactive ingredient.
List of `MedicationKnowledgeIngredient` items (represented as `dict` in JSON). """
self.intendedRoute = None
""" The intended or approved route of administration.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.kinetics = None
""" The time course of drug absorption, distribution, metabolism and
excretion of a medication from the body.
List of `MedicationKnowledgeKinetics` items (represented as `dict` in JSON). """
self.manufacturer = None
""" Manufacturer of the item.
Type `FHIRReference` referencing `['Organization']` (represented as `dict` in JSON). """
self.medicineClassification = None
""" Categorization of the medication within a formulary or
classification system.
List of `MedicationKnowledgeMedicineClassification` items (represented as `dict` in JSON). """
self.monitoringProgram = None
""" Program under which a medication is reviewed.
List of `MedicationKnowledgeMonitoringProgram` items (represented as `dict` in JSON). """
self.monograph = None
""" Associated documentation about the medication.
List of `MedicationKnowledgeMonograph` items (represented as `dict` in JSON). """
self.packaging = None
""" Details about packaged medications.
Type `MedicationKnowledgePackaging` (represented as `dict` in JSON). """
self.preparationInstruction = None
""" The instructions for preparing the medication.
Type `str`. """
self.productType = None
""" Category of the medication or product.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.regulatory = None
""" Regulatory information about a medication.
List of `MedicationKnowledgeRegulatory` items (represented as `dict` in JSON). """
self.relatedMedicationKnowledge = None
""" Associated or related medication information.
List of `MedicationKnowledgeRelatedMedicationKnowledge` items (represented as `dict` in JSON). """
self.status = None
""" active | inactive | entered-in-error.
Type `str`. """
self.synonym = None
""" Additional names for a medication.
List of `str` items. """
super(MedicationKnowledge, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledge, self).elementProperties()
js.extend(
[
(
"administrationGuidelines",
"administrationGuidelines",
MedicationKnowledgeAdministrationGuidelines,
"MedicationKnowledgeAdministrationGuidelines",
True,
None,
False,
),
("amount", "amount", quantity.Quantity, "Quantity", False, None, False),
(
"associatedMedication",
"associatedMedication",
fhirreference.FHIRReference,
"Reference",
True,
None,
False,
),
(
"code",
"code",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
(
"contraindication",
"contraindication",
fhirreference.FHIRReference,
"Reference",
True,
None,
False,
),
(
"cost",
"cost",
MedicationKnowledgeCost,
"MedicationKnowledgeCost",
True,
None,
False,
),
(
"doseForm",
"doseForm",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
(
"drugCharacteristic",
"drugCharacteristic",
MedicationKnowledgeDrugCharacteristic,
"MedicationKnowledgeDrugCharacteristic",
True,
None,
False,
),
(
"ingredient",
"ingredient",
MedicationKnowledgeIngredient,
"MedicationKnowledgeIngredient",
True,
None,
False,
),
(
"intendedRoute",
"intendedRoute",
codeableconcept.CodeableConcept,
"CodeableConcept",
True,
None,
False,
),
(
"kinetics",
"kinetics",
MedicationKnowledgeKinetics,
"MedicationKnowledgeKinetics",
True,
None,
False,
),
(
"manufacturer",
"manufacturer",
fhirreference.FHIRReference,
"Reference",
False,
None,
False,
),
(
"medicineClassification",
"medicineClassification",
MedicationKnowledgeMedicineClassification,
"MedicationKnowledgeMedicineClassification",
True,
None,
False,
),
(
"monitoringProgram",
"monitoringProgram",
MedicationKnowledgeMonitoringProgram,
"MedicationKnowledgeMonitoringProgram",
True,
None,
False,
),
(
"monograph",
"monograph",
MedicationKnowledgeMonograph,
"MedicationKnowledgeMonograph",
True,
None,
False,
),
(
"packaging",
"packaging",
MedicationKnowledgePackaging,
"MedicationKnowledgePackaging",
False,
None,
False,
),
(
"preparationInstruction",
"preparationInstruction",
str,
"markdown",
False,
None,
False,
),
(
"productType",
"productType",
codeableconcept.CodeableConcept,
"CodeableConcept",
True,
None,
False,
),
(
"regulatory",
"regulatory",
MedicationKnowledgeRegulatory,
"MedicationKnowledgeRegulatory",
True,
None,
False,
),
(
"relatedMedicationKnowledge",
"relatedMedicationKnowledge",
MedicationKnowledgeRelatedMedicationKnowledge,
"MedicationKnowledgeRelatedMedicationKnowledge",
True,
None,
False,
),
("status", "status", str, "code", False, None, False),
("synonym", "synonym", str, "string", True, None, False),
]
)
return js
class MedicationKnowledgeAdministrationGuidelines(backboneelement.BackboneElement):
""" Guidelines for administration of the medication.
Guidelines for the administration of the medication.
"""
resource_type = "MedicationKnowledgeAdministrationGuidelines"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.dosage = None
""" Dosage for the medication for the specific guidelines.
List of `MedicationKnowledgeAdministrationGuidelinesDosage` items (represented as `dict` in JSON). """
self.indicationCodeableConcept = None
""" Indication for use that apply to the specific administration
guidelines.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.indicationReference = None
""" Indication for use that apply to the specific administration
guidelines.
Type `FHIRReference` referencing `['ObservationDefinition']` (represented as `dict` in JSON). """
self.patientCharacteristics = None
""" Characteristics of the patient that are relevant to the
administration guidelines.
List of `MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics` items (represented as `dict` in JSON). """
super(MedicationKnowledgeAdministrationGuidelines, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(
MedicationKnowledgeAdministrationGuidelines, self
).elementProperties()
js.extend(
[
(
"dosage",
"dosage",
MedicationKnowledgeAdministrationGuidelinesDosage,
"MedicationKnowledgeAdministrationGuidelinesDosage",
True,
None,
False,
),
(
"indicationCodeableConcept",
"indicationCodeableConcept",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
"indication",
False,
),
(
"indicationReference",
"indicationReference",
fhirreference.FHIRReference,
"Reference",
False,
"indication",
False,
),
(
"patientCharacteristics",
"patientCharacteristics",
MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics,
"MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics",
True,
None,
False,
),
]
)
return js
class MedicationKnowledgeAdministrationGuidelinesDosage(
backboneelement.BackboneElement
):
""" Dosage for the medication for the specific guidelines.
"""
resource_type = "MedicationKnowledgeAdministrationGuidelinesDosage"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.dosage = None
""" Dosage for the medication for the specific guidelines.
List of `Dosage` items (represented as `dict` in JSON). """
self.type = None
""" Type of dosage.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeAdministrationGuidelinesDosage, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(
MedicationKnowledgeAdministrationGuidelinesDosage, self
).elementProperties()
js.extend(
[
("dosage", "dosage", dosage.Dosage, "Dosage", True, None, True),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
True,
),
]
)
return js
class MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics(
backboneelement.BackboneElement
):
""" Characteristics of the patient that are relevant to the administration
guidelines.
Characteristics of the patient that are relevant to the administration
guidelines (for example, height, weight, gender, etc.).
"""
resource_type = "MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.characteristicCodeableConcept = None
""" Specific characteristic that is relevant to the administration
guideline.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.characteristicQuantity = None
""" Specific characteristic that is relevant to the administration
guideline.
Type `Quantity` (represented as `dict` in JSON). """
self.value = None
""" The specific characteristic.
List of `str` items. """
super(
MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics, self
).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(
MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics, self
).elementProperties()
js.extend(
[
(
"characteristicCodeableConcept",
"characteristicCodeableConcept",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
"characteristic",
True,
),
(
"characteristicQuantity",
"characteristicQuantity",
quantity.Quantity,
"Quantity",
False,
"characteristic",
True,
),
("value", "value", str, "string", True, None, False),
]
)
return js
class MedicationKnowledgeCost(backboneelement.BackboneElement):
""" The pricing of the medication.
The price of the medication.
"""
resource_type = "MedicationKnowledgeCost"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.cost = None
""" The price of the medication.
Type `Money` (represented as `dict` in JSON). """
self.source = None
""" The source or owner for the price information.
Type `str`. """
self.type = None
""" The category of the cost information.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeCost, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationKnowledgeCost, self).elementProperties()
js.extend(
[
("cost", "cost", money.Money, "Money", False, None, True),
("source", "source", str, "string", False, None, False),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
True,
),
]
)
return js
class MedicationKnowledgeDrugCharacteristic(backboneelement.BackboneElement):
""" Specifies descriptive properties of the medicine.
Specifies descriptive properties of the medicine, such as color, shape,
imprints, etc.
"""
resource_type = "MedicationKnowledgeDrugCharacteristic"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.type = None
""" Code specifying the type of characteristic of medication.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.valueBase64Binary = None
""" Description of the characteristic.
Type `str`. """
self.valueCodeableConcept = None
""" Description of the characteristic.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.valueQuantity = None
""" Description of the characteristic.
Type `Quantity` (represented as `dict` in JSON). """
self.valueString = None
""" Description of the characteristic.
Type `str`. """
super(MedicationKnowledgeDrugCharacteristic, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeDrugCharacteristic, self).elementProperties()
js.extend(
[
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
(
"valueBase64Binary",
"valueBase64Binary",
str,
"base64Binary",
False,
"value",
False,
),
(
"valueCodeableConcept",
"valueCodeableConcept",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
"value",
False,
),
(
"valueQuantity",
"valueQuantity",
quantity.Quantity,
"Quantity",
False,
"value",
False,
),
("valueString", "valueString", str, "string", False, "value", False),
]
)
return js
class MedicationKnowledgeIngredient(backboneelement.BackboneElement):
""" Active or inactive ingredient.
Identifies a particular constituent of interest in the product.
"""
resource_type = "MedicationKnowledgeIngredient"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.isActive = None
""" Active ingredient indicator.
Type `bool`. """
self.itemCodeableConcept = None
""" Medication(s) or substance(s) contained in the medication.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.itemReference = None
""" Medication(s) or substance(s) contained in the medication.
Type `FHIRReference` referencing `['Substance']` (represented as `dict` in JSON). """
self.strength = None
""" Quantity of ingredient present.
Type `Ratio` (represented as `dict` in JSON). """
super(MedicationKnowledgeIngredient, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeIngredient, self).elementProperties()
js.extend(
[
("isActive", "isActive", bool, "boolean", False, None, False),
(
"itemCodeableConcept",
"itemCodeableConcept",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
"item",
True,
),
(
"itemReference",
"itemReference",
fhirreference.FHIRReference,
"Reference",
False,
"item",
True,
),
("strength", "strength", ratio.Ratio, "Ratio", False, None, False),
]
)
return js
class MedicationKnowledgeKinetics(backboneelement.BackboneElement):
""" The time course of drug absorption, distribution, metabolism and excretion
of a medication from the body.
"""
resource_type = "MedicationKnowledgeKinetics"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.areaUnderCurve = None
""" The drug concentration measured at certain discrete points in time.
List of `Quantity` items (represented as `dict` in JSON). """
self.halfLifePeriod = None
""" Time required for concentration in the body to decrease by half.
Type `Duration` (represented as `dict` in JSON). """
self.lethalDose50 = None
""" The median lethal dose of a drug.
List of `Quantity` items (represented as `dict` in JSON). """
super(MedicationKnowledgeKinetics, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeKinetics, self).elementProperties()
js.extend(
[
(
"areaUnderCurve",
"areaUnderCurve",
quantity.Quantity,
"Quantity",
True,
None,
False,
),
(
"halfLifePeriod",
"halfLifePeriod",
duration.Duration,
"Duration",
False,
None,
False,
),
(
"lethalDose50",
"lethalDose50",
quantity.Quantity,
"Quantity",
True,
None,
False,
),
]
)
return js
class MedicationKnowledgeMedicineClassification(backboneelement.BackboneElement):
""" Categorization of the medication within a formulary or classification
system.
"""
resource_type = "MedicationKnowledgeMedicineClassification"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.classification = None
""" Specific category assigned to the medication.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.type = None
""" The type of category for the medication (for example, therapeutic
classification, therapeutic sub-classification).
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeMedicineClassification, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeMedicineClassification, self).elementProperties()
js.extend(
[
(
"classification",
"classification",
codeableconcept.CodeableConcept,
"CodeableConcept",
True,
None,
False,
),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
True,
),
]
)
return js
class MedicationKnowledgeMonitoringProgram(backboneelement.BackboneElement):
""" Program under which a medication is reviewed.
The program under which the medication is reviewed.
"""
resource_type = "MedicationKnowledgeMonitoringProgram"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
""" Name of the reviewing program.
Type `str`. """
self.type = None
""" Type of program under which the medication is monitored.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeMonitoringProgram, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeMonitoringProgram, self).elementProperties()
js.extend(
[
("name", "name", str, "string", False, None, False),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
]
)
return js
class MedicationKnowledgeMonograph(backboneelement.BackboneElement):
""" Associated documentation about the medication.
"""
resource_type = "MedicationKnowledgeMonograph"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.source = None
""" Associated documentation about the medication.
Type `FHIRReference` referencing `['DocumentReference', 'Media']` (represented as `dict` in JSON). """
self.type = None
""" The category of medication document.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeMonograph, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeMonograph, self).elementProperties()
js.extend(
[
(
"source",
"source",
fhirreference.FHIRReference,
"Reference",
False,
None,
False,
),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
]
)
return js
class MedicationKnowledgePackaging(backboneelement.BackboneElement):
""" Details about packaged medications.
Information that only applies to packages (not products).
"""
resource_type = "MedicationKnowledgePackaging"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.quantity = None
""" The number of product units the package would contain if fully
loaded.
Type `Quantity` (represented as `dict` in JSON). """
self.type = None
""" A code that defines the specific type of packaging that the
medication can be found in.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgePackaging, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgePackaging, self).elementProperties()
js.extend(
[
(
"quantity",
"quantity",
quantity.Quantity,
"Quantity",
False,
None,
False,
),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
]
)
return js
class MedicationKnowledgeRegulatory(backboneelement.BackboneElement):
""" Regulatory information about a medication.
"""
resource_type = "MedicationKnowledgeRegulatory"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.maxDispense = None
""" The maximum number of units of the medication that can be dispensed
in a period.
Type `MedicationKnowledgeRegulatoryMaxDispense` (represented as `dict` in JSON). """
self.regulatoryAuthority = None
""" Specifies the authority of the regulation.
Type `FHIRReference` referencing `['Organization']` (represented as `dict` in JSON). """
self.schedule = None
""" Specifies the schedule of a medication in jurisdiction.
List of `MedicationKnowledgeRegulatorySchedule` items (represented as `dict` in JSON). """
self.substitution = None
""" Specifies if changes are allowed when dispensing a medication from
a regulatory perspective.
List of `MedicationKnowledgeRegulatorySubstitution` items (represented as `dict` in JSON). """
super(MedicationKnowledgeRegulatory, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeRegulatory, self).elementProperties()
js.extend(
[
(
"maxDispense",
"maxDispense",
MedicationKnowledgeRegulatoryMaxDispense,
"MedicationKnowledgeRegulatoryMaxDispense",
False,
None,
False,
),
(
"regulatoryAuthority",
"regulatoryAuthority",
fhirreference.FHIRReference,
"Reference",
False,
None,
True,
),
(
"schedule",
"schedule",
MedicationKnowledgeRegulatorySchedule,
"MedicationKnowledgeRegulatorySchedule",
True,
None,
False,
),
(
"substitution",
"substitution",
MedicationKnowledgeRegulatorySubstitution,
"MedicationKnowledgeRegulatorySubstitution",
True,
None,
False,
),
]
)
return js
class MedicationKnowledgeRegulatoryMaxDispense(backboneelement.BackboneElement):
""" The maximum number of units of the medication that can be dispensed in a
period.
"""
resource_type = "MedicationKnowledgeRegulatoryMaxDispense"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.period = None
""" The period that applies to the maximum number of units.
Type `Duration` (represented as `dict` in JSON). """
self.quantity = None
""" The maximum number of units of the medication that can be dispensed.
Type `Quantity` (represented as `dict` in JSON). """
super(MedicationKnowledgeRegulatoryMaxDispense, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeRegulatoryMaxDispense, self).elementProperties()
js.extend(
[
("period", "period", duration.Duration, "Duration", False, None, False),
(
"quantity",
"quantity",
quantity.Quantity,
"Quantity",
False,
None,
True,
),
]
)
return js
class MedicationKnowledgeRegulatorySchedule(backboneelement.BackboneElement):
""" Specifies the schedule of a medication in jurisdiction.
"""
resource_type = "MedicationKnowledgeRegulatorySchedule"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.schedule = None
""" Specifies the specific drug schedule.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeRegulatorySchedule, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeRegulatorySchedule, self).elementProperties()
js.extend(
[
(
"schedule",
"schedule",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
True,
),
]
)
return js
class MedicationKnowledgeRegulatorySubstitution(backboneelement.BackboneElement):
""" Specifies if changes are allowed when dispensing a medication from a
regulatory perspective.
"""
resource_type = "MedicationKnowledgeRegulatorySubstitution"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.allowed = None
""" Specifies if regulation allows for changes in the medication when
dispensing.
Type `bool`. """
self.type = None
""" Specifies the type of substitution allowed.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeRegulatorySubstitution, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(MedicationKnowledgeRegulatorySubstitution, self).elementProperties()
js.extend(
[
("allowed", "allowed", bool, "boolean", False, None, True),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
True,
),
]
)
return js
class MedicationKnowledgeRelatedMedicationKnowledge(backboneelement.BackboneElement):
""" Associated or related medication information.
Associated or related knowledge about a medication.
"""
resource_type = "MedicationKnowledgeRelatedMedicationKnowledge"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.reference = None
""" Associated documentation about the associated medication knowledge.
List of `FHIRReference` items referencing `['MedicationKnowledge']` (represented as `dict` in JSON). """
self.type = None
""" Category of medicationKnowledge.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MedicationKnowledgeRelatedMedicationKnowledge, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(
MedicationKnowledgeRelatedMedicationKnowledge, self
).elementProperties()
js.extend(
[
(
"reference",
"reference",
fhirreference.FHIRReference,
"Reference",
True,
None,
True,
),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
True,
),
]
)
return js
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + ".codeableconcept"]
try:
from . import dosage
except ImportError:
dosage = sys.modules[__package__ + ".dosage"]
try:
from . import duration
except ImportError:
duration = sys.modules[__package__ + ".duration"]
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + ".fhirreference"]
try:
from . import money
except ImportError:
money = sys.modules[__package__ + ".money"]
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + ".quantity"]
try:
from . import ratio
except ImportError:
ratio = sys.modules[__package__ + ".ratio"]
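# Illustrative usage sketch (added for documentation; not part of the generated
# resource module). It assumes the base classes imported above accept a plain
# JSON dictionary and perform the element validation themselves.
def _example_medication_knowledge():
    jsondict = {
        "resourceType": "MedicationKnowledge",
        "status": "active",
        "synonym": ["acetylsalicylic acid"],
    }
    mk = MedicationKnowledge(jsondict=jsondict, strict=True)
    return mk.status, mk.synonym  # ("active", ["acetylsalicylic acid"])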
|
the-stack_0_2226 | #!/usr/bin/python3
import unittest
from base import TestBase
class LoginTest(TestBase):
def test_anonymous_login(self):
info = self.call('/user')
self.assertIsNone(info['user'])
def test_logged_in(self):
with self.client:
email = '[email protected]'
self.login(email)
info = self.call('/user')
self.assertEqual(email, info['user']['email'])
def test_new_user_has_zero_credit(self):
with self.client:
self.login('[email protected]')
info = self.call('/user')
self.assertEqual(0, info['user']['credit'])
def test_user_cannot_change_his_credit(self):
with self.client:
user = self.login('[email protected]')
self.assertEqual(0, user['credit'])
userUrl = '/api/user/{}'.format(user['id'])
self.call(userUrl, credit=20, expectedStatus=405)
self.assertEqual(0, self.call('/user')['user']['credit'])
if __name__ == "__main__":
unittest.main(verbosity=2)
|
the-stack_0_2228 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitSku(Model):
"""Contains SKU in an ExpressRouteCircuit.
:param name: The name of the SKU.
:type name: str
:param tier: The tier of the SKU. Possible values are 'Standard',
'Premium' or 'Basic'. Possible values include: 'Standard', 'Premium',
'Basic'
:type tier: str or
~azure.mgmt.network.v2018_11_01.models.ExpressRouteCircuitSkuTier
:param family: The family of the SKU. Possible values are: 'UnlimitedData'
and 'MeteredData'. Possible values include: 'UnlimitedData', 'MeteredData'
:type family: str or
~azure.mgmt.network.v2018_11_01.models.ExpressRouteCircuitSkuFamily
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
}
def __init__(self, *, name: str=None, tier=None, family=None, **kwargs) -> None:
super(ExpressRouteCircuitSku, self).__init__(**kwargs)
self.name = name
self.tier = tier
self.family = family
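# Illustrative sketch (not part of the generated client): construct a
# Standard-tier, metered-data SKU as described in the docstring above.
# The attribute values are assumptions chosen for demonstration only.
def _example_sku():
    return ExpressRouteCircuitSku(
        name="Standard_MeteredData",
        tier="Standard",
        family="MeteredData",
    )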
|
the-stack_0_2229 | """
Canny edge detection adapted from https://github.com/DCurro/CannyEdgePytorch
"""
import torch
import torch.nn as nn
import numpy as np
from scipy.signal.windows import gaussian
class CannyEdgeDetector(nn.Module):
def __init__(self,
non_max_suppression=True,
gaussian_filter_std=1.0,
gaussian_filter_size=5,
threshold=0.2):
super(CannyEdgeDetector, self).__init__()
self.threshold = threshold
self.non_max_suppression = non_max_suppression
# Gaussian filter for smoothing
gaussian_filter = gaussian(gaussian_filter_size, std=gaussian_filter_std).reshape([1, gaussian_filter_size])
gaussian_filter = gaussian_filter / gaussian_filter.sum()
self.gaussian_filter_horizontal = nn.Conv2d(in_channels=1,
out_channels=1,
kernel_size=(1, gaussian_filter_size),
padding=(0, gaussian_filter_size // 2),
bias=False)
# self.gaussian_filter_horizontal.weight[:] = torch.from_numpy(gaussian_filter).float()
self.gaussian_filter_horizontal.weight.data = torch.from_numpy(gaussian_filter).float()[None, None, :, :]
self.gaussian_filter_vertical = nn.Conv2d(in_channels=1,
out_channels=1,
kernel_size=(gaussian_filter_size, 1),
padding=(gaussian_filter_size // 2, 0),
bias=False)
# self.gaussian_filter_vertical.weight[:] = torch.from_numpy(gaussian_filter.T)
self.gaussian_filter_vertical.weight.data = torch.from_numpy(gaussian_filter.T).float()[None, None, :, :]
# Sobel filter for gradient
sobel_filter = np.array([[1, 0, -1],
[2, 0, -2],
[1, 0, -1]])
self.sobel_filter_horizontal = nn.Conv2d(in_channels=1,
out_channels=1,
kernel_size=sobel_filter.shape,
padding=sobel_filter.shape[0] // 2,
bias=False)
# self.sobel_filter_horizontal.weight[:] = torch.from_numpy(sobel_filter).float()
self.sobel_filter_horizontal.weight.data = torch.from_numpy(sobel_filter).float()[None, None, :, :]
self.sobel_filter_vertical = nn.Conv2d(in_channels=1,
out_channels=1,
kernel_size=sobel_filter.shape,
padding=sobel_filter.shape[0] // 2,
bias=False)
# self.sobel_filter_vertical.weight[:] = torch.from_numpy(sobel_filter.T).float()
self.sobel_filter_vertical.weight.data = torch.from_numpy(sobel_filter.T).float()[None, None, :, :]
# Directional filters for non-max suppression (edge thinning) using gradient orientations.
# filters were flipped manually
if self.non_max_suppression:
filter_0 = np.array([[0, 0, 0],
[0, 1, -1],
[0, 0, 0]])
filter_45 = np.array([[0, 0, 0],
[0, 1, 0],
[0, 0, -1]])
filter_90 = np.array([[0, 0, 0],
[0, 1, 0],
[0, -1, 0]])
filter_135 = np.array([[0, 0, 0],
[0, 1, 0],
[-1, 0, 0]])
filter_180 = np.array([[0, 0, 0],
[-1, 1, 0],
[0, 0, 0]])
filter_225 = np.array([[-1, 0, 0],
[0, 1, 0],
[0, 0, 0]])
filter_270 = np.array([[0, -1, 0],
[0, 1, 0],
[0, 0, 0]])
filter_315 = np.array([[0, 0, -1],
[0, 1, 0],
[0, 0, 0]])
all_filters = np.stack([filter_0, filter_45, filter_90, filter_135, filter_180, filter_225, filter_270, filter_315])
self.directional_filter = nn.Conv2d(in_channels=1,
out_channels=8,
kernel_size=filter_0.shape,
padding=filter_0.shape[-1] // 2,
bias=False)
# self.directional_filter.weight[:] = torch.from_numpy(all_filters[:, None, ...])
self.directional_filter.weight.data = torch.from_numpy(all_filters[:, None, :, :]).float()
def forward(self, img):
"""
:param img: (batch_size, num_channels, img_wh, img_wh)
:return:
"""
batch_size = img.shape[0]
num_channels = img.shape[1]
blurred_img = torch.zeros_like(img) # (batch_size, num_channels, img_wh, img_wh)
grad_x = torch.zeros((batch_size, 1, *img.shape[2:]), device=img.device) # (batch_size, 1, img_wh, img_wh)
grad_y = torch.zeros((batch_size, 1, *img.shape[2:]), device=img.device) # (batch_size, 1, img_wh, img_wh)
for c in range(num_channels):
# Gaussian smoothing
blurred = self.gaussian_filter_vertical(self.gaussian_filter_horizontal(img[:, [c], :, :])) # (batch_size, 1, img_wh, img_wh)
blurred_img[:, [c]] = blurred
# Gradient
grad_x += self.sobel_filter_horizontal(blurred) # (batch_size, 1, img_wh, img_wh)
grad_y += self.sobel_filter_vertical(blurred) # (batch_size, 1, img_wh, img_wh)
# Gradient magnitude and orientation
grad_x, grad_y = grad_x / num_channels, grad_y / num_channels # Average per-pixel gradients over channels
grad_magnitude = (grad_x ** 2 + grad_y ** 2) ** 0.5 # Per-pixel gradient magnitude
grad_orientation = torch.atan2(grad_y, grad_x) * (180.0/np.pi) + 180.0 # Per-pixel gradient orientation in degrees with range (0°, 360°)
grad_orientation = torch.round(grad_orientation / 45.0) * 45.0 # Bin gradient orientations
# Thresholding
thresholded_grad_magnitude = grad_magnitude.clone()
thresholded_grad_magnitude[grad_magnitude < self.threshold] = 0.0
output = {'blurred_img': blurred_img, # (batch_size, num_channels, img_wh, img_wh)
'grad_magnitude': grad_magnitude, # (batch_size, 1, img_wh, img_wh)
'grad_orientation': grad_orientation, # (batch_size, 1, img_wh, img_wh)
'thresholded_grad_magnitude': thresholded_grad_magnitude} # (batch_size, 1, img_wh, img_wh)
assert grad_magnitude.size() == grad_orientation.size() == thresholded_grad_magnitude.size()
# Non-max suppression (edge thinning)
if self.non_max_suppression:
all_direction_filtered = self.directional_filter(grad_magnitude) # (batch_size, 8, img_wh, img_wh)
positive_idx = (grad_orientation / 45) % 8 # (batch_size, 1, img_wh, img_wh) Index of positive gradient direction (0: 0°, ..., 7: 315°) at each pixel
thin_edges = grad_magnitude.clone() # (batch_size, 1, img_wh, img_wh)
for pos_i in range(4):
neg_i = pos_i + 4
is_oriented_i = (positive_idx == pos_i) * 1
is_oriented_i = is_oriented_i + (positive_idx == neg_i) * 1 # > 0 if pixel is oriented in pos_i or neg_i direction
pos_directional = all_direction_filtered[:, pos_i]
neg_directional = all_direction_filtered[:, neg_i]
selected_direction = torch.stack([pos_directional, neg_directional])
# get the local maximum pixels for the angle
is_max = selected_direction.min(dim=0)[0] > 0.0 # Check if pixel greater than neighbours in pos_i and neg_i directions.
is_max = torch.unsqueeze(is_max, dim=1)
# apply non maximum suppression
to_remove = (is_max == 0) * 1 * (is_oriented_i) > 0
thin_edges[to_remove] = 0.0
thresholded_thin_edges = thin_edges.clone()
thresholded_thin_edges[thin_edges < self.threshold] = 0.0
output['thin_edges'] = thin_edges
output['thresholded_thin_edges'] = thresholded_thin_edges
return output
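# Illustrative usage sketch (not part of the original module): run the detector
# on a random batch. Shapes follow the forward() docstring; the image size and
# threshold below are arbitrary assumptions.
def _example_canny_usage():
    detector = CannyEdgeDetector(non_max_suppression=True, threshold=0.2)
    img = torch.rand(2, 3, 64, 64)  # (batch_size, num_channels, img_wh, img_wh)
    with torch.no_grad():
        output = detector(img)
    # output['thresholded_thin_edges'] has shape (2, 1, 64, 64)
    return output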
|
the-stack_0_2231 | import time
import orjson
import asyncio
import websockets
from typing import Optional
from enum import IntEnum
from dataclasses import dataclass
from dataclasses_json import dataclass_json
from websockets.exceptions import ConnectionClosedError, ConnectionClosed, ConnectionClosedOK
from athanor.app import Service
UNKNOWN = "UNKNOWN"
class MudProtocol(IntEnum):
TELNET = 0
WEBSOCKET = 1
def __str__(self):
if self == 0:
return "Telnet"
elif self == 1:
return "WebSocket"
else:
return "Unknown"
#Shamelessly yoinked this IntEnum from Rich for K.I.S.S. purposes.
class ColorSystem(IntEnum):
"""One of the 3 color system supported by terminals."""
STANDARD = 1
EIGHT_BIT = 2
TRUECOLOR = 3
WINDOWS = 4
COLOR_MAP = {
"ansi": ColorSystem.STANDARD,
"xterm256": ColorSystem.EIGHT_BIT,
"truecolor": ColorSystem.TRUECOLOR
}
@dataclass_json
@dataclass
class ConnectionDetails:
protocol: MudProtocol = 0
client_id: str = UNKNOWN
client_name: str = UNKNOWN
client_version: str = UNKNOWN
host_address: str = UNKNOWN
host_name: str = UNKNOWN
host_port: int = 0
connected: float = time.time()
utf8: bool = False
color: Optional[ColorSystem] = None
screen_reader: bool = False
proxy: bool = False
osc_color_palette: bool = False
vt100: bool = False
mouse_tracking: bool = False
naws: bool = False
width: int = 78
height: int = 24
mccp2: bool = False
mccp2_active: bool = False
mccp3: bool = False
mccp3_active: bool = False
mtts: bool = False
ttype: bool = False
mnes: bool = False
suppress_ga: bool = False
force_endline: bool = False
linemode: bool = False
mssp: bool = False
mxp: bool = False
mxp_active: bool = False
oob: bool = False
class ConnectionInMessageType(IntEnum):
GAMEDATA = 0
CONNECT = 1
READY = 2
MSSP = 4
DISCONNECT = 5
UPDATE = 6
@dataclass_json
@dataclass
class ConnectionInMessage:
msg_type: ConnectionInMessageType
client_id: str
data: Optional[object]
class ConnectionOutMessageType(IntEnum):
GAMEDATA = 0
MSSP = 1
DISCONNECT = 2
@dataclass_json
@dataclass
class ConnectionOutMessage:
msg_type: ConnectionOutMessageType
client_id: str
data: Optional[object]
class PortalOutMessageType(IntEnum):
EVENTS = 0
HELLO = 1
SYSTEM = 2
@dataclass_json
@dataclass
class PortalOutMessage:
msg_type: PortalOutMessageType
process_id: int
data: Optional[object]
class ServerInMessageType(IntEnum):
EVENTS = 0
HELLO = 1
SYSTEM = 2
@dataclass_json
@dataclass
class ServerInMessage:
msg_type: ServerInMessageType
process_id: int
data: Optional[object]
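# Illustrative sketch (not part of the original module): the @dataclass_json
# decorator adds to_json()/from_json() helpers, which is how these message
# dataclasses can be serialized for the websocket link.
def _example_serialize_message():
    msg = ConnectionInMessage(
        msg_type=ConnectionInMessageType.CONNECT,
        client_id="client-1",
        data=None,
    )
    return msg.to_json()  # e.g. '{"msg_type": 1, "client_id": "client-1", "data": null}'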
class LinkProtocol:
def __init__(self, service, ws, path):
self.service = service
self.connection = ws
self.path = path
self.outbox = asyncio.Queue()
self.task = None
self.running = False
async def run(self):
self.running = True
self.task = asyncio.create_task(self.run_tasks())
await self.task
self.running = False
async def run_tasks(self):
await asyncio.gather(self.read(), self.write())
async def read(self):
try:
async for message in self.connection:
await self.process_message(message)
except ConnectionClosedError:
self.running = False
self.task.cancel()
except ConnectionClosedOK:
self.running = False
self.task.cancel()
except ConnectionClosed:
self.running = False
self.task.cancel()
async def write(self):
while self.running:
msg = await self.outbox.get()
#print(f"{self.service.app.config.name.upper()} SENDING MESSAGE: {msg}")
if isinstance(msg, str):
await self.connection.send(msg)
else:
await self.connection.send(orjson.dumps(msg))
async def process_message(self, message):
#print(f"{self.service.app.config.name.upper()} RECEIVED MESSAGE: {message}")
if isinstance(message, bytes):
data = orjson.loads(message.decode())
await self.service.message_from_link(data)
else:
print(f"{self.service.app.config.name} got unknown websocket message: {message}")
class LinkService(Service):
def __init__(self, app):
super().__init__(app)
self.app.link = self
self.link: Optional[LinkProtocol] = None
self.interface: Optional[str] = None
self.port: int = 0
self.in_events: Optional[asyncio.Queue] = None
self.out_events: Optional[asyncio.Queue] = None
def setup(self):
link_conf = self.app.config.link
interface = self.app.config.interfaces.get(link_conf["interface"], None)
if interface is None:
raise ValueError("Portal must have a link interface!")
self.interface = interface
port = int(link_conf["port"])
if port < 0 or port > 65535:
raise ValueError(f"Invalid port: {port}. Port must be 16-bit unsigned integer")
self.port = port
async def async_setup(self):
self.in_events = asyncio.Queue()
self.out_events = asyncio.Queue()
async def async_run(self):
pass
async def handle_in_events(self):
pass
async def handle_out_events(self):
pass
def new_link(self, ws, path):
link = LinkProtocol(self, ws, path)
if self.link:
self.close_link()
self.link = link
self.on_new_link()
return link.run()
def on_new_link(self):
pass
def close_link(self):
pass
async def message_from_link(self, message):
pass
class LinkServiceServer(LinkService):
def __init__(self, app):
super().__init__(app)
self.listener = None
async def async_run(self):
await asyncio.gather(self.listener, self.handle_in_events(), self.handle_out_events())
async def async_setup(self):
await super().async_setup()
self.listener = websockets.serve(self.new_link, self.interface, self.port)
class LinkServiceClient(LinkService):
async def async_run(self):
await asyncio.gather(self.async_link(), self.handle_in_events(), self.handle_out_events())
async def async_link(self):
url = f"ws://{self.interface}:{self.port}"
while True:
async with websockets.connect(url) as ws:
self.link = LinkProtocol(self, ws, "/")
self.on_new_link()
await self.link.run()
await asyncio.sleep(0.1)
async def handle_in_events(self):
while True:
msg = await self.in_events.get()
await self.app.conn.in_events.put(msg)
async def handle_out_events(self):
while True:
if self.link:
msg = await self.out_events.get()
await self.link.outbox.put(msg)
else:
await asyncio.sleep(1) |
the-stack_0_2233 | import inspect
from typing import Callable, Type
from open_mafia_engine.util.repr import ReprMixin
class MafiaError(Exception, ReprMixin):
"""Base class for Mafia exceptions."""
class MafiaAmbiguousTypeName(MafiaError):
"""The type name conficts with an existing name."""
def __init__(self, existing_type: Type[object], new_type: Type[object]) -> None:
self.existing_type = existing_type
self.new_type = new_type
self.type_name = type_name = existing_type.__qualname__
super().__init__(
f"""Type {type_name!r} conficts with existing type.
Existing type defined in: {inspect.getmodule(existing_type)}
New type defined in: {inspect.getmodule(new_type)}
"""
)
class MafiaTypeNotFound(MafiaError):
"""The type was not found."""
def __init__(self, type_name: str) -> None:
self.type_name = type_name
super().__init__(f"Couldn't find GameObject subtype {type_name!r}")
class MafiaConverterError(MafiaError, TypeError):
"""Could not convert object to the requested type."""
def __init__(self, obj: str, type_: Type):
self.obj = obj
self.type_ = type_
super().__init__(f"Couldn't convert {obj!r} to {type_!r}")
class MafiaBadHandler(MafiaError, TypeError):
"""Function can't be used as an event handler."""
def __init__(self, func: Callable):
self.func = func
super().__init__(f"Function isn't a legal event handler: {func!r}")
class MafiaBadBuilder(MafiaError, TypeError):
"""Function can't be used as a game builder."""
def __init__(self, func: Callable):
self.func = func
super().__init__(f"Function isn't a legal game builder: {func!r}")
|
the-stack_0_2235 | '''
Manage the pipeline : reading the logs, parsing them and generating stats.
'''
import os
import time
from monilog.parser import Parser
from monilog.statistics import Statistics
from monilog.utils import init_logger
HIGH_TRAFFIC_DUR = 2*60
STAT_DUR = 10
MAX_IDLE_TIME = 5*60
class MonilogPipeline:
'''
    Read logs and generate statistics.
    Args:
        file (str): The file with the logs to monitor.
        threshold (int): Maximum average requests per second over the past
            2 minutes before a high-traffic alert is raised.
stop (bool): Whether to stop the monitoring.
'''
def __init__(self,
file='/tmp/access.log',
threshold=10):
self.file = file
self.threshold = threshold
self.stop = False
def stop_monitoring(self):
'''
To call when the monitoring app should be stopped.
'''
self.stop = True
def run(self):
'''
Run the monitoring pipeline.
'''
parser = Parser()
get_stats = Statistics(STAT_DUR)
alert = False
high_traffic_nb = 0
traffic_buffer = []
if not os.path.exists(self.file):
time.sleep(1)
        file = open(self.file, 'r')  # readline() returns '' when no new data has been appended yet
stat_time = time.time()
high_traffic_time = time.time()
start_idle_time = None
idle_duration = 0
logger = init_logger()
while not self.stop:
line = file.readline()
if not line:
if not start_idle_time:
start_idle_time = time.time()
else:
idle_duration = time.time() - start_idle_time
if idle_duration > MAX_IDLE_TIME:
logger.info(
'Stopping monitoring : Logging app not used for %d s.\n'
% (int(idle_duration))
)
self.stop = True
else:
start_idle_time = None
idle_duration = 0
try:
parsed_line = parser(line)
            except Exception:
                # logger.warning(f"There was an error parsing: {line}")
continue
traffic_buffer.append(
parsed_line
)
high_traffic_nb += 1
if time.time() - stat_time >= STAT_DUR:
logger.info('\n'+get_stats(traffic_buffer))
stat_time = time.time()
traffic_buffer = []
if time.time() - high_traffic_time >= HIGH_TRAFFIC_DUR:
if high_traffic_nb/HIGH_TRAFFIC_DUR > self.threshold and not alert:
alert = True
logger.warning(
"High traffic generated an alert - hits = %f, triggered at %s.\n"
% (
high_traffic_nb/HIGH_TRAFFIC_DUR,
time.strftime('%d/%b/%Y %H:%M:%S')
)
)
elif high_traffic_nb/HIGH_TRAFFIC_DUR <= self.threshold and alert:
logger.info(
"The high traffic alert is recovered at %s.\n"
% (time.strftime('%d/%b/%Y %H:%M:%S'))
)
high_traffic_time = time.time()
high_traffic_nb = 0
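# Illustrative usage sketch (not part of the original module): monitor the
# default log file with a 10 requests/second alert threshold. The path and
# threshold are assumptions for demonstration only.
def _example_run_pipeline():
    pipeline = MonilogPipeline(file='/tmp/access.log', threshold=10)
    try:
        pipeline.run()  # blocks until stopped or idle for too long
    except KeyboardInterrupt:
        pipeline.stop_monitoring()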
|
the-stack_0_2236 | import sys
sys.setrecursionlimit(500000)
class Solution:
# @param A : list of integers
# @return an integer
def solve(self, parents):
if not parents:
return 0
assert len(parents) >= 1
tree = make_tree(parents)
depth, max_dist = find_max_dist(tree)
return max_dist
class TreeNode:
__slots__ = ['childs']
def __init__(self):
self.childs = []
def append(self, child):
self.childs.append(child)
def make_tree(parents):
n = len(parents)
assert -1 in parents
root = parents.index(-1)
nodes = [TreeNode() for _id in range(n)]
for i,p in enumerate(parents):
if p==-1: continue
nodes[p].append(nodes[i])
assert parents[root] == -1
return nodes[root]
def find_max_dist(tree):
''' @return (depth, max_dist) '''
assert tree
if len(tree.childs) == 0:
return (0, 0)
dms = [find_max_dist(child) for child in tree.childs]
ds, ms = zip(*dms)
max_depth_so_far = 1+max(ds)
if len(tree.childs) == 1:
assert len(ds) == 1
max_dist_so_far = max(ds[0]+1, max(ms))
else:
max_dist_so_far = max(sum(top2_among(ds))+2, max(ms))
return (max_depth_so_far, max_dist_so_far)
def top2_among(ds):
top2 = Top2()
for d in ds:
top2.push(d)
return top2.fst, top2.snd
class Top2:
def __init__(self):
self.fst, self.snd = 0, 0
def push(self, n):
if self.fst <= n:
self.fst, self.snd = n, self.fst
elif self.snd < n:
self.snd = n
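# Illustrative sketch (not part of the original solution): for the parent array
# [-1, 0, 0, 1, 1], node 0 is the root with children 1 and 2, and node 1 has
# children 3 and 4. The longest path is 3 -> 1 -> 0 -> 2, so solve() returns 3.
def _example_solve():
    return Solution().solve([-1, 0, 0, 1, 1])  # -> 3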
|
the-stack_0_2237 | """Documenter module docstring."""
import ast
import importlib
import inspect
import os
import re
import textwrap
from collections import namedtuple
from functools import lru_cache
from types import ModuleType
from typing import Any, Callable, Dict, GenericMeta, List, Optional, Pattern, Tuple, Type, Union
RECURSIVE_NODES = (ast.If, ast.IfExp, ast.Try, ast.With, ast.ExceptHandler)
# exactly two leading underscores, exactly two trailing underscores
# since we enforce one non-underscore after the two leading underscores,
# we put the rest in an optional group
RE_SPECIAL: Pattern = re.compile(r"^__[^_]([\w_]*[^_])?__$")
# at least two leading underscores, at most one trailing underscore
# since we enforce one non-underscore before the last,
# we make the previous characters optional with an asterisk
RE_CLASS_PRIVATE: Pattern = re.compile(r"^__[\w_]*[^_]_?$")
# at most one leading underscore, then whatever
RE_PRIVATE: Pattern = re.compile(r"^_[^_][\w_]*$")
CATEGORY_ATTRIBUTE = "attribute"
CATEGORY_METHOD = "method"
CATEGORY_FUNCTION = "function"
CATEGORY_MODULE = "module"
CATEGORY_CLASS = "class"
NAME_SPECIAL = ("special", lambda n: bool(RE_SPECIAL.match(n)))
NAME_CLASS_PRIVATE = ("class-private", lambda n: bool(RE_CLASS_PRIVATE.match(n)))
NAME_PRIVATE = ("private", lambda n: bool(RE_PRIVATE.match(n)))
NAME_PROPERTIES = {
CATEGORY_ATTRIBUTE: [NAME_SPECIAL, NAME_CLASS_PRIVATE, NAME_PRIVATE],
CATEGORY_METHOD: [NAME_SPECIAL, NAME_PRIVATE],
CATEGORY_FUNCTION: [NAME_PRIVATE],
CATEGORY_CLASS: [NAME_PRIVATE],
CATEGORY_MODULE: [NAME_SPECIAL, NAME_PRIVATE],
}
def node_is_docstring(node: ast.AST) -> bool:
return isinstance(node, ast.Expr) and isinstance(node.value, ast.Str)
def node_to_docstring(node: Union[ast.Expr, ast.Str]) -> str:
return node.value.s
def node_is_assignment(node: ast.AST) -> bool:
return isinstance(node, ast.Assign)
def node_to_names(node: ast.Assign) -> List[str]:
names = []
for target in node.targets:
if isinstance(target, ast.Attribute):
names.append(target.attr)
elif isinstance(target, ast.Name):
names.append(target.id)
return names
def get_name_properties(name: str, category: str) -> List[str]:
properties = []
for prop in NAME_PROPERTIES[category]:
if prop[1](name):
properties.append(prop[0])
return properties
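# Illustrative sketch (not part of the original module): "__init__" is
# classified as a special method name, while "_helper" is a private function
# name. The sample names are assumptions for demonstration only.
def _example_name_properties():
    return (
        get_name_properties("__init__", CATEGORY_METHOD),   # ['special']
        get_name_properties("_helper", CATEGORY_FUNCTION),  # ['private']
    )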
def get_attribute_names_and_docstring(node1, node2):
if node_is_docstring(node2) and node_is_assignment(node1):
return node_to_names(node1), node_to_docstring(node2)
raise ValueError
@lru_cache(maxsize=None)
def get_attributes(module: ModuleType) -> List["Object"]:
with open(module.__file__) as stream:
code = stream.read()
initial_ast_body = ast.parse(code).body
return _get_attributes(initial_ast_body, name_prefix=module.__name__)
def _get_attributes(ast_body: list, name_prefix: str, properties: Optional[List[str]] = None) -> List["Object"]:
if not properties:
properties = []
documented_attributes = []
previous_node = None
for node in ast_body:
try:
names, docstring = get_attribute_names_and_docstring(previous_node, node)
except ValueError:
if isinstance(node, RECURSIVE_NODES):
documented_attributes.extend(_get_attributes(node.body, name_prefix, properties))
if isinstance(node, ast.Try):
documented_attributes.extend(_get_attributes(node.finalbody, name_prefix, properties))
elif isinstance(node, ast.FunctionDef) and node.name == "__init__":
documented_attributes.extend(_get_attributes(node.body, name_prefix))
elif isinstance(node, ast.ClassDef):
documented_attributes.extend(
_get_attributes(node.body, f"{name_prefix}.{node.name}", properties=["class"])
)
else:
for name in names:
documented_attributes.append(
Object(
category=CATEGORY_ATTRIBUTE,
path=f"{name_prefix}.{name}",
name=name,
docstring=docstring,
properties=properties + get_name_properties(name, CATEGORY_ATTRIBUTE),
)
)
previous_node = node
return documented_attributes
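# Illustrative sketch (not part of the original module): a module-level
# assignment immediately followed by a string literal is picked up as a
# documented attribute. The source string below is an assumption for
# demonstration only.
def _example_documented_attribute():
    source = 'MAX_RETRIES = 3\n"""How many times to retry."""\n'
    body = ast.parse(source).body
    return _get_attributes(body, name_prefix="example")  # one Object with path "example.MAX_RETRIES"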
def import_object(path: str) -> Tuple[ModuleType, Any]:
"""
Transform a path into an actual Python object.
    The path can be arbitrarily long. You can pass the path to a package,
a module, a class, a function or a global variable, as deep as you
want, as long as the deepest module is importable through
``importlib.import_module`` and each object is obtainable through
the ``getattr`` method. Local objects will not work.
Args:
path: the dot-separated path of the object.
Returns:
tuple: the imported module and obtained object.
"""
if not path:
raise ValueError(f"path must be a valid Python path, not {path}")
obj_parent_modules = path.split(".")
objects = []
while True:
try:
parent_module_path = ".".join(obj_parent_modules)
parent_module = importlib.import_module(parent_module_path)
break
except ImportError:
if len(obj_parent_modules) == 1:
raise ImportError("No module named '%s'" % obj_parent_modules[0])
objects.insert(0, obj_parent_modules.pop(-1))
current_object = parent_module
for obj in objects:
current_object = getattr(current_object, obj)
module = inspect.getmodule(current_object)
return module, current_object
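# Illustrative sketch (not part of the original module): resolve a dotted path
# to its containing module and the object itself.
def _example_import_object():
    module, obj = import_object("os.path.join")
    return module.__name__, obj.__name__  # e.g. ("posixpath", "join")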
ADMONITIONS = {
"note:": "note",
"see also:": "seealso",
"abstract:": "abstract",
"summary:": "summary",
"tldr:": "tldr",
"info:": "info",
"information:": "info",
"todo:": "todo",
"tip:": "tip",
"hint:": "hint",
"important:": "important",
"success:": "success",
"check:": "check",
"done:": "done",
"question:": "question",
"help:": "help",
"faq:": "faq",
"warning:": "warning",
"caution:": "caution",
"attention:": "attention",
"failure:": "failure",
"fail:": "fail",
"missing:": "missing",
"danger:": "danger",
"error:": "error",
"bug:": "bug",
"example:": "example",
"snippet:": "snippet",
"quote:": "quote",
"cite:": "cite",
}
def render_signature(signature):
# credits to https://github.com/tomchristie/mkautodoc
params = []
render_pos_only_separator = True
render_kw_only_separator = True
for parameter in signature.parameters.values():
value = parameter.name
if parameter.default is not parameter.empty:
value = f"{value}={parameter.default!r}"
if parameter.kind is parameter.VAR_POSITIONAL:
render_kw_only_separator = False
value = f"*{value}"
elif parameter.kind is parameter.VAR_KEYWORD:
value = f"**{value}"
elif parameter.kind is parameter.POSITIONAL_ONLY:
if render_pos_only_separator:
render_pos_only_separator = False
params.append("/")
elif parameter.kind is parameter.KEYWORD_ONLY:
if render_kw_only_separator:
render_kw_only_separator = False
params.append("*")
params.append(value)
return ", ".join(params)
def get_param_info(signature, param_name):
parameter = signature.parameters[param_name]
param_default = param_type = ""
if parameter.annotation is not parameter.empty:
if inspect.isclass(parameter.annotation) and not isinstance(parameter.annotation, GenericMeta):
param_type = parameter.annotation.__name__
else:
param_type = str(parameter.annotation).replace("typing.", "")
if parameter.kind is parameter.VAR_KEYWORD:
param_name = f"**{param_name}"
if parameter.default is not parameter.empty:
param_default = str(parameter.default)
return namedtuple("Param", "name default type")(param_name, param_default, param_type)
def get_return_type(signature):
ret = signature.return_annotation
if ret is not signature.empty:
if inspect.isclass(ret) and not isinstance(ret, GenericMeta):
ret_type = ret.__name__
else:
ret_type = str(ret).replace("typing.", "")
else:
ret_type = ""
return ret_type
def parse_docstring(docstring: str, signature) -> str:
"""
Parse a docstring!
Note:
to try notes.
Args:
docstring: this is the docstring to parse.
Raises:
OSError: no it doesn't lol.
Returns:
markdown: the docstring converted to a nice markdown text.
"""
params = {}
exceptions = {}
returns = ""
lines = docstring.split("\n")
new_lines = []
i = 0
while i < len(lines):
if lines[i].lower() in ("args:", "arguments:", "params:", "parameters:"):
j = i + 1
name = None
while j < len(lines) and lines[j].startswith(" "):
if lines[j].startswith(" ") and params[name]:
params[name] += " " + lines[j].lstrip(" ")
else:
name, description = lines[j].lstrip(" ").split(":", 1)
params[name] = description.lstrip(" ")
j += 1
new_lines.append("**Parameters**\n")
new_lines.append("| Name | Type | Description |")
new_lines.append("| ---- | ---- | ----------- |")
for param_name, param_description in params.items():
param_name, param_default, param_type = get_param_info(signature, param_name)
# if param_default:
# param_default = f"`{param_default}`"
new_lines.append(f"| `{param_name}` | `{param_type}` | {param_description} |")
new_lines.append("")
i = j - 1
elif lines[i].lower() in ("raise:", "raises:", "except:", "exceptions:"):
j = i + 1
name = None
while j < len(lines) and lines[j].startswith(" "):
if lines[j].startswith(" ") and exceptions[name]:
exceptions[name] += " " + lines[j].lstrip(" ")
else:
name, description = lines[j].lstrip(" ").split(":", 1)
exceptions[name] = description.lstrip(" ")
j += 1
new_lines.append("**Exceptions**\n")
new_lines.append("| Type | Description |")
new_lines.append("| ---- | ----------- |")
for exception_name, exception_description in exceptions.items():
new_lines.append(f"| `{exception_name}` | {exception_description} |")
new_lines.append("")
i = j - 1
elif lines[i].lower() in ("return:", "returns:"):
j = i + 1
while j < len(lines) and lines[j].startswith(" "):
description = lines[j].lstrip(" ")
returns += " " + description
j += 1
new_lines.append("**Returns**\n")
new_lines.append("| Type | Description |")
new_lines.append("| ---- | ----------- |")
new_lines.append(f"| `{get_return_type(signature)}` | {returns} |")
new_lines.append("")
i = j - 1
elif lines[i].lower() in ADMONITIONS.keys():
j = i + 1
admonition = []
while j < len(lines) and lines[j].startswith(" ") or lines[j] == "":
admonition.append(lines[j])
j += 1
new_lines.append(f"!!! {ADMONITIONS[lines[i].lower()]}")
new_lines.append("\n".join(admonition))
new_lines.append("")
i = j - 1
else:
new_lines.append(lines[i])
i += 1
return "\n".join(new_lines)
class Object:
"""
Class to store information about a Python object.
- the object category (ex: module, function, class, method or attribute)
- the object path (ex: `package.submodule.class.inner_class.method`)
- the object name (ex: `__init__`)
- the object docstring
- the object properties, depending on its category (ex: special, classmethod, etc.)
- the object signature (soon)
Each instance additionally stores references to its children, grouped by category (see Attributes).
"""
def __init__(
self,
category: str,
name: str,
path: str,
docstring: str,
properties: List[str],
signature: Optional[str] = None,
source: Optional[str] = None,
file: Optional[str] = None,
) -> None:
self.category = category
self.name = name
self.signature = signature or ""
self.path = path
self.docstring = docstring or ""
self.properties = properties
self.parent = None
self.source = source or ""
self.file = file or ""
self._path_map = {}
self.attributes = []
"""List of all the object's attributes."""
self.methods = []
"""List of all the object's methods."""
self.functions = []
"""List of all the object's functions."""
self.modules = []
"""List of all the object's submodules."""
self.classes = []
"""List of all the object's classes."""
self.children = []
"""List of all the object's children."""
def __str__(self):
return self.path
@property
def parent_path(self) -> str:
"""The parent's path, computed from the current path."""
return self.path.rsplit(".", 1)[0]
def add_child(self, obj: "Object") -> None:
"""Add an object as a child of this object."""
if obj.parent_path != self.path:
return
self.children.append(obj)
{
CATEGORY_ATTRIBUTE: self.attributes,
CATEGORY_METHOD: self.methods,
CATEGORY_FUNCTION: self.functions,
CATEGORY_MODULE: self.modules,
CATEGORY_CLASS: self.classes,
}.get(obj.category).append(obj)
obj.parent = self
self._path_map[obj.path] = obj
def add_children(self, children: List["Object"]) -> None:
"""Add a list of objects as children of this object."""
for child in children:
self.add_child(child)
def dispatch_attributes(self, attributes: List["Object"]) -> None:
for attribute in attributes:
try:
attach_to = self._path_map[attribute.parent_path]
except KeyError:
pass
else:
attach_to.attributes.append(attribute)
attach_to.children.append(attribute)
attribute.parent = attach_to
def render_references(self, base_url: str):
lines = [f"[{self.path}]: {base_url}#{self.path}"]
for child in self.children:
lines.append(child.render_references(base_url))
return "\n".join(lines)
def render(self, heading: int = 1, **config: Dict[str, Any]) -> str:
"""
Render this object as Markdown.
This is dirty and will be refactored as a Markdown extension soon.
Parameters:
heading: The initial level of heading to use.
config: The rendering configuration dictionary.
Returns:
The rendered Markdown.
"""
lines = []
show_top_object_heading = config.pop("show_top_object_heading", True)
show_top_object_full_path = config.pop("show_top_object_full_path", False)
if show_top_object_heading:
if self.docstring or not config["hide_no_doc"] or not self.parent:
signature = ""
toc_signature = ""
if self.category in (CATEGORY_FUNCTION, CATEGORY_METHOD):
if self.signature:
signature = f"({render_signature(self.signature)})"
toc_signature = "()"
object_heading = f"`:::python {self.path if show_top_object_full_path else self.name}{signature}`"
object_permalink = self.path.replace("__", r"\_\_")
object_toc = self.name.replace("__", r"\_\_") + toc_signature
properties = ", ".join(self.properties)
if properties:
object_heading += f"*({properties})*"
lines.append(
f"{'#' * heading} {object_heading} {{: #{object_permalink} data-toc-label='{object_toc}' }}"
)
if config["add_source_details"] and self.source:
lines.append("")
lines.append(f'??? note "Show source code"')
lines.append(f' ```python linenums="{self.source[1]}"')
lines.append(textwrap.indent("".join(self.source[0]), " "))
lines.append(" ```")
lines.append("")
if self.docstring:
lines.append(parse_docstring(self.docstring, self.signature))
lines.append("")
if config["group_by_categories"]:
lines.append(self.render_categories(heading + 1, **config,))
else:
for child in sorted(self.children, key=lambda o: o.name.lower()):
lines.append(child.render(heading + 1, **config,))
lines.append("")
return "\n".join(lines)
def render_categories(self, heading: int, **config):
extra_level = 1 if config["show_groups_headings"] else 0
lines = []
if self.attributes:
if config["show_groups_headings"]:
lines.append(f"{'#' * heading} Attributes")
lines.append("")
for attribute in sorted(self.attributes, key=lambda o: o.name.lower()):
lines.append(attribute.render(heading + extra_level, **config))
lines.append("")
if self.classes:
if config["show_groups_headings"]:
lines.append(f"{'#' * heading} Classes")
lines.append("")
for class_ in sorted(self.classes, key=lambda o: o.name.lower()):
lines.append(class_.render(heading + extra_level, **config))
lines.append("")
if self.methods:
if config["show_groups_headings"]:
lines.append(f"{'#' * heading} Methods")
lines.append("")
for method in sorted(self.methods, key=lambda o: o.name.lower()):
lines.append(method.render(heading + extra_level, **config))
lines.append("")
if self.functions:
if config["show_groups_headings"]:
lines.append(f"{'#' * heading} Functions")
lines.append("")
for function in sorted(self.functions, key=lambda o: o.name.lower()):
lines.append(function.render(heading + extra_level, **config))
lines.append("")
return "\n".join(lines)
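# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Shows how the tree is assembled: `add_child` only accepts an object whose `parent_path`
# equals this object's `path`, and also files the child under its per-category list.
def _object_tree_example():
    module = Object(category=CATEGORY_MODULE, name="pkg", path="pkg", docstring="", properties=[])
    klass = Object(category=CATEGORY_CLASS, name="Thing", path="pkg.Thing", docstring="", properties=[])
    method = Object(category=CATEGORY_METHOD, name="run", path="pkg.Thing.run", docstring="", properties=[])
    module.add_child(klass)   # accepted: parent_path "pkg" matches module.path
    module.add_child(method)  # ignored: parent_path is "pkg.Thing", not "pkg"
    klass.add_child(method)   # accepted and stored in klass.methods
    return module.classes, klass.methods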
class Documenter:
"""Class that contains the object documentation loading mechanisms."""
def __init__(self, global_filters):
self.global_filters = [(f, re.compile(f.lstrip("!"))) for f in global_filters]
def get_object_documentation(self, import_string: str) -> Object:
"""
Documenting to see return type.
Return:
The object with all its children populated.
"""
module, obj = import_object(import_string)
path = module.__name__
if inspect.ismodule(obj):
root_object = self.get_module_documentation(obj, path)
elif inspect.isclass(obj):
path = f"{path}.{obj.__name__}"
root_object = self.get_class_documentation(obj, path)
elif inspect.isfunction(obj):
path = f"{path}.{obj.__name__}"
root_object = self.get_function_documentation(obj, path)
else:
raise ValueError(f"{obj}:{type(obj)} not yet supported")
attributes = get_attributes(module)
root_object.dispatch_attributes([a for a in attributes if not self.filter_name_out(a.name)])
return root_object
def get_module_documentation(self, module: ModuleType, path: str) -> Object:
module_name = path.split(".")[-1]
module_file_basename = os.path.splitext(os.path.basename(module.__file__))[0]
properties = get_name_properties(module_file_basename, CATEGORY_MODULE)
root_object = Object(
category=CATEGORY_MODULE,
name=module_name,
path=path,
docstring=inspect.getdoc(module),
properties=properties,
)
for member_name, member in inspect.getmembers(module):
if self.filter_name_out(member_name):
continue
member_path = f"{path}.{member_name}"
if inspect.isclass(member) and inspect.getmodule(member) == module:
root_object.add_child(self.get_class_documentation(member, member_path))
elif inspect.isfunction(member) and inspect.getmodule(member) == module:
root_object.add_child(self.get_function_documentation(member, member_path))
return root_object
def get_class_documentation(self, class_: Type[Any], path: str) -> Object:
class_name = class_.__name__
root_object = Object(
category=CATEGORY_CLASS,
name=class_name,
path=path,
docstring=inspect.getdoc(class_),
properties=get_name_properties(class_name, CATEGORY_CLASS),
signature=inspect.signature(class_),
)
for member_name, member in sorted(class_.__dict__.items()):
if self.filter_name_out(member_name):
continue
member_path = f"{path}.{member_name}"
if inspect.isclass(member):
root_object.add_child(self.get_class_documentation(member, member_path))
continue
actual_member = getattr(class_, member_name)
docstring = inspect.getdoc(actual_member)
try:
source = inspect.getsourcelines(actual_member)
except TypeError:
source = ""
if isinstance(member, classmethod):
root_object.add_child(
Object(
category=CATEGORY_METHOD,
name=member_name,
path=member_path,
docstring=docstring,
properties=get_name_properties(member_name, CATEGORY_METHOD) + ["classmethod"],
source=source,
signature=inspect.signature(actual_member),
)
)
elif isinstance(member, staticmethod):
root_object.add_child(
Object(
category=CATEGORY_METHOD,
name=member_name,
path=member_path,
docstring=docstring,
properties=get_name_properties(member_name, CATEGORY_METHOD) + ["staticmethod"],
source=source,
signature=inspect.signature(actual_member),
)
)
elif isinstance(member, type(lambda: 0)): # regular method
root_object.add_child(
Object(
category=CATEGORY_METHOD,
name=member_name,
path=member_path,
docstring=docstring,
properties=get_name_properties(member_name, CATEGORY_METHOD),
source=source,
signature=inspect.signature(actual_member),
)
)
elif isinstance(member, property):
properties = ["property"]
if member.fset is None:
properties.append("readonly")
root_object.add_child(
Object(
category=CATEGORY_ATTRIBUTE,
name=member_name,
path=member_path,
docstring=docstring,
properties=properties + get_name_properties(member_name, CATEGORY_ATTRIBUTE),
source=source,
signature=inspect.signature(actual_member.fget),
)
)
return root_object
def get_function_documentation(self, function: Callable, path: str) -> Object:
function_name = function.__name__
return Object(
category=CATEGORY_FUNCTION,
name=function_name,
path=path,
docstring=inspect.getdoc(function),
properties=get_name_properties(function_name, CATEGORY_FUNCTION),
source=inspect.getsourcelines(function),
signature=inspect.signature(function),
)
@lru_cache(maxsize=None)
def filter_name_out(self, name: str) -> bool:
keep = True
for f, regex in self.global_filters:
is_matching = bool(regex.match(name))
if is_matching:
if str(f).startswith("!"):
is_matching = not is_matching
keep = is_matching
return not keep
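# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Global filters are regular expressions; a leading "!" negates the match, so the filter
# below drops single-underscore names while keeping dunders such as __init__.
def _filter_name_out_example():
    documenter = Documenter(global_filters=["!^_[^_]"])
    return [name for name in ("__init__", "_private", "public")
            if not documenter.filter_name_out(name)]  # -> ["__init__", "public"]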
|
the-stack_0_2238 | import os
import asyncio
import pygame
import random
from functools import partial
import json
import websockets
import logging
import argparse
import time
from mapa import Map, Tiles
logging.basicConfig(level=logging.DEBUG)
logger_websockets = logging.getLogger("websockets")
logger_websockets.setLevel(logging.WARN)
logger = logging.getLogger("Map")
logger.setLevel(logging.DEBUG)
BOMBERMAN = {
"up": (3 * 16, 1 * 16),
"left": (0, 0),
"down": (3 * 16, 0),
"right": (0, 1 * 16),
}
BALLOOM = {
"up": (0, 15 * 16),
"left": (16, 15 * 16),
"down": (2 * 16, 15 * 16),
"right": (3 * 16, 15 * 16),
}
ONEAL = {
"up": (0, 16 * 16),
"left": (16, 16 * 16),
"down": (2 * 16, 16 * 16),
"right": (3 * 16, 16 * 16),
}
DOLL = {
"up": (0, 17 * 16),
"left": (16, 17 * 16),
"down": (2 * 16, 17 * 16),
"right": (3 * 16, 17 * 16),
}
MINVO = {
"up": (0, 18 * 16),
"left": (16, 18 * 16),
"down": (2 * 16, 18 * 16),
"right": (3 * 16, 18 * 16),
}
ENEMIES = {"Balloom": BALLOOM, "Oneal": ONEAL, "Doll": DOLL, "Minvo": MINVO}
POWERUPS = {"Bombs": (0, 14 * 16), "Flames": (1 * 16, 14 * 16), "Detonator": (4 * 16, 14 * 16)}
STONE = (48, 48)
WALL = (64, 48)
PASSAGE = (0, 64)
EXIT = (11 * 16, 3 * 16)
BOMB = [(32, 48), (16, 48), (0, 48)]
EXPLOSION = {
"c": (112, 96),
"l": (96, 96),
"r": (128, 96),
"u": (112, 80),
"d": (112, 112),
"xl": (80, 96),
"xr": (144, 96),
"xu": (112, 64),
"xd": (112, 128),
}
FALLOUT = {"c": (32, 96)}
CHAR_LENGTH = 16
CHAR_SIZE = CHAR_LENGTH, CHAR_LENGTH
SCALE = 1
COLORS = {
"white": (255, 255, 255),
"red": (255, 0, 0),
"pink": (255, 105, 180),
"blue": (135, 206, 235),
"orange": (255, 165, 0),
"yellow": (255, 255, 0),
"grey": (120, 120, 120),
}
BACKGROUND = (0, 0, 0)
RANKS = {
1: "1ST",
2: "2ND",
3: "3RD",
4: "4TH",
5: "5TH",
6: "6TH",
7: "7TH",
8: "8TH",
9: "9TH",
10: "10TH",
}
SPRITES = None
async def messages_handler(ws_path, queue):
async with websockets.connect(ws_path) as websocket:
await websocket.send(json.dumps({"cmd": "join"}))
while True:
r = await websocket.recv()
queue.put_nowait(r)
class GameOver(BaseException):
pass
class Artifact(pygame.sprite.Sprite):
def __init__(self, *args, **kw):
self.x, self.y = None, None # postpone to update_sprite()
x, y = kw.pop("pos", ((kw.pop("x", 0), kw.pop("y", 0))))
new_pos = scale((x, y))
self.image = pygame.Surface(CHAR_SIZE)
self.rect = pygame.Rect(new_pos + CHAR_SIZE)
self.update_sprite((x, y))
super().__init__()
def update_sprite(self, pos=None):
if not pos:
pos = self.x, self.y
else:
pos = scale(pos)
self.rect = pygame.Rect(pos + CHAR_SIZE)
self.image.fill((0, 0, 230))
self.image.blit(*self.sprite)
# self.image = pygame.transform.scale(self.image, scale((1, 1)))
self.x, self.y = pos
def update(self, *args):
self.update_sprite()
class BomberMan(Artifact):
def __init__(self, *args, **kw):
self.direction = "left"
self.sprite = (SPRITES, (0, 0), (*BOMBERMAN[self.direction], *scale((1, 1))))
super().__init__(*args, **kw)
def update(self, new_pos):
x, y = scale(new_pos)
if x > self.x:
self.direction = "right"
if x < self.x:
self.direction = "left"
if y > self.y:
self.direction = "down"
if y < self.y:
self.direction = "up"
self.sprite = (SPRITES, (0, 0), (*BOMBERMAN[self.direction], *scale((1, 1))))
self.update_sprite(tuple(new_pos))
class Enemy(Artifact):
def __init__(self, *args, **kw):
self.direction = "left"
self.name = kw.pop("name")
self.sprite = (
SPRITES,
(0, 0),
(*ENEMIES[self.name][self.direction], *scale((1, 1))),
)
super().__init__(*args, **kw)
def update(self, new_pos):
x, y = scale(new_pos)
if x > self.x:
self.direction = "right"
if x < self.x:
self.direction = "left"
if y > self.y:
self.direction = "down"
if y < self.y:
self.direction = "up"
self.sprite = (
SPRITES,
(0, 0),
(*ENEMIES[self.name][self.direction], *scale((1, 1))),
)
self.update_sprite(new_pos)
class Bomb(Artifact):
def __init__(self, *args, **kw):
self.index = 0
self.sprite = (SPRITES, (0, 0), (*BOMB[self.index], *scale((1, 1))))
self.exploded = False
self.timeout = kw.pop("timeout", -1)
self.radius = kw.pop("radius", 0)
super().__init__(*args, **kw)
def update(self, bombs_state):
for pos, timeout, radius in bombs_state:
if scale(pos) == (self.x, self.y):
# It's me!
self.timeout = int(timeout)
self.radius = radius
self.index = (self.index + 1) % len(BOMB)
self.sprite = (SPRITES, (0, 0), (*BOMB[self.index], *scale((1, 1))))
self.update_sprite()
if self.timeout == 0:
self.exploded = True
self.sprite = ()
self.rect.inflate_ip(
self.radius * 2 * CHAR_LENGTH, self.radius * 2 * CHAR_LENGTH
)
self.image = pygame.Surface(
(
self.radius * 2 * CHAR_LENGTH + CHAR_LENGTH,
self.radius * 2 * CHAR_LENGTH + CHAR_LENGTH,
)
)
self.image.blit(
SPRITES,
scale((self.radius, self.radius)),
(*EXPLOSION["c"], *scale((1, 1))),
)
for r in range(1, self.radius):
self.image.blit(
SPRITES,
scale((self.radius - r, self.radius)),
(*EXPLOSION["l"], *scale((1, 1))),
)
self.image.blit(
SPRITES,
scale((self.radius + r, self.radius)),
(*EXPLOSION["r"], *scale((1, 1))),
)
self.image.blit(
SPRITES,
scale((self.radius, self.radius - r)),
(*EXPLOSION["u"], *scale((1, 1))),
)
self.image.blit(
SPRITES,
scale((self.radius, self.radius + r)),
(*EXPLOSION["d"], *scale((1, 1))),
)
self.image.blit(
SPRITES, scale((0, self.radius)), (*EXPLOSION["xl"], *scale((1, 1)))
)
self.image.blit(
SPRITES,
scale((2 * self.radius, self.radius)),
(*EXPLOSION["xr"], *scale((1, 1))),
)
self.image.blit(
SPRITES, scale((self.radius, 0)), (*EXPLOSION["xu"], *scale((1, 1)))
)
self.image.blit(
SPRITES,
scale((self.radius, 2 * self.radius)),
(*EXPLOSION["xd"], *scale((1, 1))),
)
class Wall(Artifact):
def __init__(self, *args, **kw):
self.sprite = (SPRITES, (0, 0), (*WALL, *scale((1, 1))))
super().__init__(*args, **kw)
class Exit(Artifact):
def __init__(self, *args, **kw):
self.sprite = (SPRITES, (0, 0), (*EXIT, *scale((1, 1))))
super().__init__(*args, **kw)
class Powerups(Artifact):
def __init__(self, *args, **kw):
self.type = kw.pop("name")
self.sprite = (SPRITES, (0, 0), (*POWERUPS[self.type], *scale((1, 1))))
super().__init__(*args, **kw)
def clear_callback(surf, rect):
"""beneath everything there is a passage."""
surf.blit(SPRITES, (rect.x, rect.y), (*PASSAGE, rect.width, rect.height))
def scale(pos):
x, y = pos
return int(x * CHAR_LENGTH / SCALE), int(y * CHAR_LENGTH / SCALE)
def draw_background(mapa):
background = pygame.Surface(scale((int(mapa.size[0]), int(mapa.size[1]))))
for x in range(int(mapa.size[0])):
for y in range(int(mapa.size[1])):
wx, wy = scale((x, y))
if mapa.map[x][y] == Tiles.STONE:
background.blit(SPRITES, (wx, wy), (*STONE, *scale((1, 1))))
else:
background.blit(SPRITES, (wx, wy), (*PASSAGE, *scale((1, 1))))
return background
def draw_info(SCREEN, text, pos, color=(0, 0, 0), background=None):
myfont = pygame.font.Font(None, int(22 / SCALE))
textsurface = myfont.render(text, True, color, background)
x, y = pos
if x > SCREEN.get_width():
pos = SCREEN.get_width() - textsurface.get_width(), y
if y > SCREEN.get_height():
pos = x, SCREEN.get_height() - textsurface.get_height()
if background:
SCREEN.blit(background, pos)
else:
erase = pygame.Surface(textsurface.get_size())
erase.fill(COLORS["grey"])
# SCREEN.blit(erase, pos)
SCREEN.blit(textsurface, pos)
async def main_loop(q):
while True:
await main_game()
async def main_game():
global SPRITES, SCREEN
main_group = pygame.sprite.LayeredUpdates()
bombs_group = pygame.sprite.OrderedUpdates()
enemies_group = pygame.sprite.OrderedUpdates()
walls_group = pygame.sprite.OrderedUpdates()
logging.info("Waiting for map information from server")
state = await q.get() # first state message includes map information
logging.debug("Initial game status: %s", state)
newgame_json = json.loads(state)
GAME_SPEED = newgame_json["fps"]
mapa = Map(size=newgame_json["size"], mapa=newgame_json["map"])
TIMEOUT = newgame_json["timeout"]
SCREEN = pygame.display.set_mode(scale(mapa.size))
SPRITES = pygame.image.load("data/nes.png").convert_alpha()
BACKGROUND = draw_background(mapa)
SCREEN.blit(BACKGROUND, (0, 0))
main_group.add(BomberMan(pos=mapa.bomberman_spawn))
state = {"score": 0, "player": "player1", "bomberman": (1, 1)}
while True:
pygame.event.pump()
if pygame.key.get_pressed()[pygame.K_ESCAPE]:
asyncio.get_event_loop().stop()
main_group.clear(SCREEN, clear_callback)
bombs_group.clear(SCREEN, BACKGROUND)
enemies_group.clear(SCREEN, clear_callback)
if "score" in state and "player" in state:
text = str(state["score"])
draw_info(SCREEN, text.zfill(6), (0, 0))
text = str(state["player"]).rjust(32)
draw_info(SCREEN, text, (4000, 0))
if "bombs" in state:
for bomb in bombs_group:
if bomb.exploded:
bombs_group.remove(bomb)
if len(bombs_group.sprites()) < len(state["bombs"]):
pos, timeout, radius = state["bombs"][-1]
bombs_group.add(Bomb(pos=pos, timeout=timeout, radius=radius))
bombs_group.update(state["bombs"])
if "enemies" in state:
enemies_group.empty()
for enemy in state["enemies"]:
enemies_group.add(Enemy(name=enemy["name"], pos=enemy["pos"]))
if "walls" in state:
walls_group.empty()
for wall in state["walls"]:
walls_group.add(Wall(pos=wall))
if "exit" in state and len(state["exit"]):
if not [p for p in main_group if isinstance(p, Exit)]:
logger.debug("Add Exit")
ex = Exit(pos=state["exit"])
main_group.add(ex)
main_group.move_to_back(ex)
if "powerups" in state:
for pos, name in state["powerups"]:
if name not in [p.type for p in main_group if isinstance(p, Powerups)]:
logger.debug(f"Add {name}")
p = Powerups(pos=pos, name=name)
main_group.add(p)
main_group.move_to_back(p)
for powerup in main_group:
if isinstance(powerup, Powerups):
name = powerup.type
if name not in [p[1] for p in state["powerups"]]:
logger.debug(f"Remove {name}")
main_group.remove(powerup)
walls_group.draw(SCREEN)
main_group.draw(SCREEN)
enemies_group.draw(SCREEN)
bombs_group.draw(SCREEN)
# Highscores Board
if (
("lives" in state and state["lives"] == 0)
or ("step" in state and state["step"] >= TIMEOUT)
or (
"bomberman" in state
and "exit" in state
and state["bomberman"] == state["exit"]
and "enemies" in state
and state["enemies"] == []
)
):
highscores = newgame_json["highscores"]
HIGHSCORES = pygame.Surface(scale((20, 16)))
HIGHSCORES.fill(COLORS["grey"])
draw_info(HIGHSCORES, "THE 10 BEST PLAYERS", scale((5, 1)), COLORS["white"])
draw_info(HIGHSCORES, "RANK", scale((2, 3)), COLORS["orange"])
draw_info(HIGHSCORES, "SCORE", scale((6, 3)), COLORS["orange"])
draw_info(HIGHSCORES, "NAME", scale((11, 3)), COLORS["orange"])
for i, highscore in enumerate(highscores):
c = (i % 5) + 1
draw_info(
HIGHSCORES,
RANKS[i + 1],
scale((2, i + 5)),
list(COLORS.values())[c],
)
draw_info(
HIGHSCORES,
str(highscore[1]),
scale((6, i + 5)),
list(COLORS.values())[c],
)
draw_info(
HIGHSCORES,
highscore[0],
scale((11, i + 5)),
list(COLORS.values())[c],
)
SCREEN.blit(
HIGHSCORES,
(
(SCREEN.get_width() - HIGHSCORES.get_width()) / 2,
(SCREEN.get_height() - HIGHSCORES.get_height()) / 2,
),
)
if "bomberman" in state:
main_group.update(state["bomberman"])
pygame.display.flip()
try:
state = json.loads(q.get_nowait())
if (
"step" in state
and state["step"] == 1
or "level" in state
and state["level"] != mapa.level
):
# New level! lets clean everything up!
SCREEN.blit(BACKGROUND, (0, 0))
walls_group.empty()
main_group.empty()
enemies_group.empty()
bombs_group.empty()
main_group.add(BomberMan(pos=mapa.bomberman_spawn))
mapa.level = state["level"]
except asyncio.queues.QueueEmpty:
await asyncio.sleep(1.0 / GAME_SPEED)
continue
if __name__ == "__main__":
SERVER = os.environ.get("SERVER", "localhost")
PORT = os.environ.get("PORT", "8000")
parser = argparse.ArgumentParser()
parser.add_argument("--server", help="IP address of the server", default=SERVER)
parser.add_argument(
"--scale", help="reduce size of window by x times", type=int, default=1
)
parser.add_argument("--port", help="TCP port", type=int, default=PORT)
args = parser.parse_args()
SCALE = args.scale
LOOP = asyncio.get_event_loop()
pygame.font.init()
q = asyncio.Queue()
ws_path = f"ws://{args.server}:{args.port}/viewer"
try:
LOOP.run_until_complete(
asyncio.gather(messages_handler(ws_path, q), main_loop(q))
)
finally:
LOOP.stop()
|
the-stack_0_2240 | from in_data import in_data
from random import choice
import operator
from GA import *
class main:
DeliveryPoints = in_data()
nth_population = 0
BestSolution = None
population= Inicial_Population(DeliveryPoints)
while nth_population < EndPoint:
nth_population+=1
Population_Fitness = PopulationFitness(population, DeliveryPoints)
if BestSolution == None:
BestSolution = max(Population_Fitness.items(), key=operator.itemgetter(1))
else:
b = max(Population_Fitness.items(), key=operator.itemgetter(1))
if b[1] > BestSolution[1]:
BestSolution = max(Population_Fitness.items(), key=operator.itemgetter(1))
selection = Selection(Population_Fitness)
i = 0
#crossing over
NextGeneration = []
while i < PopulationSize:
while True:
p, j = choice(list(selection.values())), choice(list(selection.values()))
p1 = [i for i in p]
p2 = [i for i in j]
while p2 == p1:
j = choice(list(selection.values()))
p2 = [i for i in j]
if p2 != p1: break
f1, f2 = CrossingOver(p1, p2)
if f1 not in NextGeneration:
NextGeneration.append(f1)
i+=1
if f2 not in NextGeneration:
NextGeneration.append(f2)
i+=1
#mutation
population = Mutation(NextGeneration)
print(BestSolution[0])
|
the-stack_0_2241 | # coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model evaluation tools for TFGAN.
These methods come from https://arxiv.org/abs/1606.03498 and
https://arxiv.org/abs/1706.08500.
NOTE: This implementation uses the same weights as in
https://github.com/openai/improved-gan/blob/master/inception_score/model.py,
but is more numerically stable and is an unbiased estimator of the true
Inception score even when splitting the inputs into batches.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import sys
import tarfile
# Dependency imports
from six.moves import urllib
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
__all__ = [
'get_graph_def_from_disk',
'get_graph_def_from_resource',
'get_graph_def_from_url_tarball',
'preprocess_image',
'run_image_classifier',
'run_inception',
'inception_score',
'classifier_score',
'classifier_score_from_logits',
'frechet_inception_distance',
'frechet_classifier_distance',
'frechet_classifier_distance_from_activations',
'INCEPTION_DEFAULT_IMAGE_SIZE',
]
INCEPTION_URL = 'http://download.tensorflow.org/models/frozen_inception_v1_2015_12_05.tar.gz'
INCEPTION_FROZEN_GRAPH = 'inceptionv1_for_inception_score.pb'
INCEPTION_INPUT = 'Mul:0'
INCEPTION_OUTPUT = 'logits:0'
INCEPTION_FINAL_POOL = 'pool_3:0'
INCEPTION_DEFAULT_IMAGE_SIZE = 299
def _validate_images(images, image_size):
images = ops.convert_to_tensor(images)
images.shape.with_rank(4)
images.shape.assert_is_compatible_with(
[None, image_size, image_size, None])
return images
def _symmetric_matrix_square_root(mat, eps=1e-10):
"""Compute square root of a symmetric matrix.
Note that this is different from an elementwise square root. We want to
compute M' where M' = sqrt(mat) such that M' * M' = mat.
Also note that this method **only** works for symmetric matrices.
Args:
mat: Matrix to take the square root of.
eps: Small epsilon such that any element less than eps will not be square
rooted to guard against numerical instability.
Returns:
Matrix square root of mat.
"""
# Unlike numpy, tensorflow's return order is (s, u, v)
s, u, v = linalg_ops.svd(mat)
# sqrt is unstable around 0, just use 0 in such case
si = array_ops.where(math_ops.less(s, eps), s, math_ops.sqrt(s))
# Note that the v returned by Tensorflow is v = V
# (when referencing the equation A = U S V^T)
# This is unlike Numpy which returns v = V^T
return math_ops.matmul(
math_ops.matmul(u, array_ops.diag(si)), v, transpose_b=True)
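# --- Illustrative NumPy sketch (added for clarity; not part of the original module). ---
# Mirrors the construction above: with an SVD mat = U diag(s) V^T of a symmetric PSD
# matrix, sqrt(mat) = U diag(sqrt(s)) V^T; numpy's svd already returns V^T as `vh`.
def _symmetric_matrix_square_root_numpy_sketch(mat_np, eps=1e-10):
  import numpy as np
  u, s, vh = np.linalg.svd(mat_np)
  si = np.where(s < eps, s, np.sqrt(s))
  return u @ np.diag(si) @ vh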
def preprocess_image(
images, height=INCEPTION_DEFAULT_IMAGE_SIZE,
width=INCEPTION_DEFAULT_IMAGE_SIZE, scope=None):
"""Prepare a batch of images for evaluation.
This is the preprocessing portion of the graph from
http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz.
Note that it expects Tensors in [0, 255]. This function maps pixel values to
[-1, 1] and resizes to match the InceptionV1 network.
Args:
images: 3-D or 4-D Tensor of images. Values are in [0, 255].
height: Integer. Height of resized output image.
width: Integer. Width of resized output image.
scope: Optional scope for name_scope.
Returns:
3-D or 4-D float Tensor of prepared image(s). Values are in [-1, 1].
"""
is_single = images.shape.ndims == 3
with ops.name_scope(scope, 'preprocess', [images, height, width]):
if not images.dtype.is_floating:
images = math_ops.to_float(images)
if is_single:
images = array_ops.expand_dims(images, axis=0)
resized = image_ops.resize_bilinear(images, [height, width])
resized = (resized - 128.0) / 128.0
if is_single:
resized = array_ops.squeeze(resized, axis=0)
return resized
def _kl_divergence(p, p_logits, q):
"""Computes the Kullback-Liebler divergence between p and q.
This function uses p's logits in some places to improve numerical stability.
Specifically:
KL(p || q) = sum[ p * log(p / q) ]
= sum[ p * ( log(p) - log(q) ) ]
= sum[ p * ( log_softmax(p_logits) - log(q) ) ]
Args:
p: A 2-D floating-point Tensor p_ij, where `i` corresponds to the minibatch
example and `j` corresponds to the probability of being in class `j`.
p_logits: A 2-D floating-point Tensor corresponding to logits for `p`.
q: A 1-D floating-point Tensor, where q_j corresponds to the probability
of class `j`.
Returns:
KL divergence between two distributions. Output dimension is 1D, one entry
per distribution in `p`.
Raises:
ValueError: If any of the inputs aren't floating-point.
ValueError: If p or p_logits aren't 2D.
ValueError: If q isn't 1D.
"""
for tensor in [p, p_logits, q]:
if not tensor.dtype.is_floating:
raise ValueError('Input %s must be floating type.', tensor.name)
p.shape.assert_has_rank(2)
p_logits.shape.assert_has_rank(2)
q.shape.assert_has_rank(1)
return math_ops.reduce_sum(
p * (nn_ops.log_softmax(p_logits) - math_ops.log(q)), axis=1)
def get_graph_def_from_disk(filename):
"""Get a GraphDef proto from a disk location."""
with gfile.FastGFile(filename, 'rb') as f:
return graph_pb2.GraphDef.FromString(f.read())
def get_graph_def_from_resource(filename):
"""Get a GraphDef proto from within a .par file."""
return graph_pb2.GraphDef.FromString(resource_loader.load_resource(filename))
def get_graph_def_from_url_tarball(url, filename):
"""Get a GraphDef proto from a tarball on the web."""
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
url, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
tar_filename, _ = urllib.request.urlretrieve(url, reporthook=_progress)
with tarfile.open(tar_filename, 'r:gz') as tar:
proto_str = tar.extractfile(filename).read()
return graph_pb2.GraphDef.FromString(proto_str)
def _default_graph_def_fn():
return get_graph_def_from_url_tarball(INCEPTION_URL, INCEPTION_FROZEN_GRAPH)
def run_inception(images,
graph_def=None,
default_graph_def_fn=_default_graph_def_fn,
image_size=INCEPTION_DEFAULT_IMAGE_SIZE,
input_tensor=INCEPTION_INPUT,
output_tensor=INCEPTION_OUTPUT):
"""Run images through a pretrained Inception classifier.
Args:
images: Input tensors. Must be [batch, height, width, channels]. Input shape
and values must be in [-1, 1], which can be achieved using
`preprocess_image`.
graph_def: A GraphDef proto of a pretrained Inception graph. If `None`,
call `default_graph_def_fn` to get GraphDef.
default_graph_def_fn: A function that returns a GraphDef. Used if
`graph_def` is `None`. By default, returns a pretrained InceptionV1 graph.
image_size: Required image width and height. See unit tests for the default
values.
input_tensor: Name of input Tensor.
output_tensor: Name or list of output Tensors. This function will compute
activations at the specified layer. Examples include INCEPTION_OUTPUT
and INCEPTION_FINAL_POOL which would result in this function computing
the final logits or the penultimate pooling layer.
Returns:
Tensor or Tensors corresponding to computed `output_tensor`.
Raises:
ValueError: If images are not the correct size.
ValueError: If neither `graph_def` nor `default_graph_def_fn` are provided.
"""
images = _validate_images(images, image_size)
if graph_def is None:
if default_graph_def_fn is None:
raise ValueError('If `graph_def` is `None`, must provide '
'`default_graph_def_fn`.')
graph_def = default_graph_def_fn()
activations = run_image_classifier(images, graph_def, input_tensor,
output_tensor)
if isinstance(activations, list):
for i, activation in enumerate(activations):
if array_ops.rank(activation) != 2:
activations[i] = layers.flatten(activation)
else:
if array_ops.rank(activations) != 2:
activations = layers.flatten(activations)
return activations
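# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Typical pipeline: map a 4-D batch of uint8 images in [0, 255] to [-1, 1] with
# `preprocess_image`, then request either the logits (for the Inception score) or the
# final pool layer (for FID). With no `graph_def`, the frozen graph is downloaded.
def _run_inception_usage_sketch(uint8_image_batch):
  prepared = preprocess_image(uint8_image_batch)  # resized to 299x299, values in [-1, 1]
  logits = run_inception(prepared)  # defaults to INCEPTION_OUTPUT
  pool_features = run_inception(prepared, output_tensor=INCEPTION_FINAL_POOL)
  return logits, pool_features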
def run_image_classifier(tensor, graph_def, input_tensor,
output_tensor, scope='RunClassifier'):
"""Runs a network from a frozen graph.
Args:
tensor: An Input tensor.
graph_def: A GraphDef proto.
input_tensor: Name of input tensor in graph def.
output_tensor: A tensor name or list of tensor names in graph def.
scope: Name scope for classifier.
Returns:
Classifier output if `output_tensor` is a string, or a list of outputs if
`output_tensor` is a list.
Raises:
ValueError: If `input_tensor` or `output_tensor` aren't in the graph_def.
"""
input_map = {input_tensor: tensor}
is_singleton = isinstance(output_tensor, str)
if is_singleton:
output_tensor = [output_tensor]
classifier_outputs = importer.import_graph_def(
graph_def, input_map, output_tensor, name=scope)
if is_singleton:
classifier_outputs = classifier_outputs[0]
return classifier_outputs
def classifier_score(images, classifier_fn, num_batches=1):
"""Classifier score for evaluating a conditional generative model.
This is based on the Inception Score, but for an arbitrary classifier.
This technique is described in detail in https://arxiv.org/abs/1606.03498. In
summary, this function calculates
exp( E[ KL(p(y|x) || p(y)) ] )
which captures how different the network's classification prediction is from
the prior distribution over classes.
NOTE: This function consumes images, computes their logits, and then
computes the classifier score. If you would like to precompute many logits for
large batches, use classifier_score_from_logits(), which this method also
uses.
Args:
images: Images to calculate the classifier score for.
classifier_fn: A function that takes images and produces logits based on a
classifier.
num_batches: Number of batches to split `generated_images` in to in order to
efficiently run them through the classifier network.
Returns:
The classifier score. A floating-point scalar of the same type as the output
of `classifier_fn`.
"""
generated_images_list = array_ops.split(
images, num_or_size_splits=num_batches)
# Compute the classifier splits using the memory-efficient `map_fn`.
logits = functional_ops.map_fn(
fn=classifier_fn,
elems=array_ops.stack(generated_images_list),
parallel_iterations=1,
back_prop=False,
swap_memory=True,
name='RunClassifier')
logits = array_ops.concat(array_ops.unstack(logits), 0)
return classifier_score_from_logits(logits)
def classifier_score_from_logits(logits):
"""Classifier score for evaluating a generative model from logits.
This method computes the classifier score for a set of logits. This can be
used independently of the classifier_score() method, especially in the case
of using large batches during evaluation where we would like precompute all
of the logits before computing the classifier score.
This technique is described in detail in https://arxiv.org/abs/1606.03498. In
summary, this function calculates:
exp( E[ KL(p(y|x) || p(y)) ] )
which captures how different the network's classification prediction is from
the prior distribution over classes.
Args:
logits: Precomputed 2D tensor of logits that will be used to
compute the classifier score.
Returns:
The classifier score. A floating-point scalar of the same type as the output
of `logits`.
"""
logits.shape.assert_has_rank(2)
# Use maximum precision for best results.
logits_dtype = logits.dtype
if logits_dtype != dtypes.float64:
logits = math_ops.to_double(logits)
p = nn_ops.softmax(logits)
q = math_ops.reduce_mean(p, axis=0)
kl = _kl_divergence(p, logits, q)
kl.shape.assert_has_rank(1)
log_score = math_ops.reduce_mean(kl)
final_score = math_ops.exp(log_score)
if logits_dtype != dtypes.float64:
final_score = math_ops.cast(final_score, logits_dtype)
return final_score
inception_score = functools.partial(
classifier_score,
classifier_fn=functools.partial(
run_inception, output_tensor=INCEPTION_OUTPUT))
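# --- Illustrative NumPy sketch (added for clarity; not part of the original module). ---
# Mirrors `classifier_score_from_logits`: softmax the logits, take the marginal q as the
# mean prediction, and return exp(E[KL(p(y|x) || p(y))]).
def _classifier_score_numpy_sketch(logits_np):
  import numpy as np
  z = logits_np - logits_np.max(axis=1, keepdims=True)  # stable log-softmax
  log_p = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
  p = np.exp(log_p)
  q = p.mean(axis=0)
  kl = np.sum(p * (log_p - np.log(q)), axis=1)
  return float(np.exp(kl.mean()))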
def trace_sqrt_product(sigma, sigma_v):
"""Find the trace of the positive sqrt of product of covariance matrices.
'_symmetric_matrix_square_root' only works for symmetric matrices, so we
cannot just take _symmetric_matrix_square_root(sigma * sigma_v).
('sigma' and 'sigma_v' are symmetric, but their product is not necessarily).
Let sigma = A A so A = sqrt(sigma), and sigma_v = B B.
We want to find trace(sqrt(sigma sigma_v)) = trace(sqrt(A A B B))
Note the following properties:
(i) forall M1, M2: eigenvalues(M1 M2) = eigenvalues(M2 M1)
=> eigenvalues(A A B B) = eigenvalues (A B B A)
(ii) if M1 = sqrt(M2), then eigenvalues(M1) = sqrt(eigenvalues(M2))
=> eigenvalues(sqrt(sigma sigma_v)) = sqrt(eigenvalues(A B B A))
(iii) forall M: trace(M) = sum(eigenvalues(M))
=> trace(sqrt(sigma sigma_v)) = sum(eigenvalues(sqrt(sigma sigma_v)))
= sum(sqrt(eigenvalues(A B B A)))
= sum(eigenvalues(sqrt(A B B A)))
= trace(sqrt(A B B A))
= trace(sqrt(A sigma_v A))
A = sqrt(sigma). Both sigma and A sigma_v A are symmetric, so we **can**
use the _symmetric_matrix_square_root function to find the roots of these
matrices.
Args:
sigma: a square, symmetric, real, positive semi-definite covariance matrix
sigma_v: same as sigma
Returns:
The trace of the positive square root of sigma*sigma_v
"""
# Note sqrt_sigma is called "A" in the proof above
sqrt_sigma = _symmetric_matrix_square_root(sigma)
# This is sqrt(A sigma_v A) above
sqrt_a_sigmav_a = math_ops.matmul(
sqrt_sigma, math_ops.matmul(sigma_v, sqrt_sigma))
return math_ops.trace(_symmetric_matrix_square_root(sqrt_a_sigmav_a))
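# --- Illustrative NumPy/SciPy sketch (added for clarity; not part of the original module). ---
# Checks the identity used above on small random SPD matrices:
# trace(sqrt(sigma sigma_v)) equals trace(sqrt(A sigma_v A)) with A = sqrt(sigma).
def _trace_sqrt_product_numpy_sketch(dim=4, seed=0):
  import numpy as np
  from scipy.linalg import sqrtm
  rng = np.random.RandomState(seed)
  sigma = np.cov(rng.randn(50, dim), rowvar=False)
  sigma_v = np.cov(rng.randn(50, dim), rowvar=False)
  a = sqrtm(sigma)
  lhs = np.trace(sqrtm(sigma.dot(sigma_v))).real
  rhs = np.trace(sqrtm(a.dot(sigma_v).dot(a))).real
  return lhs, rhs  # equal up to numerical error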
def frechet_classifier_distance(real_images,
generated_images,
classifier_fn,
num_batches=1):
"""Classifier distance for evaluating a generative model.
This is based on the Frechet Inception distance, but for an arbitrary
classifier.
This technique is described in detail in https://arxiv.org/abs/1706.08500.
Given two Gaussian distribution with means m and m_w and covariance matrices
C and C_w, this function calculates
|m - m_w|^2 + Tr(C + C_w - 2(C * C_w)^(1/2))
which captures how different the distributions of real images and generated
images (or more accurately, their visual features) are. Note that unlike the
Inception score, this is a true distance and utilizes information about real
world images.
Note that when computed using sample means and sample covariance matrices,
Frechet distance is biased. It is more biased for small sample sizes. (e.g.
even if the two distributions are the same, for a small sample size, the
expected Frechet distance is large). It is important to use the same
sample size to compute frechet classifier distance when comparing two
generative models.
NOTE: This function consumes images, computes their activations, and then
computes the classifier score. If you would like to precompute many
activations for real and generated images for large batches, please use
frechet_classifier_distance_from_activations(), which this method also uses.
Args:
real_images: Real images to use to compute Frechet Inception distance.
generated_images: Generated images to use to compute Frechet Inception
distance.
classifier_fn: A function that takes images and produces activations
based on a classifier.
num_batches: Number of batches to split images in to in order to
efficiently run them through the classifier network.
Returns:
The Frechet Inception distance. A floating-point scalar of the same type
as the output of `classifier_fn`.
"""
real_images_list = array_ops.split(
real_images, num_or_size_splits=num_batches)
generated_images_list = array_ops.split(
generated_images, num_or_size_splits=num_batches)
imgs = array_ops.stack(real_images_list + generated_images_list)
# Compute the activations using the memory-efficient `map_fn`.
activations = functional_ops.map_fn(
fn=classifier_fn,
elems=imgs,
parallel_iterations=1,
back_prop=False,
swap_memory=True,
name='RunClassifier')
# Split the activations by the real and generated images.
real_a, gen_a = array_ops.split(activations, [num_batches, num_batches], 0)
# Ensure the activations have the right shapes.
real_a = array_ops.concat(array_ops.unstack(real_a), 0)
gen_a = array_ops.concat(array_ops.unstack(gen_a), 0)
return frechet_classifier_distance_from_activations(real_a, gen_a)
def frechet_classifier_distance_from_activations(
real_activations, generated_activations):
"""Classifier distance for evaluating a generative model from activations.
This methods computes the Frechet classifier distance from activations of
real images and generated images. This can be used independently of the
frechet_classifier_distance() method, especially in the case of using large
batches during evaluation where we would like to precompute all of the
activations before computing the classifier distance.
This technique is described in detail in https://arxiv.org/abs/1706.08500.
Given two Gaussian distribution with means m and m_w and covariance matrices
C and C_w, this function calculates
|m - m_w|^2 + Tr(C + C_w - 2(C * C_w)^(1/2))
which captures how different the distributions of real images and generated
images (or more accurately, their visual features) are. Note that unlike the
Inception score, this is a true distance and utilizes information about real
world images.
Args:
real_activations: 2D Tensor containing activations of real data. Shape is
[batch_size, activation_size].
generated_activations: 2D Tensor containing activations of generated data.
Shape is [batch_size, activation_size].
Returns:
The Frechet Inception distance. A floating-point scalar of the same type
as the output of the activations.
"""
real_activations.shape.assert_has_rank(2)
generated_activations.shape.assert_has_rank(2)
activations_dtype = real_activations.dtype
if activations_dtype != dtypes.float64:
real_activations = math_ops.to_double(real_activations)
generated_activations = math_ops.to_double(generated_activations)
# Compute mean and covariance matrices of activations.
m = math_ops.reduce_mean(real_activations, 0)
m_v = math_ops.reduce_mean(generated_activations, 0)
num_examples = math_ops.to_double(array_ops.shape(real_activations)[0])
# sigma = (1 / (n - 1)) * (X - mu) (X - mu)^T
real_centered = real_activations - m
sigma = math_ops.matmul(
real_centered, real_centered, transpose_a=True) / (num_examples - 1)
gen_centered = generated_activations - m_v
sigma_v = math_ops.matmul(
gen_centered, gen_centered, transpose_a=True) / (num_examples - 1)
# Find the Tr(sqrt(sigma sigma_v)) component of FID
sqrt_trace_component = trace_sqrt_product(sigma, sigma_v)
# Compute the two components of FID.
# First the covariance component.
# Here, note that trace(A + B) = trace(A) + trace(B)
trace = math_ops.trace(sigma + sigma_v) - 2.0 * sqrt_trace_component
# Next the distance between means.
mean = math_ops.square(linalg_ops.norm(m - m_v)) # This uses the L2 norm.
fid = trace + mean
if activations_dtype != dtypes.float64:
fid = math_ops.cast(fid, activations_dtype)
return fid
frechet_inception_distance = functools.partial(
frechet_classifier_distance,
classifier_fn=functools.partial(
run_inception, output_tensor=INCEPTION_FINAL_POOL))
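# --- Illustrative NumPy/SciPy sketch (added for clarity; not part of the original module). ---
# Mirrors `frechet_classifier_distance_from_activations`:
# FID = |m - m_w|^2 + Tr(C + C_w - 2 (C C_w)^(1/2)), computed from two activation matrices.
def _fid_numpy_sketch(real_activations_np, generated_activations_np):
  import numpy as np
  from scipy.linalg import sqrtm
  m = real_activations_np.mean(axis=0)
  m_w = generated_activations_np.mean(axis=0)
  c = np.cov(real_activations_np, rowvar=False)
  c_w = np.cov(generated_activations_np, rowvar=False)
  covmean = sqrtm(c.dot(c_w)).real
  return float(np.sum((m - m_w) ** 2) + np.trace(c + c_w - 2.0 * covmean))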
|
the-stack_0_2244 | from Roteiro8.Roteiro8__funcoes import Grafo
grafo = Grafo()
for v in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']:
grafo.adicionaVertice(v)
for a, p in {'a-b': 9, 'a-g': 4,
'b-c': 6, 'b-g': 10, 'b-h': 7,
'c-d': 8, 'c-e': 12, 'c-f': 8,
'd-e': 14,
'e-f': 2,
'f-h': 2, 'f-g': 1}.items():
grafo.adicionaArestaComPeso(a, p)
print('Grafo:')
print(grafo)
print('Pesos:', grafo.pesos())
print()
print('Minimum Spanning Tree com Prim Modificado:')
print(grafo.prim_mod())
|
the-stack_0_2245 | from random import randint
from kivy.animation import Animation
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import Clock, NumericProperty, ListProperty
from kivy.uix.floatlayout import FloatLayout
Builder.load_file("uix/components/kv/confetti_rain.kv")
class ConfettiItem(FloatLayout):
color = ListProperty()
def __init__(self, color, **kwargs):
super().__init__(**kwargs)
self.color = color
Clock.schedule_once(self._update)
def _update(self, *args):
(Animation(opacity=1, d=randint(*self.parent.time_before_fade))).start(
self)
class ConfettiRain(FloatLayout):
size_range = ListProperty([dp(2), dp(7)])
time_range = ListProperty([3, 5])
speed = ListProperty([3, 6])
time_before_fade = ListProperty([1, 3])
number = NumericProperty(150)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.running = False
def start(self):
if not self.running:
self.running = True
Clock.schedule_once(self._update)
def stop(self):
self.running = False
def add_item(self, count):
for x in range(count):
color = [randint(0, 255)/255, randint(0, 255)/255, randint(0, 255)/255, 1]
item = ConfettiItem(
size=[
randint(int(self.size_range[0]), int(self.size_range[1])),
randint(int(self.size_range[0]), int(self.size_range[1]))
],
pos=[
randint(0, int(self.width)),
randint(int(self.height * 0.9), int(self.height))
],
color=color
)
self.add_widget(item)
self.start_anim(item)
return None
def _update(self, *args):
self.add_item(self.number)
def start_anim(self, item):
target_pos = [randint(0, self.width), 0]
final_time = randint(*self.time_range)
speed = randint(*self.speed)
anim = Animation(pos=target_pos, d=speed)
anim.start(item)
# remove
Clock.schedule_once(lambda x: self.remove_widget(item), final_time)
# add new
if self.running:
Clock.schedule_once(lambda x: self.add_item(1), final_time)
fade_time = final_time - randint(*self.time_before_fade)
Clock.schedule_once(lambda x: self._fade_out(item, fade_time),
final_time - fade_time)
def _fade_out(self, item, time):
anim = Animation(opacity=0, d=time)
anim.start(item)
|
the-stack_0_2248 | from __future__ import absolute_import
import argparse
import docker
import os
import random
import sys
import shutil
import traceback
from ann_benchmarks.datasets import get_dataset, DATASETS
from ann_benchmarks.constants import INDEX_DIR
from ann_benchmarks.algorithms.definitions import get_definitions, list_algorithms, algorithm_status, InstantiationStatus
from ann_benchmarks.results import get_result_filename
from ann_benchmarks.runner import run, run_docker
def positive_int(s):
i = None
try:
i = int(s)
except ValueError:
pass
if not i or i < 1:
raise argparse.ArgumentTypeError("%r is not a positive integer" % s)
return i
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--dataset',
metavar='NAME',
help='the dataset to load training points from',
default='glove-100-angular',
choices=DATASETS.keys())
parser.add_argument(
"-k", "--count",
default=10,
type=positive_int,
help="the number of near neighbours to search for")
parser.add_argument(
'--definitions',
metavar='FILE',
help='load algorithm definitions from FILE',
default='algos.yaml')
parser.add_argument(
'--algorithm',
metavar='NAME',
help='run only the named algorithm',
default=None)
parser.add_argument(
'--docker-tag',
metavar='NAME',
help='run only algorithms in a particular docker image',
default=None)
parser.add_argument(
'--list-algorithms',
help='print the names of all known algorithms and exit',
action='store_true')
parser.add_argument(
'--force',
help='''re-run algorithms even if their results already exist''',
action='store_true')
parser.add_argument(
'--runs',
metavar='COUNT',
type=positive_int,
help='run each algorithm instance %(metavar)s times and use only the best result',
default=2)
parser.add_argument(
'--timeout',
type=int,
help='Timeout (in seconds) for each individual algorithm run, or -1 if no timeout should be set',
default=5*3600)
parser.add_argument(
'--local',
action='store_true',
help='If set, then will run everything locally (inside the same process) rather than using Docker')
parser.add_argument(
'--batch',
action='store_true',
help='If set, algorithms get all queries at once')
parser.add_argument(
'--max-n-algorithms',
type=int,
help='Max number of algorithms to run (just used for testing)',
default=-1)
parser.add_argument(
'--run-disabled',
help='run algorithms that are disabled in algos.yml',
action='store_true')
args = parser.parse_args()
if args.timeout == -1:
args.timeout = None
if args.list_algorithms:
list_algorithms(args.definitions)
sys.exit(0)
# Nmslib specific code
# Remove old indices stored on disk
if os.path.exists(INDEX_DIR):
shutil.rmtree(INDEX_DIR)
dataset = get_dataset(args.dataset)
dimension = len(dataset['train'][0]) # TODO(erikbern): ugly
point_type = dataset.attrs.get('point_type', 'float')
distance = dataset.attrs['distance']
definitions = get_definitions(args.definitions, dimension, point_type, distance, args.count)
# Filter out, from the loaded definitions, all those query argument groups
# that correspond to experiments that have already been run. (This might
# mean removing a definition altogether, so we can't just use a list
# comprehension.)
filtered_definitions = []
for definition in definitions:
query_argument_groups = definition.query_argument_groups
if not query_argument_groups:
query_argument_groups = [[]]
not_yet_run = []
for query_arguments in query_argument_groups:
fn = get_result_filename(args.dataset,
args.count, definition, query_arguments, args.batch)
if args.force or not os.path.exists(fn):
not_yet_run.append(query_arguments)
if not_yet_run:
if definition.query_argument_groups:
definition = definition._replace(
query_argument_groups = not_yet_run)
filtered_definitions.append(definition)
definitions = filtered_definitions
random.shuffle(definitions)
if args.algorithm:
print('running only', args.algorithm)
definitions = [d for d in definitions if d.algorithm == args.algorithm]
if not args.local:
# See which Docker images we have available
docker_client = docker.from_env()
docker_tags = set()
for image in docker_client.images.list():
for tag in image.tags:
tag = tag.split(':')[0]
docker_tags.add(tag)
if args.docker_tag:
print('running only', args.docker_tag)
definitions = [d for d in definitions if d.docker_tag == args.docker_tag]
if set(d.docker_tag for d in definitions).difference(docker_tags):
print('not all docker images available, only:', set(docker_tags))
print('missing docker images:', set(d.docker_tag for d in definitions).difference(docker_tags))
definitions = [d for d in definitions if d.docker_tag in docker_tags]
else:
def _test(df):
status = algorithm_status(df)
# If the module was loaded but doesn't actually have a constructor of
# the right name, then the definition is broken
assert status != InstantiationStatus.NO_CONSTRUCTOR, """\
%s.%s(%s): error: the module '%s' does not expose the named constructor""" % (df.module, df.constructor, df.arguments, df.module)
if status == InstantiationStatus.NO_MODULE:
# If the module couldn't be loaded (presumably because of a missing
# dependency), print a warning and remove this definition from the
# list of things to be run
print("""\
%s.%s(%s): warning: the module '%s' could not be loaded; skipping""" % (df.module, df.constructor, df.arguments, df.module))
return False
else:
return True
definitions = [d for d in definitions if _test(d)]
if not args.run_disabled:
if len([d for d in definitions if d.disabled]):
print('Not running disabled algorithms:', [d for d in definitions if d.disabled])
definitions = [d for d in definitions if not d.disabled]
if args.max_n_algorithms >= 0:
definitions = definitions[:args.max_n_algorithms]
if len(definitions) == 0:
raise Exception('Nothing to run')
else:
print('Order:', definitions)
for definition in definitions:
print(definition, '...')
try:
if args.local:
run(definition, args.dataset, args.count, args.runs, args.batch)
else:
run_docker(definition, args.dataset, args.count, args.runs, args.timeout, args.batch)
except KeyboardInterrupt:
break
except:
traceback.print_exc()
|
the-stack_0_2249 | import unittest
from tests._compat import patch, call
import requests_mock
from proxy_db.utils import download_file, get_domain
class TestDownloadFile(unittest.TestCase):
url = 'https://domain.com/'
def setUp(self):
super(TestDownloadFile, self).setUp()
self.session_mock = requests_mock.Mocker()
self.session_mock.start()
def tearDown(self):
super(TestDownloadFile, self).tearDown()
self.session_mock.stop()
@patch('proxy_db.utils.open')
def test_request(self, m):
text = 'foo' * 1000
self.session_mock.get(self.url, text=text)
download_file(self.url)
self.assertEqual(self.session_mock.call_count, 1)
calls = [call.write(text[i:i+1024].encode('utf-8')) for i in range(0, len(text), 1024)]
self.assertEqual(m.return_value.__enter__.return_value.mock_calls, calls)
class TestGetDomain(unittest.TestCase):
def test_get_domain(self):
self.assertEqual(get_domain('https://user:[email protected]:8888/'), 'domain.com')
|
the-stack_0_2250 | import asyncio
import aiopg
import aiosqlite
from motor import motor_asyncio
import discordSuperUtils
async def database_test():
mongo_database = discordSuperUtils.DatabaseManager.connect(
motor_asyncio.AsyncIOMotorClient("con-string")["name"]
)
# Replace 'con-string' with the MongoDB connection string and 'name' by the database name you want to use.
postgre_database = discordSuperUtils.DatabaseManager.connect(
await aiopg.create_pool("con-string")
)
# Replace 'con-string' with the PostgreSQL connection string.
# PostgreSQL connection string example:
# "dbname=name user=postgres password=xxx host=host" host is not required.
mysql_database = await discordSuperUtils.create_mysql(
host=..., port=..., user=..., password=..., dbname=...
)
# Replace '...' with the arguments.
# create_mysql supports mysql AND mariaDB
sqlite_database = discordSuperUtils.DatabaseManager.connect(
await aiosqlite.connect("path")
)
# Replace 'path' with the SQLite database path. (must be on your computer)
await sqlite_database.insert(
"economy", {"guild": ..., "member": ..., "currency": ..., "bank": ...}
)
await sqlite_database.close() # not required.
loop = asyncio.get_event_loop()
loop.run_until_complete(database_test())
|
the-stack_0_2251 | from __future__ import print_function, absolute_import, division # makes these scripts backward compatible with python 2.6 and 2.7
from KratosMultiphysics import kratos_utilities
import KratosMultiphysics.KratosUnittest as KratosUnittest
from KratosMultiphysics.CoSimulationApplication.solver_wrappers.sdof.sdof_static_solver import SDoFStaticSolver
import os
import numpy as np
class TestSdofStaticSolver(KratosUnittest.TestCase):
def setUp(self):
self.system_settings = {
"system_parameters":{
"stiffness" : 50000.0,
},
"output_parameters":{
"write_output_file": True,
"file_name" : "result.dat"
}
}
#result.dat
self.end_time = 1.0
self.time = 0.0
@classmethod
def tearDownClass(self):
kratos_utilities.DeleteFileIfExisting("result.dat")
kratos_utilities.DeleteFileIfExisting('fsi_sdof_static/results_final_sdof.dat')
def __CompareResults(self, reference, result):
ref = np.loadtxt(reference, skiprows=1)
res = np.loadtxt(result, skiprows=1)
self.assertEqual(ref.all(), res.all())
def __ExecuteTest(self, settings, ref_file_name):
settings.update(self.system_settings)
system = SDoFStaticSolver(settings)
system.Initialize()
system.SolveSolutionStep()
system.OutputSolutionStep()
self.__CompareResults(os.path.join("reference_files", ref_file_name), "result.dat")
def test_initial_displacement(self):
settings = {
"initial_values":{
"displacement" : 1.0,
}
}
self.__ExecuteTest(settings, "ref_sdof_static_initial_displacement.dat")
def test_final_displacement(self):
import json
parameter_file_name = "fsi_sdof_static/ProjectParametersSDoF.json"
with open(parameter_file_name, 'r') as parameter_file:
settings = json.load(parameter_file)
settings["output_parameters"]["write_output_file"] = True
system = SDoFStaticSolver(settings)
system.Initialize()
system.SolveSolutionStep()
system.OutputSolutionStep()
results_obtained = np.loadtxt('fsi_sdof_static/results_final_sdof.dat', skiprows=1)
results_reference = np.loadtxt('reference_files/ref_sdof_static_final_displacement.dat', skiprows=1)
self.assertEqual(results_reference.all(), results_obtained.all())
if __name__ == '__main__':
KratosUnittest.main()
|
the-stack_0_2252 | from nltk import word_tokenize
from nltk.stem import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from nltk.corpus import stopwords
import re
import nltk
import collections
from sklearn.metrics import silhouette_samples, silhouette_score
import pandas as pd
import json
FILE = open("/tmp/resultat_clustering.txt", "w")
# Read Token
with open('token.json') as json_file:
TOKENIZED_WORDS = json.load(json_file)
def body_to_words(raw_body):
"""
Args:
raw_body:
Returns:
"""
letters_only = re.sub("[^a-zA-Z]", " ", raw_body)
text = re.sub("<[^<]+?>", "", letters_only)
text_clean = " ".join([w for w in text.split() if ((len(w) > 3) and (len(w) < 23))])
words = text_clean.lower().split()
stop_words = set(
stopwords.words("french") + stopwords.words("english") + TOKENIZED_WORDS
)
meaningful_words = [w for w in words if w not in stop_words]
# clean_words = [w for w in meaningful_words if w not in TOKENIZED_WORDS]
return " ".join(meaningful_words)
def word_tokenizer(text):
"""
Args:
text:
Returns:
"""
tokens = word_tokenize(text, language="french")
stemmer = PorterStemmer()
tokens = [
stemmer.stem(t)
for t in tokens
if t not in (stopwords.words("french") + stopwords.words("english"))
]
return tokens
def tokenize_and_stem(text):
"""
Args:
text:
Returns:
"""
tokens = [
word
for sent in nltk.sent_tokenize(text, language="french")
for word in nltk.word_tokenize(sent, language="french")
]
filtered_tokens = []
stemmer = SnowballStemmer(language="french")
for token in tokens:
if re.search("[a-zA-Z]", token):
filtered_tokens.append(token)
stems = [stemmer.stem(t) for t in filtered_tokens]
return stems
def tokenize_only(text):
"""
    First tokenize by sentence, then by word, to ensure that punctuation is caught as its own token.
    Filter out any tokens not containing letters (e.g. numeric tokens, raw punctuation).
Args:
text:
Returns:
"""
tokens = [
word.lower()
for sent in nltk.sent_tokenize(text, language="french")
for word in nltk.word_tokenize(sent, language="french")
]
filtered_tokens = []
for token in tokens:
if re.search("[a-zA-Z]", token):
filtered_tokens.append(token)
return filtered_tokens
def pre_processing_dataset(dataset_train):
"""Prepare dataset to list[dict] to do clustering in body
Args:
dataset_train:
Returns:
"""
num_reviews = dataset_train["body"].size
clean_train_reviews = []
for i in range(0, num_reviews):
if (i + 1) % 1000 == 0:
print("body %d of %d\n" % (i + 1, num_reviews))
clean_train_reviews.append(
{
"body": body_to_words(str(dataset_train["body"][i])),
"idMail": dataset_train["idMail"][i],
}
)
print("Creating the bag of words...\n")
return clean_train_reviews
def predict_clustering_group(k_means_model, tfidf_matrix):
"""
Args:
k_means_model:
tfidf_matrix:
Returns:
"""
cluster_labels = k_means_model.fit_predict(tfidf_matrix)
print("cluster_labels", cluster_labels)
silhouette_avg = silhouette_score(tfidf_matrix, cluster_labels)
print("silhouette_avg", silhouette_avg)
sample_silhouette_values = silhouette_samples(tfidf_matrix, cluster_labels)
print("sample_silhouette_values", sample_silhouette_values)
centers = k_means_model.cluster_centers_
print("centers", centers)
n_clusters = centers.shape[0]
print("n_clusters", n_clusters)
return cluster_labels, silhouette_avg, sample_silhouette_values
def build_label_mails(
vocab_frame,
k_means_model,
tfidf_vectorizer,
clusters,
clean_train_reviews,
n_clusters,
):
"""
Args:
vocab_frame:
k_means_model:
tfidf_vectorizer:
clusters:
clean_train_reviews:
n_clusters:
Returns:
"""
order_centroids = k_means_model.cluster_centers_.argsort()[:, ::-1]
terms = tfidf_vectorizer.get_feature_names()
label = []
for cluster in range(n_clusters):
cluster_label = []
for ind in order_centroids[cluster, :n_clusters]:
label_name = (
vocab_frame.loc[terms[ind].split(" ")]
.values.tolist()[0][0]
.encode("utf-8", "ignore")
)
cluster_label.insert(cluster, label_name.decode("utf-8"))
label.append(cluster_label)
for cluster in range(n_clusters):
FILE.write("cluster " + str(cluster) + ":" + "\n")
FILE.write("centroid" + str(cluster) + "\n")
for i, sentence in enumerate(clusters[cluster]):
clean_train_reviews[sentence]["cluster_group"] = str(cluster)
clean_train_reviews[sentence]["label"] = label[cluster]
FILE.write(
"mail :" + str(i) + ": " + str(clean_train_reviews[sentence]) + "\n"
)
centers = k_means_model.cluster_centers_
return label
def build_cluster_from_model(n_clusters, tfidf_matrix):
"""
Args:
n_clusters:
tfidf_matrix:
Returns:
dict(clusters):
k_means_model:
"""
k_means_model = KMeans(
n_clusters=n_clusters, init="k-means++", max_iter=300, n_init=1
)
k_means_model.fit(tfidf_matrix)
clusters = collections.defaultdict(list)
for i, label in enumerate(k_means_model.labels_):
clusters[label].append(i)
return dict(clusters), k_means_model
def build_tfidf_matrix_vector(dataset):
"""
Args:
dataset:
Returns:
tfidf_matrix:
tfidf_vectorizer:
"""
train_body = []
for i in range(0, len(dataset)):
train_body.append(dataset[i]["body"])
tfidf_vectorizer = TfidfVectorizer(
tokenizer=tokenize_and_stem,
analyzer="word",
stop_words=stopwords.words("french")
+ TOKENIZED_WORDS
+ stopwords.words("english"),
max_df=0.8,
min_df=0.1,
lowercase=False,
use_idf=True,
max_features=200000,
ngram_range=(1, 3),
)
tfidf_matrix = tfidf_vectorizer.fit_transform(train_body)
print(tfidf_matrix.shape)
return tfidf_matrix, tfidf_vectorizer
def build_vocab_frame(clean_train_reviews):
""" Build frame of vocabulary
Args:
clean_train_reviews(list): list of mails
Returns:
vocab_frame:
"""
body = [mail["body"] for mail in clean_train_reviews]
total_vocab_stemmed = []
total_vocab_tokenized = []
for i in body:
allwords_stemmed = tokenize_and_stem(
i
) # for each item in 'synopses', tokenize/stem
total_vocab_stemmed.extend(
allwords_stemmed
) # extend the 'totalvocab_stemmed' list
allwords_tokenized = tokenize_only(i)
total_vocab_tokenized.extend(allwords_tokenized)
vocab_frame = pd.DataFrame(
{"words": total_vocab_tokenized}, index=total_vocab_stemmed
)
return vocab_frame
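# --------------------------------------------------------------------------- #
# Usage sketch (added for illustration). The CSV path, its "body"/"idMail"
# columns and the cluster count are assumptions, not part of the original
# module; adjust them to the real data before running.
if __name__ == "__main__":
    dataset_train = pd.read_csv("mails.csv")  # hypothetical input file
    clean_train_reviews = pre_processing_dataset(dataset_train)
    tfidf_matrix, tfidf_vectorizer = build_tfidf_matrix_vector(clean_train_reviews)
    n_clusters = 5  # assumed value; check silhouette_avg below to tune it
    clusters, k_means_model = build_cluster_from_model(n_clusters, tfidf_matrix)
    labels, silhouette_avg, _ = predict_clustering_group(k_means_model, tfidf_matrix)
    vocab_frame = build_vocab_frame(clean_train_reviews)
    build_label_mails(vocab_frame, k_means_model, tfidf_vectorizer,
                      clusters, clean_train_reviews, n_clusters)
    FILE.close()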
|
the-stack_0_2253 | from flask import request, jsonify, Blueprint
from app.models import Nonprofit, NonprofitSchema
api_blueprint = Blueprint('api', __name__,)
npschema = NonprofitSchema()
npschemas = NonprofitSchema(many=True)
def jsonsift(obj, attrlist):
    '''Filter the model object down to the given attribute list and return a dict for a tailored JSON response.'''
resp = {}
for attr in attrlist:
resp[attr] = getattr(obj,attr)
return resp
@api_blueprint.route("/api/orgs/all", methods=["GET"])
def get_orgs():
all_orgs = Nonprofit.query.all()
# paginate logic
records = []
for x, org in enumerate(all_orgs):
records.append(org)
if x == 10:
break
return npschemas.jsonify(records)
@api_blueprint.route("/api/orgs/id/<int:id>", methods=["GET"])
def get_org_by_id(id):
org = Nonprofit.query.get(id)
return npschema.jsonify(org)
@api_blueprint.route("/api/orgs/id/<int:id>/address", methods=["GET"])
def get_org_address_by_id(id):
org = Nonprofit.query.get(id)
only_these_fields = ["id", "ein", "name", "street", "city", "state", "zipcode"]
return jsonify(jsonsift(org, only_these_fields))
@api_blueprint.route("/api/orgs/id/<int:id>/geocode", methods=["GET"])
def get_org_geocode_by_id(id):
org = Nonprofit.query.get(id)
only_these_fields = ["id", "ein", "name", "longitude", "latitude"]
return jsonify(jsonsift(org, only_these_fields))
@api_blueprint.route("/api/orgs/ein/<int:ein>", methods=["GET"])
def get_org_by_ein(ein):
org = Nonprofit.query.filter(Nonprofit.ein == ein).first()
return npschema.jsonify(org)
@api_blueprint.route("/api/orgs/ein/<int:ein>/address", methods=["GET"])
def get_org_address_by_ein(ein):
org = Nonprofit.query.filter(Nonprofit.ein == ein).first()
only_these_fields = ["id", "ein", "name", "street", "city", "state", "zipcode"]
return jsonify(jsonsift(org, only_these_fields))
@api_blueprint.route("/api/orgs/ein/<int:ein>/geocode", methods=["GET"])
def get_org_geocode_by_ein(ein):
org = Nonprofit.query.filter(Nonprofit.ein == ein).first()
only_these_fields = ["id", "ein", "name", "longitude", "latitude"]
return jsonify(jsonsift(org, only_these_fields))
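# Usage sketch (illustrative, not part of the original module): the blueprint is
# meant to be registered on the Flask application object. The factory name is an
# assumption, and the SQLAlchemy/Marshmallow setup behind app.models must also be
# initialised on the returned app.
def create_app():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(api_blueprint)
    return app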
|
the-stack_0_2254 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
import math
import mxnet as mx
from mxnet import np, npx
from mxnet.gluon.nn.activations import Activation
from . import constants as C
logger = logging.getLogger(__name__)
# Modified from the source to mxnet.gluon.nn.basic_layers.Dense which is:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class QuantizableDense(mx.gluon.HybridBlock):
r"""Optionally Quantized fully-connected NN layer.
    `QuantizableDense` implements the operation:
`output = activation(dot(input, weight) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `weight` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: the input must be a tensor with rank 2. Use `flatten` to convert it
to rank 2 manually if necessary.
Parameters
----------
units : int
Dimensionality of the output space.
activation : str
Activation function to use. See help on `Activation` layer.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool, default True
Whether the layer uses a bias vector.
flatten: bool, default True
Whether the input tensor should be flattened.
If true, all but the first axis of input data are collapsed together.
If false, all but the last axis of input data are kept the same, and the transformation
applies on the last axis.
dtype : str or np.dtype, default C.DTYPE_FP32
Data type of output embeddings.
weight_initializer : str or `Initializer`
Initializer for the `kernel` weights matrix.
bias_initializer: str or `Initializer`
Initializer for the bias vector.
in_units : int, optional
Size of the input data. If not specified, initialization will be
deferred to the first time `forward` is called and `in_units`
will be inferred from the shape of input data.
prefix : str or None
See document of `Block`.
params : ParameterDict or None
See document of `Block`.
Inputs:
- **data**: if `flatten` is True, `data` should be a tensor with shape
`(batch_size, x1, x2, ..., xn)`, where x1 * x2 * ... * xn is equal to
`in_units`. If `flatten` is False, `data` should have shape
`(x1, x2, ..., xn, in_units)`.
Outputs:
- **out**: if `flatten` is True, `out` will be a tensor with shape
`(batch_size, units)`. If `flatten` is False, `out` will have shape
`(x1, x2, ..., xn, units)`.
"""
def __init__(self, units, dtype: str, activation=None, use_bias=True, flatten=True,
weight_initializer=None, bias_initializer='zeros',
in_units=0):
super(QuantizableDense, self).__init__()
self._flatten = flatten
self._dtype = dtype
self._units = units
self._in_units = in_units
self.scaling = None
if dtype == C.DTYPE_INT8:
self.scaling = mx.gluon.Parameter('scaling', shape=(1,),
# Initialize to an obviously wrong value so we can detect later
init=mx.initializer.Constant(-1.0), dtype=C.DTYPE_FP32,
allow_deferred_init=True)
weight_initializer = 'zeros' # Most initializers don't work for int8, but this is for inference anyway.
self.weight = mx.gluon.Parameter('weight', shape=(units, in_units),
init=weight_initializer, dtype=dtype,
allow_deferred_init=True)
self.bias = mx.gluon.Parameter('bias', shape=(units,),
init=bias_initializer, dtype=C.DTYPE_FP32,
allow_deferred_init=True) if use_bias else None
        self.act = Activation(activation) if activation is not None else None
def cast(self, dtype):
if self._dtype != C.DTYPE_INT8:
self._dtype = dtype
super(QuantizableDense, self).cast(dtype)
else:
#No casting an already quantized matrix.
logger.warning("Ignoring casting on int8 matrix")
def infer_shape(self, x, *args):
if self._flatten:
num_input = 1
for i in range(1, x.ndim):
num_input *= x.shape[i]
self.weight.shape = (self.weight.shape[0], num_input)
else:
self.weight.shape = (self.weight.shape[0], x.shape[x.ndim - 1])
def forward(self, x):
if self._dtype == C.DTYPE_INT8:
if self.bias is not None:
act = npx.intgemm_fully_connected(x,
weight=self.weight.data(),
scaling=self.scaling.data(),
bias=self.bias.data(), no_bias=False,
num_hidden=self._units,
flatten=self._flatten)
else:
act = npx.intgemm_fully_connected(x,
weight=self.weight.data(),
scaling=self.scaling.data(),
no_bias=True,
num_hidden=self._units,
flatten=self._flatten)
else:
act = npx.fully_connected(x,
weight=self.weight.data(),
bias=self.bias.data() if self.bias else None, no_bias=self.bias is None,
num_hidden=self._units,
flatten=self._flatten)
if self.act is not None:
act = self.act(act)
return act
def __repr__(self):
s = '{name}({layout}, {act})'
shape = self.weight.shape
return s.format(name=self.__class__.__name__,
act=self.act if self.act else 'linear',
layout='{0} -> {1}'.format(shape[1] if shape[1] else None, shape[0]))
def optimize_quantization_mse(tensor, rounds=10):
"""
Minimize mean squared error of quantizing a tensor, returning the top value
(i.e. the one that quantizes to 127). Scaling = 127.0 / return value.
This is a convex optimization problem. EM works but makes slow steps.
Instead of EM, use binary search in the direction minimization suggests.
"""
best_mse = math.inf
best_top = None
maxabs = npx.intgemm_maxabsolute(tensor)
low = 0.0
high = maxabs
for _ in range(rounds):
value = (low + high) / 2.0
quant = npx.intgemm_prepare_data(tensor, value)
quant_float = quant.astype(C.DTYPE_FP32)
mse = (quant_float * (value / 127.0) - tensor).norm().item() / math.sqrt(float(tensor.size))
if mse < best_mse:
best_mse = mse
best_top = value
# This optimizes scaling subject to cluster assignment.
# It can be used for EM but the step is really slow, so use it for direction.
scale = np.sum(quant_float * quant_float) / np.sum(quant_float * tensor)
top = 127.0 / scale.item()
if top < value:
high = value
else:
low = value
return best_top
def extract_quant_max(tensor_param: mx.gluon.parameter.Parameter, scaling_param: mx.gluon.parameter.Parameter) -> float:
"""
Extract or tune the scaling factor for a parameter.
"""
scaling = scaling_param.data()
if scaling.item() < 0:
# Bogus auto initialized scaling factor.
b_max = optimize_quantization_mse(tensor_param.data())
scaling_param.set_data(b_max / 127.0)
else:
b_max = scaling * 127.0
return b_max
def convert_weights_disk_format(params: C.ParameterDict, dtype_store: str):
"""
Convert weights from float32 MXNet format (B^T in float32) to disk format
(B^T in int8 format).
If dtype_store == 'int8' then compute scaling and quantize the model.
If dtype_store == 'float32' then just annotate with scaling factors.
:param params model parameters from model.collect_params() in a float32
model.
:param dtype_store data type to store on disk.
"""
logger.info("Optimizing quantization scaling factors")
for name, param in params.items():
if name.endswith("_weight"):
scaling_name = name[0:-6] + "scaling"
if scaling_name in params:
b_max = extract_quant_max(param, params[scaling_name])
if dtype_store == C.DTYPE_INT8:
quantized = npx.intgemm_prepare_data(param.data(), b_max)
param.set_data(quantized)
param.dtype = C.DTYPE_INT8
def convert_weights_cpu_dependent(params: C.ParameterDict):
"""
Convert weights from disk format to intgemm's CPU-dependent format for
quantized matrix multiplication.
:param params model parameters from model.collect_params() in a model that
came from convert_weights_disk_format.
"""
logger.info("Converting weights to CPU format.")
for name, param in params.items():
if name.endswith("_weight"):
scaling_name = name[0:-6] + "scaling"
if scaling_name in params:
if param.dtype == C.DTYPE_INT8:
# Already fully quantized, just rearrange.
weight = npx.intgemm_prepare_weight(param.data(), already_quantized=True)
else:
# Use offline scaling factor if available.
b_max = extract_quant_max(param, params[scaling_name])
weight = npx.intgemm_prepare_weight(param.data(), b_max)
param.set_data(weight)
param.dtype = C.DTYPE_INT8
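# Illustrative calling order (sketch, not part of the original module). `model` is a
# placeholder for any Gluon block whose parameters follow the "<name>_weight" /
# "<name>_scaling" naming convention used above; running it requires an
# intgemm-enabled MXNet build.
#
#     params = model.collect_params()
#     convert_weights_disk_format(params, dtype_store=C.DTYPE_INT8)
#     ...  # save the parameters to disk
#     convert_weights_cpu_dependent(params)  # after loading, before inference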
|
the-stack_0_2255 | """
Test label generation for nodes.
"""
from map_machine.map_configuration import LabelMode
from map_machine.text import Label
from tests import SCHEME
__author__ = "Sergey Vartanov"
__email__ = "[email protected]"
def construct_labels(tags: dict[str, str]) -> list[Label]:
"""Construct labels from OSM node tags."""
processed: set[str] = set()
return SCHEME.construct_text(tags, processed, LabelMode.ALL)
def test_1_label() -> None:
"""Test tags that should be converted into single label."""
labels = construct_labels({"name": "Name"})
assert len(labels) == 1
assert labels[0].text == "Name"
def test_1_label_unknown_tags() -> None:
"""
    Test tags, including some unknown ones, that should still be converted into a single label.
"""
labels = construct_labels({"name": "Name", "aaa": "bbb"})
assert len(labels) == 1
assert labels[0].text == "Name"
def test_2_labels() -> None:
"""Test tags that should be converted into two labels."""
labels = construct_labels({"name": "Name", "ref": "5"})
assert len(labels) == 2
assert labels[0].text == "Name"
assert labels[1].text == "5"
|
the-stack_0_2256 | """Typing middleware."""
from typing import Any, Callable, Dict, Optional
import falcon
from falcon import Request, Response
from falcontyping.base import (PydanticBaseModel, TypedResource,
TypeValidationError)
from falcontyping.typedjson import DecodingError, ExternalSerializerException
from falcontyping.typedjson import decode as decode_using_hints
_VALID_RESPONSE_TYPES = set([PydanticBaseModel, dict, type(None)])
class TypingMiddleware:
@staticmethod
def _decode_or_raise_error(hint: Any, parameter: Any) -> Any:
"""
Decode value using type hint or fail.
:raises: falcon.HTTPError or ExternalSerializerException
"""
result = decode_using_hints(hint, parameter)
if isinstance(result, DecodingError):
if isinstance(result.reason, ExternalSerializerException):
raise result.reason.exception from None
else:
raise falcon.HTTPError(status=falcon.HTTP_UNPROCESSABLE_ENTITY, # pylint: disable=no-member
description=f'\'{parameter}\' must be of type {hint} not {type(parameter)}')
return result
@staticmethod
def _try_decode_query_or_body(request: falcon.Request, hint: Any) -> Any:
"""Decode values by looking for them in both URI and request body."""
# An assumption is being made here, That only POST, PUT and PATCH can have bodies.
if request.method.lower() in ['post', 'put', 'patch']:
key = 'media'
else:
key = 'params'
return TypingMiddleware._decode_or_raise_error(hint, getattr(request, key, None))
def process_request(self, request: Request, response: Response) -> None:
"""
Process the request before routing it.
Because Falcon routes each request based on req.path, a
request can be effectively re-routed by setting that
attribute to a new value from within process_request().
:param request: Request object that will eventually be
routed to an on_* responder method.
:param response: Response object that will be routed to
the on_* responder.
"""
...
def process_resource(self, request: Request, response: Response, resource: Any, parameters: Dict) -> None:
"""
Process the request after routing.
This method is only called when the request matches
a route to a resource.
:param request: Request object that will be passed to the
routed responder.
:param response: Response object that will be passed to the
responder.
:param resource: Resource object to which the request was
routed.
:param parameters: A dict-like object representing any additional
parameters derived from the route's URI template fields,
that will be passed to the resource's responder
method as keyword arguments.
"""
if not isinstance(resource, TypedResource):
return
handler: Optional[Callable] = getattr(resource, 'on_%s' % request.method.lower(), None)
if handler:
# Get hints for only those variables that should be passed to the request handler.
hints = resource.hints[handler.__name__]
# Decode values using type hints, All values in parameters will be based as
# Keyword arguments to the request handler.
for parameter in filter(hints.get, parameters):
parameters[parameter] = self._decode_or_raise_error(hints[parameter], parameters.get(parameter))
# Decode body parameter if there is one.
body_parameter = resource.methods_body_parameter[handler.__name__]
if body_parameter:
parameters[body_parameter] = self._try_decode_query_or_body(request, hints[body_parameter])
def process_response(self, request: Request, response: Response, resource: Any, request_succeeded: bool) -> None:
"""
Post-processing of the response (after routing).
:param request: Request object.
:param response: Response object.
:param resource: Resource object to which the request was routed.
May be None if no route was found for the request.
:param request_succeeded: True if no exceptions were raised while the framework processed and
routed the request; otherwise False.
"""
if not (isinstance(resource, TypedResource) and request_succeeded):
return
handler: Optional[Callable] = getattr(resource, 'on_%s' % request.method.lower(), None)
# Get type hint for the return type of the request handler.
hint: Any = resource.hints[handler.__name__].get('return') if handler else None
if hint:
media = getattr(response, 'media', None)
media = decode_using_hints(hint, media)
if not any(isinstance(media, type_) for type_ in _VALID_RESPONSE_TYPES): # type: ignore
raise TypeValidationError(f'{resource}.{handler} returned a unexpected value. ',
f'Resource methods must return either Nothing, '
f'marshmallow.Schema or pydantic.BaseModel not {type(media)}')
if isinstance(media, PydanticBaseModel):
media = media.dict()
response.media = media
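# Usage sketch (illustrative, not part of the original module). Resources are
# expected to subclass TypedResource so the middleware can read their type hints.
# `falcon.API` is the pre-3.0 application class; newer Falcon releases use
# `falcon.App` instead.
def create_app() -> falcon.API:
    return falcon.API(middleware=[TypingMiddleware()])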
|
the-stack_0_2258 | import networkx as nx
import utils
import sys
import logging
import os
import uuid
def convert(args):
for graph in args.graphs:
if args.nocycles:
g=nx.DiGraph()
else:
g=nx.MultiDiGraph()
g.graph['paths']=[]
g.graph['path2id']=dict()
g.graph['id2path']=dict()
if graph.endswith(".gfa"): #gfa to gml/gfa
utils.read_gfa(graph,None,None,g,minsamples=args.minsamples,
maxsamples=args.maxsamples,
targetsample=args.targetsample,
remap=False)
if args.type=="gfa":
fn=graph.replace(".gfa",".rewrite.gfa")
graph=utils.write_gfa(g,"", outputfile=fn)
logging.info("gfa graph written to: %s"%fn)
elif args.type=="gml":
fn=utils.write_gml(g,"", hwm=args.hwm, outputfile=graph.replace(".gfa",""), partition=args.partition)
logging.info("gml graph written to: %s"%fn)
elif args.type=="maf":
logging.info("Converting graph to maf..")
graph2maf(g,graph.replace(".gfa",".maf"))
elif graph.endswith(".maf"): #multiple alignment format, convert to graph
g=maf2graph(graph)
filename=graph[:graph.rfind(".")]+".gml"
utils.write_gml(g,"", outputfile=filename)
filename=graph[:graph.rfind(".")]+".gfa"
utils.write_gfa(g,"", outputfile=filename)
logging.debug("gfa graph written to: %s"%filename)
elif graph.endswith(".fa") or graph.endswith(".fasta") or graph.endswith(".fna"): #assume fasta to gfa
if args.aligned:
seqs=[]
names=[]
for name,seq in utils.fasta_reader(graph,keepdash=True):
seqs.append(seq)
names.append(name)
g,nid=utils.aln2graph(seqs,names)
else:
i=0
start=uuid.uuid4().hex
end=uuid.uuid4().hex
g.graph['startnodes']=[start]
g.graph['endnodes']=[end]
g.add_node(start,offsets=dict())
g.add_node(end,offsets=dict())
for i,v in enumerate(utils.fasta_reader(graph)):
name,seq=v
g.graph['paths'].append(name)
g.graph['path2id'][name]=i
g.graph['id2path'][i]=name
g.node[start]['offsets'][i]=0
g.node[end]['offsets'][i]=len(seq)
g.add_node(i,offsets={i:0},seq=seq)
g.add_edge(start,i,paths=set([i]))
g.add_edge(i,end,paths=set([i]))
filename=graph[:graph.rfind(".")]+".gfa"
utils.write_gfa(g,"", outputfile=filename)
logging.debug("gfa graph written to: %s"%filename)
else:
logging.fatal("Unknown filetype, need gfa or fasta extension.")
return
#converts a multiple alignment format file to a graph
def maf2graph(maffile):
files=set()
G=nx.MultiDiGraph()
startnode=uuid.uuid4().hex
endnode=uuid.uuid4().hex
G.graph['startnodes']=set([startnode])
G.graph['endnodes']=set([endnode])
G.graph['path2id']=dict()
G.add_node(startnode,offsets=dict())
G.add_node(endnode,offsets=dict())
nid=0
with open(maffile,"r") as maf:
for line in maf:
if line.startswith("#"):
continue
elif line.startswith("a"): #start of an aligned segment
nid+=1
G.add_node(nid,data=dict())
elif line.startswith("s"):
cols=line.rstrip().split()
if '.' in cols[1]: #TODO: use db parameter to specificy a single mfa file with all sequence
file,name=cols[1][:cols[1].find('.')],cols[1][cols[1].find('.')+1:]
files.add(file)
else:
file=None #args.db?
name=cols[1]
if name not in G.graph['path2id']:
G.graph['path2id'][name]=len(G.graph['path2id'])
G.node[startnode]['offsets'][G.graph['path2id'][name]]=0
G.node[nid]['data'][(file,name)]={'start':int(cols[2]),
'end':int(cols[2])+int(cols[3]),
'orientation':cols[4],
'aln':cols[6]
}
nid+=1
remove=[]
for node,d in G.nodes(data=True):
if 'data' in d and len(d['data'])==1: #multiplicity of 1, strictly not an alignment
remove.append(node)
G.remove_nodes_from(remove)
db=dict() #map name to sequence
for file in files:
for name,seq in utils.fasta_reader(file+".fasta"): #guess that the original file has a ".fasta" extension
name=name.split()[0]
key=(file,name)
if key in db:
logging.fatal("Non unique contig-name: %s. quit."%name)
sys.exit(1)
else:
db[key]=seq
remove=[]
#for every sequence, check that none of the alignments overlap, otherwise assignment is not 1-1
for file,name in db:
seq=db[(file,name)]
intvs=[]
for node in G:
if 'data' in G.node[node]: #does the node represent an aligned segment?
if (file,name) in G.node[node]['data']:
intvs.append((G.node[node]['data'][(file,name)]['start'] , G.node[node]['data'][(file,name)]['end'], node))
intvs.sort() #sort by start position
pstart=0
pend=0
pnode=startnode
unaligned=[]
for start,end,node in intvs:
if start>pend:
unaligned.append((pend,start))
G.add_node(nid,intv=(pend,start),seq=seq[pend:start])
G.add_edge(pnode,nid,paths=set([G.graph['path2id'][name]]),ofrom="+",oto="+")
G.add_edge(nid,node,paths=set([G.graph['path2id'][name]]),ofrom="+",oto="+")
nid+=1
elif start<pend:
logging.fatal("Overlapping alignments for sequence: %s.%s --> (%d,%d) and (%d,%d)."%(file,name,pstart,pend,start,end))
remove.append(node)
# sys.exit(1)
else: #no gap, just connect subsequent intervals
G.add_edge(pnode,node,paths=set([G.graph['path2id'][name]]),ofrom="+",oto="+")
pstart,pend,pnode=start,end,node
if len(seq)!=pend:
unaligned.append((pend,len(seq)))
G.add_node(nid,intv=((pend,len(seq))),seq=seq[pend:len(seq)])
G.add_edge(pnode,nid,paths=set([G.graph['path2id'][name]]),ofrom="+",oto="+")
G.add_edge(nid,endnode,paths=set([G.graph['path2id'][name]]),ofrom="+",oto="+")
nid+=1
else:
G.add_edge(pnode,endnode,paths=set([G.graph['path2id'][name]]),ofrom="+",oto="+")
G.remove_nodes_from(remove)
# print "Unaligned segments",unaligned
alignments=[node for node in G if 'data' in G.node[node]]
for node in alignments: #expand all alignments in the graph
if 'data' in G.node[node]:
seqs=[]
names=[]
offsets={}
for file,name in G.node[node]['data']:
seqs.append(G.node[node]['data'][(file,name)]['aln'])
offsets[G.graph['path2id'][name]]=G.node[node]['data'][(file,name)]['start']
names.append(name)
sg,nid=utils.aln2graph(seqs,names,idoffset=nid,path2id=G.graph['path2id'],offsets=offsets)
nid+=1
G.add_nodes_from(sg.nodes(data=True))
G.add_edges_from(sg.edges(data=True))
assert(len(sg.graph['startnodes'])==1)
assert(len(sg.graph['endnodes'])==1)
sgstart=sg.graph['startnodes'][0]
sgend=sg.graph['endnodes'][0]
for v,t,d in G.in_edges(node,data=True):
G.add_edge(v,sgstart,paths=d['paths'],ofrom="+",oto="+")
for v,t,d in G.out_edges(node,data=True):
G.add_edge(sgend,t,paths=d['paths'],ofrom="+",oto="+")
#hack this in here so we can continue
G.node[sgstart]['seq']=""
G.node[sgend]['seq']=""
nx.relabel_nodes(G,{sgstart: nid, sgend: nid+1},copy=False)
nid+=2
G.remove_node(node)
return G
def graph2maf(G,filename):
if isinstance(G,nx.MultiDiGraph):
#TODO: decompose global alignment into local alignments by deconnecting structure edges
#determine set of structure edges
orgpaths=set([G.graph['path2id'][p] for p in G.graph['paths'] if p.startswith('*')])
refpaths=set([G.graph['path2id'][p] for p in G.graph['paths'] if not p.startswith('*')])
es=[]
for e0,e1,d in G.edges(data=True):
if len(d['paths'] & refpaths)==0: #edge that exclusively represents structural event
es.append((e0,e1))
toremove=es
G.remove_edges_from(toremove)
sizes={sid:0 for sid in G.graph['id2path']}
with open(filename,'w') as maf:
for g in nx.weakly_connected_component_subgraphs(G):
longest=0
sids=set()
for node in nx.topological_sort(g):
if type(node)!=str:
go=max([0]+[G.node[pred]['graphoffset']+len(G.node[pred]['seq']) for pred in G.predecessors(node) if type(pred)!=str])
G.node[node]['graphoffset']=go
if go+len(G.node[node]['seq'])>longest:
longest=go+len(G.node[node]['seq'])
for k in G.node[node]['offsets']:
sids.add(k)
if G.node[node]['offsets'][k]+len(G.node[node]['seq'])>sizes[k]:
sizes[k]=G.node[node]['offsets'][k]+len(G.node[node]['seq'])
ml=max([len(p) for p in G.graph['paths']])
maf.write("##maf version=1\n")
maf.write("a\n")
for sid in sids:
path=G.graph['id2path'][sid]
o=0
sl=0
maf.write("s %s %d %d + %-10d "%(path.ljust(ml), 0, sizes[G.graph['path2id'][path]], sizes[G.graph['path2id'][path]]) )
for node in nx.topological_sort(g):
if type(node)!=str and sid in G.node[node]['offsets']:
while o<G.node[node]['graphoffset']:
maf.write("-")
o+=1
sl+=len(G.node[node]['seq'].replace("-",""))
maf.write("%s"%G.node[node]['seq'])
o+=len(G.node[node]['seq'])
maf.write("-"*(longest-o)) #pad with dash so all lines are equally long
maf.write("\n")
maf.write("\n")
|
the-stack_0_2259 | from collections import Counter
from itertools import product
def count_letters(input_):
"""
    Given an input_ like "abcdef", return a tuple of booleans for the following rules:
    some letter appears exactly 2 times,
    some letter appears exactly 3 times.
"""
counter = Counter(input_)
two_times = 0
three_times = 0
for quantity in counter.values():
if quantity == 2:
two_times += 1
elif quantity == 3:
three_times += 1
return two_times > 0, three_times > 0
def check_repeated(str1, str2):
"""
    Given 2 strings of equal length, check whether they differ by exactly one character.
    If so, return the position of that character, otherwise return None.
"""
assert len(str1) == len(str2)
position = None
quantity = 0
for i in range(len(str1)):
if str1[i] != str2[i]:
quantity += 1
position = i
if quantity == 1:
return position
else:
return None
def run():
with open("../inputs/day02.txt") as f:
lines = f.readlines()
two_times, three_times = 0, 0
for line in lines:
if not line:
continue
two, three = count_letters(line)
if two:
two_times += 1
if three:
three_times += 1
print(f"The checksum is {two_times * three_times}")
for id_1, id_2 in product(lines, lines):
if id_1 == id_2:
continue
pos = check_repeated(id_1, id_2)
if pos is not None:
res = id_1[0:pos] + id_1[pos + 1:]
print(f"The result is {res}")
|
the-stack_0_2260 | """Cloud optical properties from ECHAM."""
from os.path import dirname, join
import numpy as np
import xarray as xr
from scipy.interpolate import interp1d
class EchamCloudOptics:
"""Interface to interpolate cloud optical properties used in ECHAM."""
def __init__(self):
self.database = xr.open_dataset(
join(dirname(__file__), "data", "ECHAM6_CldOptProps.nc")
)
def interp_ice_properties(self, particle_size=100.0, kind="linear"):
x = self.database.re_crystal
r = interp1d(
x,
self.database[f"co_albedo_crystal"],
axis=1,
kind=kind,
)
s = interp1d(
x,
self.database[f"asymmetry_factor_crystal"],
axis=1,
kind=kind,
)
t = interp1d(
x,
self.database[f"extinction_per_mass_crystal"],
axis=1,
kind=kind,
)
return (
1 - r(particle_size)[16:], # SW bands
s(particle_size)[16:],
t(particle_size)[16:],
t(particle_size)[:16], # LW bands
)
    def interp_liquid_properties(self, particle_size=10.0, kind="linear"):
        x = self.database.re_droplet
        r = interp1d(
            x,
            self.database["co_albedo_droplet"],
            axis=1,
            kind=kind,
        )
        s = interp1d(
            x,
            self.database["asymmetry_factor_droplet"],
            axis=1,
            kind=kind,
        )
        t = interp1d(
            x,
            self.database["extinction_per_mass_droplet"],
            axis=1,
            kind=kind,
        )
return (
1 - r(particle_size)[16:],
s(particle_size)[16:],
t(particle_size)[16:],
t(particle_size)[:16],
)
def get_cloud_properties(self, particle_size, water_path, phase="ice"):
if phase == "ice":
ssa, asym, tau_sw, tau_lw = self.interp_ice_properties(particle_size)
elif phase == "liquid":
ssa, asym, tau_sw, tau_lw = self.interp_liquid_properties(particle_size)
else:
raise ValueError('Invalid phase. Allowed values are "ice" and "liquid".')
cld_optics = xr.Dataset(
coords={
"num_shortwave_bands": np.arange(14),
"num_longwave_bands": np.arange(16),
},
)
cld_optics["single_scattering_albedo_due_to_cloud"] = (
("num_shortwave_bands",),
ssa.ravel(),
)
cld_optics["cloud_asymmetry_parameter"] = (
("num_shortwave_bands",),
asym.ravel(),
)
cld_optics["cloud_forward_scattering_fraction"] = (
("num_shortwave_bands",),
asym.ravel() ** 2,
)
cld_optics["shortwave_optical_thickness_due_to_cloud"] = (
("num_shortwave_bands",),
water_path * tau_sw.ravel(),
)
cld_optics["longwave_optical_thickness_due_to_cloud"] = (
("num_longwave_bands",),
water_path * tau_lw.ravel(),
)
return cld_optics
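if __name__ == "__main__":
    # Minimal usage sketch (added for illustration). Assumes the bundled
    # "data/ECHAM6_CldOptProps.nc" file is present next to this module; the
    # particle size and water path values are arbitrary examples.
    optics = EchamCloudOptics()
    ice_cloud = optics.get_cloud_properties(particle_size=30.0, water_path=0.05, phase="ice")
    print(ice_cloud["shortwave_optical_thickness_due_to_cloud"].values)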
|
the-stack_0_2262 | import os, pymysql, logging, matplotlib, sys
from logging.handlers import RotatingFileHandler
from flask import Flask
from config import app_config
from .utils.mattermostdriver import Driver
config_name = os.getenv('FLASK_CONFIG', 'default')
app = Flask(__name__)
# load the color list from the matplotlib
color_list = set()
for name, hex in matplotlib.colors.cnames.items():
if name.startswith("light"): color_list.add(hex)
# inherit the configuration object from the config file
app.config.from_object(app_config[config_name])
app_config = app_config[config_name]
logger = logging.getLogger(__name__)
formatter = logging.Formatter(
"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
handler = RotatingFileHandler(app_config.LOG_FILE_PATH, maxBytes=10000000, backupCount=5)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
app.logger.addHandler(handler)
try:
mysql = pymysql.connect(host=app_config.MYSQL_HOST,
port=app_config.MYSQL_PORT,
user=app_config.MYSQL_USER,
password=app_config.MYSQL_PASSWORD,
db=app_config.MYSQL_DB,
cursorclass=pymysql.cursors.DictCursor)
except Exception as e:
logger.critical("Not able to connect to MySQL Server. Can't proceed further. Shutting down gracefully", exc_info=True)
sys.exit()
default_options = {
'scheme': app_config.MM_SCHEME,
'url': app_config.MM_URL,
'port': app_config.MM_PORT,
'basepath': '/api/v4',
'verify': True,
'timeout': 30,
'request_timeout': None,
'login_id': None,
'password': None,
'token': app_config.MM_BOT_TOKEN,
'mfa_token': None,
'auth': None,
'debug': False
}
SLASH_TOKEN = app_config.MM_SLASH_TOKEN
mm_client = Driver(default_options)
try:
mm_client.login()
except Exception as e:
logger.critical("Not able to connect to MatterSQL Server. Can't proceed further. \
Shutting down gracefully", exc_info=True)
sys.exit()
DAILY_POINT_LIMIT = app_config.DAILY_POINT_LIMIT
PER_TRANSACTION_POINT_LIMIT = app_config.PER_TRANSACTION_POINT_LIMIT
INSERT_QUERY_STRING = "insert into transaction(channel_id, channel_name, from_user_id, from_user_name, points, to_user_id, to_user_name, post_id, insertionTime, message) values (\"%s\", \"%s\", \"%s\", \"%s\", %d, \"%s\", \"%s\", \"%s\", \"%s\", \"%s\");"
WEEKLY_THRESHOLD = app_config.WEEKLY_THRESHOLD
from .utils.helpers import *
# REGISTER BLUEPRINTS
from app.routes.index import main_service
app.register_blueprint(main_service, url_prefix='/v1/index') |
the-stack_0_2264 | from sqlalchemy import Column, Integer, String, ForeignKey, DateTime, Boolean
from sqlalchemy.orm import relationship
import datetime
from models.SessionWorkouts import SessionWorkouts
# from .SessionModel import Session
from database import Base
class Workout(Base):
__tablename__ = "workout"
id = Column(Integer, primary_key=True, index=True)
name = Column(String, unique=True, nullable=False)
repetition = Column(Integer, unique=False, nullable=False)
set = Column(Integer, unique=False, nullable=False)
weight = Column(Integer, unique=False, nullable=False)
done = Column(Boolean, default=False)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
updated_at = Column(DateTime, default=datetime.datetime.utcnow)
sessions = relationship("Session", secondary=SessionWorkouts.__tablename__, backref="workout")
exercise_id = Column(Integer, ForeignKey("exercise.id"), nullable=False)
exercise = relationship("Exercise", back_populates="workout")
# session_has_user = relationship("SessionHasUser", back_populates="workout")
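# Example (sketch, not part of the original model): creating a row with an assumed
# SQLAlchemy session obtained from the application's database module.
#
#     workout = Workout(name="Bench press", repetition=8, set=4, weight=60, exercise_id=1)
#     session.add(workout)
#     session.commit()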
|
the-stack_0_2266 | #!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2019 Antti Lukats <[email protected]>
# Copyright (c) 2019 msloniewski <[email protected]>
# Copyright (c) 2019 Florent Kermarrec <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
import os
import argparse
from migen import *
from litex_boards.platforms import c10lprefkit
from litex.soc.cores.clock import Cyclone10LPPLL
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
from litedram.modules import MT48LC16M16
from litedram.phy import GENSDRPHY
from liteeth.phy.mii import LiteEthPHYMII
from litex.soc.cores.hyperbus import HyperRAM
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys_ps = ClockDomain(reset_less=True)
# # #
# Clk / Rst
clk12 = platform.request("clk12")
# PLL
self.submodules.pll = pll = Cyclone10LPPLL(speedgrade="-A7")
self.comb += pll.reset.eq(~platform.request("cpu_reset") | self.rst)
pll.register_clkin(clk12, 12e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys_ps, sys_clk_freq, phase=90)
# SDRAM clock
self.comb += platform.request("sdram_clock").eq(self.cd_sys_ps.clk)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
mem_map = {
"hyperram": 0x20000000,
}
mem_map.update(SoCCore.mem_map)
def __init__(self, sys_clk_freq=int(50e6), with_led_chaser=True,
with_ethernet=False, with_etherbone=False,
**kwargs):
platform = c10lprefkit.Platform()
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on C10 LP RefKit",
**kwargs)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# HyperRam ---------------------------------------------------------------------------------
self.submodules.hyperram = HyperRAM(platform.request("hyperram"))
self.add_wb_slave(self.mem_map["hyperram"], self.hyperram.bus)
self.add_memory_region("hyperram", self.mem_map["hyperram"], 8*1024*1024)
# SDR SDRAM --------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.sdrphy = GENSDRPHY(platform.request("sdram"), sys_clk_freq)
self.add_sdram("sdram",
phy = self.sdrphy,
module = MT48LC16M16(sys_clk_freq, "1:1"),
l2_cache_size = kwargs.get("l2_size", 8192)
)
# Ethernet / Etherbone ---------------------------------------------------------------------
if with_ethernet or with_etherbone:
self.submodules.ethphy = LiteEthPHYMII(
clock_pads = self.platform.request("eth_clocks"),
pads = self.platform.request("eth"))
if with_ethernet:
self.add_ethernet(phy=self.ethphy)
if with_etherbone:
self.add_etherbone(phy=self.ethphy)
# Leds -------------------------------------------------------------------------------------
if with_led_chaser:
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on C10 LP RefKit")
parser.add_argument("--build", action="store_true", help="Build bitstream.")
parser.add_argument("--load", action="store_true", help="Load bitstream.")
parser.add_argument("--sys-clk-freq", default=50e6, help="System clock frequency.")
parser.add_argument("--with-ethernet", action="store_true", help="Enable Ethernet support.")
parser.add_argument("--with-etherbone", action="store_true", help="Enable Etherbone support.")
builder_args(parser)
soc_core_args(parser)
args = parser.parse_args()
soc = BaseSoC(
sys_clk_freq = int(float(args.sys_clk_freq)),
with_ethernet = args.with_ethernet,
with_etherbone = args.with_etherbone,
**soc_core_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder.build(run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".sof"))
if __name__ == "__main__":
main()
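# Typical invocations (sketch; assumes this target script is saved as c10lprefkit.py):
#   python3 c10lprefkit.py --build --sys-clk-freq 50e6 --with-ethernet
#   python3 c10lprefkit.py --load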
|
the-stack_0_2267 | import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from simupy.systems import LTISystem
from simupy.systems.symbolic import DynamicalSystem, dynamicsymbols
from simupy.block_diagram import BlockDiagram
from sympy.tensor.array import Array
legends = [r'$x_1(t)$', r'$x_2(t)$', r'$x_3(t)$', r'$u(t)$']
tF = 6
"""
This example shows the design of a linear quadratic regulator for a
nonlinear system linearized about the origin. It is stable for some initial
conditions, but not all initial conditions. The region of stability is not
dependent only on the distance from the origin.
"""
# construct system
x = Array(dynamicsymbols('x1:4'))
u = dynamicsymbols('u')
x1, x2, x3 = x
sys = DynamicalSystem(Array([-x1+x2-x3, -x1*x2-x2+u, -x1+u]), x, Array([u]))
# linearization to design LQR
t0 = 0
x0 = np.zeros((3, 1))
u0 = 0
A = sys.state_jacobian_equation_function(t0, x0, u0)
B = sys.input_jacobian_equation_function(t0, x0, u0)
# LQR gain
Q = np.eye(3)
R = np.matrix([1])
S = linalg.solve_continuous_are(A, B, Q, R,)
K = linalg.solve(R, B.T @ S).reshape(1, -1)
ctr_sys = LTISystem(-K)
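# Sanity check (added for illustration): with Q > 0 and R > 0 the LQR gain makes the
# closed-loop linearization A - B*K Hurwitz, i.e. every eigenvalue has negative real part.
assert np.all(np.linalg.eigvals(A - B @ K).real < 0)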
# Construct block diagram
BD = BlockDiagram(sys, ctr_sys)
BD.connect(sys, ctr_sys)
BD.connect(ctr_sys, sys)
# case 1 - un-recoverable
sys.initial_condition = np.r_[1, 1, 2.25]
result1 = BD.simulate(tF)
plt.figure()
plt.plot(result1.t, result1.y)
plt.legend(legends)
plt.title('controlled system with unstable initial conditions')
plt.xlabel('$t$, s')
plt.tight_layout()
plt.show()
# case 2 - recoverable
sys.initial_condition = np.r_[5, -3, 1]
result2 = BD.simulate(tF)
plt.figure()
plt.plot(result2.t, result2.y)
plt.legend(legends)
plt.title('controlled system with stable initial conditions')
plt.xlabel('$t$, s')
plt.tight_layout()
plt.show()
|
the-stack_0_2269 | """Bit manipulation class."""
import math
from abc import abstractmethod
from copy import deepcopy
from typing import (
Any,
Container,
Iterable,
Iterator,
MutableSequence,
SupportsInt,
Tuple,
Union,
overload,
)
from biterator._biterators import biterate
from biterator.bits_exceptions import SubscriptError
from biterator.const import ONES, ZEROS, DirtyBits, ValidBit
class Bits(MutableSequence[bool]):
"""
Stores bits in a list-like object, supports all bit-wise operators.
Bits can be instantiated with:
* A string of binary e.g. "1010" or "0b1100_0010".
* A prefixed string of hexadecimals e.g. "0x1f 0xb2" or "0xbadc0de".
* A bytes-like object.
* An integer-like object with a specified bit_length.
* An Iterable containing any of: True, False, 0, 1, "0", "1".
* An Iterable of arbitrary objects specifed by 'ones' and 'zero' collections as arguments.
The add (+) operator functions as concatination only, and supports all of
the above schemes. Addition may be done by first casting to int.
Binary and hexadecimal representations may be accessed with the 'bin' and
'hex' properties and the 'decode' method may be used to read the bits as
bytes using a specified codec.
>>> bits = Bits(); bits.extend('1010'); bits.bin() # Concatenate regular strings of binary.
'0b1010'
>>> bits.extend(dict(value=15, bit_length=4)); bits.bin() # Concatenate bits from an integer.
'0b1010_1111'
>>> bits.extend(b"A"); bits.bin(compact=True) # Concatenate bytes-like objects.
'0b1010111101000001'
>>> Bits("0xFF") + "0b1001_1001" # Concatenation directly with (+) operator.
Bits("0b1111111110011001")
>>> Bits("1111 0011 0000 1010")[:8] # Instantiate with binary; slicing is supported.
Bits("0b11110011")
>>> Bits("0xAAAA")[0:8:2] # Instantiate with hex; advanced slicing
Bits("0b1111")
>>> Bits("1111") << 4 # Bitshift operators supported
Bits("0b11110000")
>>> Bits(15, bit_length=4) # Add bits from integers
Bits("0b1111")
>>> Bits(255, -8)
Traceback (most recent call last):
ValueError: 'bit_length' must be provided and must be greater than 0 for integer values
All bitwise operators are supported.
'NOR' mask example, left and right 'NOR' with eachother when the mask is active:
>>> mask_ = Bits('00001111')
>>> left_ = Bits('01010101')
>>> right = Bits('00110011')
>>> ((mask_ ^ left_) & (mask_ | left_) & ~(mask_ & right)).bin()
'0b0101_1000'
"""
# Pylint literally crashes on this line for some reason.
__slots__ = ["__bytes", "__last_byte", "__len_last_byte", "__len"] # pylint: disable=all (literally will crash)
__bytes: bytearray
__len: int
# Contains the trailing (incomplete) byte; has less than the 8 bits of an actual byte.
__last_byte: int
__len_last_byte: int
def __new__(cls, bit_values: Union[Iterable, int] = None, *args, **kwargs):
"""
Copy Bits object if passed as argument to Bits class.
>>> class BitsTest(Bits):
... def copy(self):
... print('copied')
... return super().copy()
>>> bits_1 = BitsTest('1010')
>>> bits_2 = BitsTest(bits_1)
copied
>>> bits_2 += '1111'
>>> bits_1
Bits("0b1010")
>>> bits_2
Bits("0b10101111")
"""
if isinstance(bit_values, cls):
return bit_values.copy()
return super().__new__(cls)
def __init__(
self,
bit_values: Union[Iterable, int] = None,
bit_length: int = None,
ones: Container = None,
zeros: Container = None,
):
"""
Create a new Bits object from an Iterable of bit like object.
Create from a string of binary e.g.: "1010", "0b1001", or "0b1001_1101"
Create from a string of hex values: "0xFA 0xDE", "0XFFAA", "0Xab"
Create from bytes-like objects.
Create from Iterable of arbitrary objects by specifying containers
of objects for 'ones' and 'zeros'.
>>> Bits("10011001")
Bits("0b10011001")
>>> Bits("ffffabababffff", ones={"a"}, zeros={"b"})
Bits("0b101010")
>>> Bits("0xFF")
Bits("0b11111111")
>>> Bits("MPMPEEMP", ones="M", zeros="EP")
Bits("0b10100010")
>>> Bits() + b"Hi"
Bits("0b0100100001101001")
>>> def double_gen(size: int):
... if size:
... yield size % 4 < 2
... yield from double_gen(size - 1)
>>> Bits(double_gen(16)) # Supports generators
Bits("0b1001100110011001")
>>> Bits(255, 8)
Bits("0b11111111")
>>> Bits(255)
Traceback (most recent call last):
ValueError: 'bit_length' must be provided and must be greater than 0 for integer values
:param bit_values: Values to initialize a Bits object with.
:param bit_length: Bit length if an integer is given for bit_values.
:param ones: If set, symbols in this collection will represent True bits.
:param zeros: If set, symbols in this collection will represent False bits.
"""
self.__bytes = bytearray()
self.__len_last_byte = 0
self.__last_byte = 0
self.__len = 0
if bit_values is None and any(arg is not None for arg in (bit_length, ones, zeros)):
raise ValueError("unexpected argument, 'bit_values' must be set or there must be no other args set")
elif bit_values is not None:
for value in biterate(bit_values, bit_length=bit_length, ones=ones, zeros=zeros):
self.append(value)
@classmethod
def _clean_bits(
cls,
dirty_bits: DirtyBits,
ones: Container = None,
zeros: Container = None,
) -> Iterator[bool]:
# noinspection PyUnresolvedReferences
"""
Attempt, by a biterator, to iterate over `dirty_bits`; yields Booleans.
`dirty_bits` can be a dictionary of the form {"value": 15, "bit_length": 4}
to iterate over the bits of an integer.
>>> list(Bits._clean_bits(dict(value=255, bit_length=8))) == [True] * 8
True
>>> "".join("1" if bit else "0" for bit in Bits._clean_bits((1, 0, 0, 1)))
'1001'
>>> list(Bits._clean_bits(dict(value=255)))
Traceback (most recent call last):
ValueError: unsupported dict format {'value': 255}
:param dirty_bits: The bits containing object.
:param ones: If set, symbols in this collection will represent True bits.
:param zeros: If set, symbols in this collection will represent False bits.
"""
# Iterate from another Bits object.
if isinstance(dirty_bits, cls):
yield from dirty_bits
return
# Biterate an integer
if isinstance(dirty_bits, dict):
if "value" in dirty_bits and "bit_length" in dirty_bits:
bit_values = dirty_bits["value"]
bit_length = dirty_bits["bit_length"]
yield from biterate(bit_values=bit_values, bit_length=bit_length)
return
raise ValueError(f"unsupported dict format {repr(dirty_bits)}")
# Biterate other values
yield from biterate(bit_values=dirty_bits, ones=ones, zeros=zeros)
def copy(self) -> "Bits":
"""
Return a deep copy of the Bits object.
>>> bits_1 = Bits('1111')
>>> bits_2 = bits_1.copy()
>>> bits_2 += '1111'
>>> bits_2.bin(True, prefix="")
'11111111'
>>> bits_1.bin(True, prefix="")
'1111'
"""
return deepcopy(self)
def __repr__(self) -> str:
# noinspection PyUnresolvedReferences
"""
Represent the Bits object.
Equivalent to code which would create an identical object
but only up to a size of 64 bytes; after which it is abbreviated.
>>> Bits([1, 0]*32)
Bits("0b1010101010101010101010101010101010101010101010101010101010101010")
>>> exec("bits = " + repr(Bits([1, 0]*32))); bits == Bits([1, 0]*32)
True
>>> Bits('0xCAB00D1E'*6)
Bits(4969887947907717934627081996608040267272832614365316255006, 192)
>>> exec("bits = " + repr(Bits('0xCAB00D1E'*6))); bits == Bits('0xCAB00D1E'*6)
True
>>> Bits('0xBA5EBA11'*10)
Bits("0xBA5EBA11BA5EBA11BA5EBA11BA5EBA11BA5EBA11BA5EBA11BA5EBA11BA5EBA11BA5EBA11BA5EBA11")
>>> exec("bits = " + repr(Bits('0xBA5EBA11'*10))); bits == Bits('0xBA5EBA11'*10)
True
>>> Bits('0xDEADBEEF'*10) + '1'
Bits("0xDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF80", 321)
>>> exec("bits = " + repr(Bits('0xDEADBEEF'*10) + '1')); bits == Bits('0xDEADBEEF'*10) + '1'
True
>>> Bits('0x0DDBA11'*200)
Bits("0x0D 0xDB 0xA1 ... 0xDD 0xBA 0x11", bit_length=5_600)
>>> Bits('0x0DDBA11'*200) + '1001'
Bits("0x0D 0xDB 0xA1 ... 0xBA 0x11 0x90", bit_length=5_604)
"""
if self.__len <= 64:
return f'Bits("{format(int(self), f"#0{self.__len + 2}b")}")'
largest_possible_decimal = int(math.log(1 << self.__len, 10))
if largest_possible_decimal <= 64:
return f'Bits({format(int(self), f"0{largest_possible_decimal}d")}, {self.__len})'
if self.__len // 8 <= 64:
if self.__len_last_byte > 0:
return f'Bits("{self.hex(compact=True)}", {self.__len})'
return f'Bits("{self.hex(compact=True)}")'
length_str = f"bit_length={self.__len:_d}"
if self.__len_last_byte > 0:
return f'Bits("{self[:24].hex()} ... {self[-(self.__len_last_byte + 16):].hex()}", {length_str})'
return f'Bits("{self[:24].hex()} ... {self[-24:].hex()}", {length_str})'
def iter_bytes(self) -> Iterator[int]:
# noinspection PyUnresolvedReferences
"""
Generate bytes from the bits.
        Yield an integer representation of each byte.
An incomplete byte will be written from the left, for example:
>>> for byte in Bits('10101010 1111').iter_bytes(): print(bin(byte))
0b10101010
0b11110000
:return: The Iterator.
"""
yield from self.__bytes
if self.__len_last_byte:
yield self.__last_byte << 8 - self.__len_last_byte
def __bytes__(self) -> bytes:
r"""
Return bytes object; an incomplete byte is written from the left.
For example:
>>> bytes(Bits('1111'))
b'\xf0'
"""
return bytes(self.iter_bytes())
def decode(self, *args, **kwargs) -> str:
"""
Decode the bytes using the codec registered for encoding.
Wraps the `bytes.decode()` method.
>>> Bits('01101000 01101001 00100000 01101101 01100001 01110010 01101011').decode('utf-8')
'hi mark'
:param args:
:param kwargs:
:return:
"""
return bytes(self).decode(*args, **kwargs)
def __bool__(self):
"""Return true if not empty."""
for byte in self.__bytes:
if byte > 0:
return True
return bool(self.__last_byte)
def _byte_bit_indices(self, index: int) -> Tuple[int, int, int]:
"""
Calculate byte index, bit index (on the byte), and apply clipping to the original index.
:param index: The index to calculate.
:return: The tuple with computed index values.
"""
if index >= self.__len or index < -self.__len:
raise IndexError
# Modulo corrects negative indices
if index < 0:
index %= self.__len
# The first is the index of the byte that the index is within.
# The second is the index of the bit within the byte (counting from the left).
return index // 8, index % 8, index
@classmethod
def _clean_bit(cls, value: ValidBit) -> bool:
"""
Ensure bit is a ValidBit, cast to bool.
>>> Bits._clean_bit('1')
True
>>> Bits._clean_bit(0)
False
>>> Bits._clean_bit('a')
Traceback (most recent call last):
TypeError: could not determine single bit value for 'a'
:param value: The value to check.
:return: The bool representation.
"""
if value in ONES:
return True
if value in ZEROS:
return False
raise TypeError(f"could not determine single bit value for {repr(value)}")
def insert(self, index: int, value: ValidBit) -> None:
"""
Insert a bit at given index.
>>> bits = Bits('001'); bits.insert(0, True); bits.bin()
'0b1001'
>>> for _ in range(4): bits.insert(len(bits), False)
>>> bits.bin()
'0b1001_0000'
>>> bits.insert(5, "1"); bits.bin()
'0b1001_0100 0b0'
>>> bits.insert(-2, 1); bits.bin()
'0b1001_0101 0b00'
>>> bits = Bits('11110000 11110000 11110000 '); bits.insert(5, "1"); bits.bin(prefix="", group=False)
'11110100 01111000 01111000 0'
>>> bits.insert(0, 'g')
Traceback (most recent call last):
TypeError: could not determine single bit value for 'g'
:param index: The index at whitch to insert the bit.
:param value: The bit to be inserted.
"""
if not isinstance(value, bool):
value = self._clean_bit(value)
# If the index is above the length, set it to the length.
# If the index is below the negative length, set it to the negative length.
# Then if the new index is negative, take the modulo to get the correct positive index.
if self.__len == 0:
index = 0
else:
if index >= 0:
index = min(self.__len, index)
else:
index = max(-self.__len, index) % self.__len
byte_index, bit_index = index // 8, index % 8
# If appending to the end.
if index == self.__len:
self.__last_byte = (self.__last_byte << 1) | value
self._increment_last_byte()
# If inserting within the last (incomplete) byte.
elif byte_index == len(self.__bytes):
self.__last_byte = self._insert_bit_in_byte(self.__last_byte, self.__len_last_byte, bit_index, value)
self._increment_last_byte()
# If inserting anywhere else.
else:
# Insert the bit then remove the rightmost bit to carry over into the next byte to the right.
new_byte = self._insert_bit_in_byte(self.__bytes[byte_index], 8, bit_index, value)
carry = new_byte & 1
new_byte >>= 1
# Append the byte with the carry over bit removed.
self.__bytes[byte_index] = new_byte
# Repeat for the remaining whole bytes to the right of the index.
for i in range(byte_index + 1, len(self.__bytes)):
new_byte = (carry << 8) | self.__bytes[i]
carry = new_byte & 1
new_byte >>= 1
self.__bytes[i] = new_byte
# Append the last carry bit to the last (incomplete) byte, and increment it's length.
self.__last_byte = (carry << self.__len_last_byte) | self.__last_byte
self._increment_last_byte()
def extend(self, values: DirtyBits) -> None:
"""Override of the mixin to add data validation."""
# Prevent race conditions by copying if extending by self
for v in self.copy() if values is self else self._clean_bits(values):
self.append(v)
@staticmethod
def _insert_bit_in_byte(byte: int, length: int, index: int, value: bool) -> int:
"""
Insert a bit in a byte, indexed from the left.
>>> bin(Bits._insert_bit_in_byte(0b1010010, 7, 4, True))
'0b10101010'
:param byte: Byte in which to insert the bit.
:param length: Length of the Byte.
:param index: Index at which to insert the bit.
:param value: Value to be inserted.
:return: Byte with new bit inserted.
"""
right_index = length - index
left_bits = byte >> right_index
right_bits = byte & ((1 << right_index) - 1)
return (((left_bits << 1) | value) << right_index) | right_bits
def _increment_last_byte(self) -> None:
"""
Call when a bit has been added anywhere in the last (incomplete) byte.
>>> bits = Bits(0b111_1111, 7); bits.last_byte_length
7
>>> bits.append(False); bits.last_byte_length
0
>>> len(bits)
8
"""
self.__len_last_byte += 1
self.__len += 1
if self.__len_last_byte == 8:
self.__bytes.append(self.__last_byte)
self.__last_byte = 0
self.__len_last_byte = 0
@overload
@abstractmethod
def __getitem__(self, i: int) -> bool:
"""Retrieve a bit."""
...
@overload
@abstractmethod
def __getitem__(self, s: slice) -> "Bits":
"""Retrieve a slice of bits."""
...
def __getitem__(self, index):
"""
Retrieve a bit or a slice of bits.
>>> Bits('0001 0000')[3]
True
>>> Bits('0001 0000')[-5]
True
>>> Bits('0001 1000')[3:5]
Bits("0b11")
>>> Bits("00001111 00110011 01010101")[:-16]
Bits("0b00001111")
>>> Bits("00001111 00110011 01010101")[-8:]
Bits("0b01010101")
>>> Bits('01001001')["s"]
Traceback (most recent call last):
biterator.bits_exceptions.SubscriptError: unsupported subscript, 'Bits' does not support 'str' subscripts
:param index: The index or slice to retrieve.
:return: The new Bits object or a bit value.
"""
if isinstance(index, int):
byte_index, bit_index, index = self._byte_bit_indices(index)
# If the index is in the last (incomplete) byte.
if byte_index == len(self.__bytes):
return self._get_bit_from_byte(self.__last_byte, self.__len_last_byte, bit_index)
# If the index is anywhere else.
return self._get_bit_from_byte(self.__bytes[byte_index], 8, bit_index)
if isinstance(index, slice):
start, stop, step = index.indices(self.__len)
# For the case where the slice starts from a whole byte.
if step == 1 and start % 8 == 0:
last_byte_index, last_bit_index = stop // 8, stop % 8
start_byte_index = start // 8
new = type(self)(self.__bytes[start_byte_index:last_byte_index])
# Append any remaining bits.
if last_bit_index:
for i in range(stop - last_bit_index, stop):
# Recurse into the branch for integers
new.append(self[i])
return new
# For all other cases (not particularly efficient).
new = type(self)()
for i in range(start, stop, step):
# Recurse into the branch for integers
new.append(self[i])
return new
raise SubscriptError(self, index)
@staticmethod
def _get_bit_from_byte(byte: int, length: int, index: int) -> bool:
"""
Return the bit value at the given index, indexed from the left.
>>> Bits._get_bit_from_byte(0b00000100, 8, 5)
True
:param byte: Byte from which to get a bit.
:param length: Length of the byte.
:param index: Index of the bit to retrieve.
:return: The value of the bit.
"""
right_index = length - index - 1
return bool((1 << right_index) & byte)
@overload
@abstractmethod
def __setitem__(self, i: int, o: ValidBit) -> None:
"""Set a bit."""
...
@overload
@abstractmethod
def __setitem__(self, s: slice, o: DirtyBits) -> None:
"""Set a slice of bits."""
...
def __setitem__(self, index, other):
"""
Set a bit or slice of bits.
>>> bits = Bits('1111 1111 1111'); bits[4:8] = '0000'; bits.bin()
'0b1111_0000 0b1111'
>>> bits[4:8] = 15; bits.bin()
'0b1111_1111 0b1111'
>>> bits[-4:] = '0000'; bits.bin()
'0b1111_1111 0b0000'
>>> bits[0] = False; bits.bin()
'0b0111_1111 0b0000'
:param index: The index or slice to modify.
:param other: The bit or bits to replace the old bit or bits.
"""
if isinstance(index, int):
other = self._clean_bit(other)
byte_index, bit_index, index = self._byte_bit_indices(index)
# If the index is in the last (incomplete) byte.
if byte_index == len(self.__bytes):
self.__last_byte = self._set_bit_in_byte(self.__last_byte, self.__len_last_byte, bit_index, other)
# If the index is anywhere else.
else:
self.__bytes[byte_index] = self._set_bit_in_byte(self.__bytes[byte_index], 8, bit_index, other)
elif isinstance(index, slice):
start, stop, step = index.indices(self.__len)
# Cast other to a Bits object
if isinstance(other, int):
other_bit = iter(type(self)(other, stop - start))
else:
other_bit = iter(type(self)(other))
try:
for i in range(start, stop, step):
# Recurse into the branch for integers
self[i] = next(other_bit)
except StopIteration:
pass
else:
raise SubscriptError(self, index)
@classmethod
def _set_bit_in_byte(cls, byte: int, length: int, index: int, value: bool) -> int:
"""
Modify a bit in a byte, indexed from the left.
>>> Bits._set_bit_in_byte(0b11011111, 8, 2, True)
255
:param byte: Byte in which to modify a bit.
:param length: Length of the byte.
:param index: Index of the bit to modify.
:param value: Value to modify the bit to.
:return: The Byte with bit modified.
"""
right_index = length - index - 1
# If the bit is the same, do nothing.
if bool((1 << right_index) & byte) == value:
return byte
# The bit is different, flip it.
return (1 << right_index) ^ byte
@overload
@abstractmethod
def __delitem__(self, i: int) -> None:
"""Remove a single bit."""
...
@overload
@abstractmethod
def __delitem__(self, i: slice) -> None:
"""Remove a slice."""
...
def __delitem__(self, index):
"""
Remove a bit or a slice.
>>> bits = Bits("1000 0000 0000 0100 0001"); del bits[13]; bits.bin()
'0b1000_0000 0b0000_0000 0b001'
>>> bits = Bits("1010 1010 1010 1010 0000"); del bits[1::2]; bits.bin()
'0b1111_1111 0b00'
>>> del bits[8:10]; bits.bin()
'0b1111_1111'
>>> del bits[-4:]; bits.bin()
'0b1111'
:param index: Index or slice to delete.
"""
if isinstance(index, int):
byte_index, bit_index, index = self._byte_bit_indices(index)
# If the bit being deleted is in the last (incomplete) byte.
if byte_index == len(self.__bytes):
self.__last_byte = self._del_bit_from_byte(self.__last_byte, self.__len_last_byte, bit_index)
self._decrement_last_byte()
# All other cases.
else:
# Remove the bit from the target byte, then append the first bit from the next byte.
# Cascade similarly through the list of bytes.
new_byte = self._del_bit_from_byte(self.__bytes[byte_index], 8, bit_index)
for i in range(byte_index + 1, len(self.__bytes)):
first_bit = bool(self.__bytes[i] & 0b1000_0000)
self.__bytes[i - 1] = (new_byte << 1) | first_bit
new_byte = self.__bytes[i] & 0b0111_1111
# If the last (incomplete) byte is not empty, append the first bit from it.
if self.__len_last_byte:
first_bit = bool(self.__last_byte & (1 << self.__len_last_byte - 1))
self.__bytes[-1] = (new_byte << 1) | first_bit
# Truncate the first bit of the last (incomplete) byte.
self.__last_byte &= (1 << self.__len_last_byte - 1) - 1
# If the last (incomplete) byte is empty, remove the last full byte.
else:
self.__bytes.pop()
# The former last full byte becomes the last (incomplete) byte with its first bit removed.
self.__last_byte = new_byte
# Decrement the length and last (incomplete) byte length in both cases.
self._decrement_last_byte()
elif isinstance(index, slice):
start, stop, step = index.indices(self.__len)
# NOTE: ***VERY inefficient*** Consider refactor.
# NOTE: Good opportunity to use interval library to remove all deleted bits and concat what remains.
# Always proceeds in reverse order to not mess up the indexing.
removal_indices = sorted(list(range(start, stop, step)), reverse=True)
for i in removal_indices:
del self[i]
else:
raise SubscriptError(self, index)
@staticmethod
def _del_bit_from_byte(byte: int, length: int, index: int) -> int:
"""
Remove a bit from a byte, indexed from the left.
>>> Bits._del_bit_from_byte(0b00010000, 8, 3)
0
:param byte: Byte from which to remove a bit.
:param length: Length of the byte.
:param index: Index of the bit to remove.
:return: The Byte with bit removed.
"""
right_index = length - index
left_bits = (byte >> right_index) << right_index - 1
right_bits = byte & ((1 << right_index - 1) - 1)
return left_bits | right_bits
def _decrement_last_byte(self) -> None:
"""
Call when a bit has been removed anywhere in the last (incomplete) byte.
>>> bits = Bits(0b010001000, 9); bits.last_byte_length
1
>>> del bits[0]; bits.last_byte_length
0
"""
self.__len_last_byte -= 1
self.__len -= 1
if self.__len_last_byte < 0:
self.__len_last_byte = 7
def __invert__(self) -> "Bits":
"""
Return a Bits object with each bit inverted.
>>> (~Bits('01001110')).bin()
'0b1011_0001'
:return: The Bits object with inverted bits.
"""
return type(self)(not bit for bit in self)
def __int__(self) -> int:
"""
Represent the sequence of bits as an int.
>>> int(Bits("0xff"))
255
>>> int(Bits("0xfff"))
4095
>>> int(Bits("0xffff"))
65535
:return: The integer representation.
"""
return (int.from_bytes(self.__bytes, "big") << self.__len_last_byte) | self.__last_byte
def __len__(self) -> int:
"""Total number of bits."""
return self.__len
def __lt__(self, other: SupportsInt) -> bool:
"""Int value of bits is less than the int value of other."""
if isinstance(other, SupportsInt):
return int(self) < int(other)
return NotImplemented
def __le__(self, other: SupportsInt) -> bool:
"""Int value of bits is less than or equal to the int value of other."""
if isinstance(other, SupportsInt):
return int(self) <= int(other)
return NotImplemented
def __eq__(self, other: Any) -> bool:
"""Bits are equal or Int value of Bits are equal to the int value of other."""
if isinstance(other, type(self)):
if all(
(
self.__len == other.__len,
self.__bytes == other.__bytes,
self.__last_byte == other.__last_byte,
),
):
return True
return False
if isinstance(other, SupportsInt):
return int(self) == int(other)
return NotImplemented
def __ne__(self, other: Any) -> bool:
"""Bits are not equal or Int value of Bits are not equal to the int value of other."""
if isinstance(other, type(self)):
if not all(
(
self.__len == other.__len,
self.__bytes == other.__bytes,
self.__last_byte == other.__last_byte,
),
):
return True
return False
if isinstance(other, SupportsInt):
return int(self) != int(other)
return NotImplemented
def __gt__(self, other: SupportsInt) -> bool:
"""Int value of bits is greater than the int value of other."""
if isinstance(other, SupportsInt):
return int(self) > int(other)
return NotImplemented
def __ge__(self, other: SupportsInt) -> bool:
"""Int value of bits is greater than or equal to the int value of other."""
if isinstance(other, SupportsInt):
return int(self) >= int(other)
return NotImplemented
# Concatenate
def __add__(self, other: DirtyBits) -> "Bits":
"""
Concatenate bits; NOT addition.
>>> (Bits("0110") + Bits("1001")).bin()
'0b0110_1001'
>>> (Bits("0110") + "1001").bin()
'0b0110_1001'
>>> (Bits("0110") + dict(value=15, bit_length=4)).bin() # Concat an integer
'0b0110_1111'
>>> bits = Bits('10'*10); bits += bits; bits.bin(True, "")
'1010101010101010101010101010101010101010'
>>> Bits('01000101') + b"Z"
Bits("0b0100010101011010")
>>> Bits('01000101') + "Z"
Traceback (most recent call last):
ValueError: non valid binary 'Z' was found in the string
:param other: Other object to be concatenated.
:return: New Bits object that is a concatenation of the inputs.
"""
if isinstance(other, (Iterable, dict)):
new = self.copy()
new.extend(other)
return new
return NotImplemented
def __radd__(self, other: DirtyBits) -> "Bits":
"""
Right concatenation.
>>> "1001" + Bits("0110")
Bits("0b10010110")
"""
if isinstance(other, (Iterable, dict)):
new = type(self)()
new.extend(other)
new.extend(self)
return new
return NotImplemented
def __iadd__(self, other: DirtyBits) -> "Bits":
"""
Extend in-place.
>>> bits = Bits("1111"); bits += "0000"; bits.bin()
'0b1111_0000'
>>> bits += dict(value=255, bit_length=8); bits.bin()
'0b1111_0000 0b1111_1111'
:param other: Bits to extend.
:return: The Bits object that was modified in place.
"""
if isinstance(other, (Iterable, dict)):
self.extend(other)
return self
return NotImplemented
# Left Bitshift
def __lshift__(self, index: int) -> "Bits":
"""
Left shift the bits.
>>> (Bits("1111") << 4).bin()
'0b1111_0000'
:param index: Number of places to shift
:return: Shifted Bits object
"""
if isinstance(index, SupportsInt):
new = self.copy()
new.extend(type(self)(0, int(index)))
return new
return NotImplemented
def __ilshift__(self, index: int) -> "Bits":
"""
Left bitshift in-place.
>>> bits = Bits("1111"); bits <<= 4; bits.bin()
'0b1111_0000'
:param index: Number of places to shift.
:return: The Bits object that was modified in place.
"""
if isinstance(index, SupportsInt):
self.extend({"value": 0, "bit_length": int(index)})
return self
return NotImplemented
# Right Bitshift
def __rshift__(self, index: int) -> "Bits":
"""
Right shift the bits.
>>> (Bits("11110000") >> 4).bin()
'0b1111'
:param index: Number of places to shift
:return: Shifted Bits object
"""
if isinstance(index, SupportsInt):
return type(self)(self[: -int(index)])
return NotImplemented
def __irshift__(self, index: int) -> "Bits":
"""
Right bitshift in-place.
>>> bits = Bits("1111 1111"); bits >>= 4; bits.bin()
'0b1111'
:param index: Number of places to shift.
:return: The Bits object that was modified in place.
"""
if index:
del self[-index:]
return self
# AND
def __and__(self, other: DirtyBits) -> "Bits":
"""
Bitwise and operation.
>>> (Bits('01111000') & Bits('00011110')).bin()
'0b0001_1000'
>>> (Bits('0111') & Bits('00011110')).bin()
'0b0001'
>>> (Bits("1110") & "0b0111").bin()
'0b0110'
>>> Bits("1110") & dict(value=7, bit_length=4)
Bits("0b0110")
:param other: Other Bits to 'and' with
:return: Combined Bits objects
"""
if isinstance(other, (Iterable, dict)):
return type(self)(a & b for a, b in zip(self, self._clean_bits(other)))
return NotImplemented
__rand__ = __and__
def __iand__(self, other: DirtyBits) -> "Bits":
"""
Bitwise 'and' with other bits; in-place.
>>> bits_ = Bits("1110"); bits_ &= "0111"; bits_.bin()
'0b0110'
:param other: The Iterable bits to 'and' with.
:return: The Bits object that was modified in place.
"""
if isinstance(other, (Iterable, dict)):
len_other = 1
for index, bits in enumerate(zip(self, self._clean_bits(other))):
self[index] = bits[0] & bits[1]
len_other += 1
if self.__len > len_other:
del self[-len_other:]
return self
return NotImplemented
# XOR
def __xor__(self, other: DirtyBits) -> "Bits":
"""
Bitwise xor operation.
>>> (Bits('01111000') ^ Bits('00011110')).bin()
'0b0110_0110'
>>> (Bits('01111000') ^ '0b00011110').bin()
'0b0110_0110'
>>> (Bits("1110") ^ "0111").bin()
'0b1001'
:param other: Other Bits to 'xor' with
:return: Combined Bits objects
"""
if isinstance(other, (Iterable, dict)):
return type(self)(a ^ b for a, b in zip(self, self._clean_bits(other)))
return NotImplemented
__rxor__ = __xor__
def __ixor__(self, other: DirtyBits) -> "Bits":
"""
Bitwise 'xor' with other bits; in-place.
>>> bits_ = Bits("0110"); bits_ ^= "0101"; bits_.bin()
'0b0011'
:param other: The Iterable bits to 'xor' with.
:return: The Bits object that was modified in place.
"""
len_other = 1
for index, bits in enumerate(zip(self, self._clean_bits(other))):
self[index] = bits[0] ^ bits[1]
len_other += 1
if self.__len > len_other:
del self[-len_other:]
return self
# OR
def __or__(self, other: DirtyBits) -> "Bits":
"""
Bitwise or operation.
>>> (Bits('01111000') | Bits('00011110')).bin()
'0b0111_1110'
>>> (Bits("1100") | "0011").bin()
'0b1111'
:param other: Other Bits to 'or' with
:return: Combined Bits objects
"""
return type(self)(a | b for a, b in zip(self, self._clean_bits(other)))
__ror__ = __or__
def __ior__(self, other: DirtyBits) -> "Bits":
"""
Bitwise 'or' with other bits; in-place.
>>> bits_ = Bits("1100"); bits_ |= "0011"; bits_.bin()
'0b1111'
:param other: The Iterable bits to 'or' with.
:return: The Bits object that was modified in place.
"""
len_other = 1
for index, bits in enumerate(zip(self, self._clean_bits(other))):
self[index] = bits[0] | bits[1]
len_other += 1
if self.__len > len_other:
del self[-len_other:]
return self
@property
def last_byte_length(self):
"""
If the total number of bits is not divisible by 8, get the remainder.
This property gives the length of the last incomplete byte in the object.
>>> bits = Bits("10011001 1010"); bits[-bits.last_byte_length:].bin(True, prefix="")
'1010'
:return: Number of bits in the last incomplete byte.
"""
return self.__len_last_byte
def hex(self, compact: bool = False, prefix: str = "0x", sep: str = " ", fmt: str = None) -> str:
r"""
Return a string with hexadecimal representation of each byte.
NOTE: The prefix argument can be set to the empty string and then
enabled in the formatting argument if that is preferred.
>>> Bits("0b1111").hex()
'0xF0'
>>> Bits("0b00_1111").hex() # Interpreted as 0011_1100
'0x3C'
>>> Bits("0b1111_1111 0b1111").hex()
'0xFF 0xF0'
>>> Bits("0b1111_1111 0b1111_1111").hex(compact=True, prefix=r"\x")
'\\xFFFF'
>>> Bits("0b1011_0001 0b1010_1101 0b1110_0101").hex(prefix="", compact=True)
'B1ADE5'
>>> Bits("0b1111_1111 0b1111_1111").hex(compact=True, prefix='', fmt="4X")
' FF FF'
:param compact: No separators and only prefixed at the beginning.
:param prefix: Prefix for each byte, default: '0x'.
:param sep: Separator between bytes, default ' '.
:param fmt: Formatting for each byte.
:return: The string representation of the bytes as hexadecimal.
"""
if compact:
ret_str = prefix + "".join(format(byte, fmt or "02X") for byte in self.iter_bytes())
else:
ret_str = sep.join(prefix + format(byte, fmt or "02X") for byte in self.iter_bytes())
return ret_str
def bin(self, compact: bool = False, prefix: str = "0b", sep: str = " ", group: bool = True) -> str:
"""
Return a string with the binary representations of each byte.
NOTE: The prefix argument can be set to the empty string if no prefix is desired.
>>> Bits(255, 8).bin()
'0b1111_1111'
>>> Bits(4095, 12).bin(prefix="")
'1111_1111 1111'
>>> Bits(65535, 16).bin(group=False)
'0b11111111 0b11111111'
>>> Bits("1111 11").bin()
'0b11_1111'
>>> Bits(43690, 16).bin(compact=True, prefix="")
'1010101010101010'
:param compact: No separators or grouping, only prefixed at the beginning.
:param prefix: Prefix on each byte, default '0b'.
:param sep: Spacer between bytes, default: ' '.
:param group: Whether to group each byte's digits into nibbles with '_', default: True.
:return: The string of the bits in binary representation.
"""
if compact:
ret_str = "".join(format(byte, "08b") for byte in self.__bytes)
if self.__len_last_byte:
ret_str += format(self.__last_byte, f"0{self.__len_last_byte}b")
ret_str = prefix + ret_str
else:
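# Width 9 = 8 binary digits plus the single '_' inserted by the '_' grouping option.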
ret_str = sep.join(prefix + format(byte, "09_b" if group else "08b") for byte in self.__bytes)
if self.__len_last_byte:
ret_str += sep if ret_str else ""
if group:
has_group = 1 if self.__len_last_byte > 4 else 0
last_byte_fmt = f"0{self.__len_last_byte + has_group}_b"
else:
last_byte_fmt = f"0{self.__len_last_byte}b"
ret_str += prefix + format(self.__last_byte, last_byte_fmt)
return ret_str
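# --- hedged usage sketch (not part of the original class) ---
# A minimal, self-contained demonstration of the behaviour documented in the
# doctests above; it only exercises methods defined in this module.
if __name__ == "__main__":
    bits = Bits("0b0110")
    bits += "1001"                       # concatenation, not addition
    assert bits.bin() == "0b0110_1001"
    assert int(bits) == 0b0110_1001
    assert (~Bits("01001110")).bin() == "0b1011_0001"
    assert Bits("0xff").hex() == "0xFF"
    print("Bits usage sketch passed")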
|
the-stack_0_2270 | import asyncio
import logging
from zof.event import load_event
LOGGER = logging.getLogger(__package__)
class Protocol(asyncio.SubprocessProtocol):
"""Implements an asyncio Protocol for parsing data received from oftr.
"""
def __init__(self, post_event):
self.post_event = post_event
self.buf = b''
self.exit_future = asyncio.Future()
def pipe_data_received(self, fd, data):
LOGGER.debug('zof.Protocol.pipe_data_received: %d bytes, fd=%d',
len(data), fd)
begin = 0
offset = len(self.buf)
self.buf += data
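# Events arriving from the child process are NUL-delimited; scan the buffer and post every complete message found.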
while True:
offset = self.buf.find(b'\x00', offset)
if offset < 0:
self.buf = self.buf[begin:]
return
if begin != offset:
self.post_event(load_event(self.buf[begin:offset]))
offset += 1
begin = offset
def pipe_connection_lost(self, fd, exc):
if exc is not None:
LOGGER.warning('zof.Protocol.pipe_connection_lost: fd=%d, exc=%r',
fd, exc)
def process_exited(self):
LOGGER.debug('zof.Protocol.process_exited')
self.post_event(load_event(b''))
self.exit_future.set_result(0)
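# --- hedged usage sketch (not part of the original module) ---
# A minimal illustration of wiring this Protocol to a child process via
# asyncio's subprocess API. The 'oftr' command line is an assumption for
# illustration only; post_event is any callable that accepts one event.
async def _example_run_driver(post_event, oftr_cmd=('oftr', 'jsonrpc')):
    loop = asyncio.get_event_loop()
    transport, protocol = await loop.subprocess_exec(
        lambda: Protocol(post_event), *oftr_cmd)
    await protocol.exit_future  # resolves when the child process exits
    transport.close()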
|
the-stack_0_2271 | """
Compilation modes: predefined linkers, optimizers, and the Mode class used by theano.function.
"""
from __future__ import absolute_import, print_function, division
import logging
import theano
from theano import gof
import theano.gof.vm
from theano.configparser import config
from theano.compile.ops import _output_guard
from six import string_types
_logger = logging.getLogger('theano.compile.mode')
# If a string is passed as the linker argument in the constructor for
# Mode, it will be used as the key to retrieve the real linker in this
# dictionary
predefined_linkers = {
'py': gof.PerformLinker(), # Use allow_gc Theano flag
'c': gof.CLinker(), # Don't support gc. so don't check allow_gc
'c|py': gof.OpWiseCLinker(), # Use allow_gc Theano flag
'c|py_nogc': gof.OpWiseCLinker(allow_gc=False),
'vm': gof.vm.VM_Linker(use_cloop=False), # Use allow_gc Theano flag
'cvm': gof.vm.VM_Linker(use_cloop=True), # Use allow_gc Theano flag
'vm_nogc': gof.vm.VM_Linker(allow_gc=False, use_cloop=False),
'cvm_nogc': gof.vm.VM_Linker(allow_gc=False, use_cloop=True)}
def register_linker(name, linker):
"""Add a `Linker` which can be referred to by `name` in `Mode`."""
if name in predefined_linkers:
raise ValueError('Linker name already taken: %s' % name)
predefined_linkers[name] = linker
# If a string is passed as the optimizer argument in the constructor
# for Mode, it will be used as the key to retrieve the real optimizer
# in this dictionary
exclude = []
if not theano.config.cxx:
exclude = ['cxx_only']
OPT_NONE = gof.Query(include=[], exclude=exclude)
# Even if multiple merge optimizer call will be there, this shouldn't
# impact performance.
OPT_MERGE = gof.Query(include=['merge'], exclude=exclude)
OPT_FAST_RUN = gof.Query(include=['fast_run'], exclude=exclude)
OPT_FAST_RUN_STABLE = OPT_FAST_RUN.requiring('stable')
# We need fast_compile_gpu here. On the GPU we don't have all the
# operations that exist in fast_compile, but we do have some that get
# introduced in fast_run, so we want those optimizations to also run in
# fast_compile+gpu. We can't tag them just as 'gpu', as this would
# exclude them if we exclude 'gpu'.
OPT_FAST_COMPILE = gof.Query(include=['fast_compile', 'fast_compile_gpu'],
exclude=exclude)
OPT_STABILIZE = gof.Query(include=['fast_run'], exclude=exclude)
OPT_STABILIZE.position_cutoff = 1.5000001
OPT_NONE.name = 'OPT_NONE'
OPT_MERGE.name = 'OPT_MERGE'
OPT_FAST_RUN.name = 'OPT_FAST_RUN'
OPT_FAST_RUN_STABLE.name = 'OPT_FAST_RUN_STABLE'
OPT_FAST_COMPILE.name = 'OPT_FAST_COMPILE'
OPT_STABILIZE.name = 'OPT_STABILIZE'
predefined_optimizers = {
None: OPT_NONE,
'None': OPT_NONE,
'merge': OPT_MERGE,
'fast_run': OPT_FAST_RUN,
'fast_run_stable': OPT_FAST_RUN_STABLE,
'fast_compile': OPT_FAST_COMPILE,
'stabilize': OPT_STABILIZE}
def register_optimizer(name, opt):
"""Add a `Optimizer` which can be referred to by `name` in `Mode`."""
if name in predefined_optimizers:
raise ValueError('Optimizer name already taken: %s' % name)
predefined_optimizers[name] = opt
class AddDestroyHandler(gof.Optimizer):
"""
This optimizer performs two important functions:
1) It has a 'requirement' of the destroyhandler. This means that the fgraph
will include it as a feature for this optimization, and keep this feature
enabled for subsequent optimizations. All optimizations that work inplace
on any of their inputs must run *after* this optimization to ensure that
the DestroyHandler has been included in the fgraph.
2) It tries to replace each output with an Op that purports to destroy it
(but it won't I promise). If this replacement succeeds it means that
there is a bug in theano. It should not be possible to destroy outputs.
"""
def apply(self, fgraph):
for o in fgraph.outputs:
try:
fgraph.replace_validate(o, _output_guard(o),
reason='output_guard')
_logger.info("Output variable %s required output_guard, "
"how was this output left unprotected against "
"destructive operations?"
% o)
except gof.InconsistencyError:
# This output is already impossible to destroy.
# No guard necessary
pass
def add_requirements(self, fgraph):
super(AddDestroyHandler, self).add_requirements(fgraph)
fgraph.attach_feature(gof.DestroyHandler())
class AddFeatureOptimizer(gof.Optimizer):
"""
This optimizer adds a provided feature to the function graph.
"""
def __init__(self, feature):
self.feature = feature
def add_requirements(self, fgraph):
super(AddFeatureOptimizer, self).add_requirements(fgraph)
fgraph.attach_feature(self.feature)
class PrintCurrentFunctionGraph(gof.Optimizer):
"""
This optimizer is for debugging.
Toss it into the optimization pipeline to see the state of things at any
given point.
"""
def __init__(self, header):
self.header = header
def apply(self, fgraph):
import theano.printing
print("PrintCurrentFunctionGraph:", self.header)
theano.printing.debugprint(fgraph.outputs)
optdb = gof.SequenceDB()
optdb.register('merge1', gof.MergeOptimizer(),
0, 'fast_run', 'fast_compile', 'merge')
# After scan1 opt at 0.5 and before ShapeOpt at 1
# This should only remove nodes.
# The opt should not do anything that needs shape inference.
# New nodes without infer_shape are only allowed if the original node
# also lacked infer_shape.
local_useless = gof.optdb.LocalGroupDB(apply_all_opts=True, profile=True)
optdb.register(
'useless',
gof.optdb.TopoDB(local_useless,
failure_callback=gof.opt.NavigatorOptimizer.warn_inplace),
0.6, 'fast_run', 'fast_compile')
optdb.register('merge1.1', gof.MergeOptimizer(),
0.65, 'fast_run', 'fast_compile', 'merge')
# rearranges elemwise expressions
optdb.register('canonicalize', gof.EquilibriumDB(ignore_newtrees=False),
1, 'fast_run', 'fast_compile', 'canonicalize_db')
# Register the merge opt in the canonicalize Equilibrium as a clean-up opt.
# Without this, since the equilibrium has ignore_newtrees=False, we
# won't merge all nodes if it is set as a global optimizer with
# final_opt=True.
# We need a new instance of MergeOptimizer so that its name is not
# changed by other usages of it.
optdb['canonicalize'].register("merge", gof.opt.MergeOptimizer(), 'fast_run',
"fast_compile", cleanup=True)
optdb.register('merge1.2', gof.MergeOptimizer(),
1.2, 'fast_run', 'fast_compile', 'merge')
optdb.register('Print1.21', PrintCurrentFunctionGraph('Post-canonicalize'),
1.21,) # 'fast_run', 'fast_compile')
# replace unstable subgraphs
optdb.register('stabilize', gof.EquilibriumDB(),
1.5, 'fast_run')
optdb.register('Print1.51', PrintCurrentFunctionGraph('Post-stabilize'),
1.51,) # 'fast_run', 'fast_compile')
# misc special cases for speed
optdb.register('specialize', gof.EquilibriumDB(),
2, 'fast_run', 'fast_compile_gpu')
# misc special cases for speed that break canonicalization
optdb.register('uncanonicalize', gof.EquilibriumDB(),
3, 'fast_run')
# misc special cases for speed that are dependent on the device.
optdb.register('specialize_device', gof.EquilibriumDB(),
48.6, 'fast_compile', 'fast_run') # must be after gpu stuff at 48.5
# especially constant merge
optdb.register('merge2', gof.MergeOptimizer(),
49, 'fast_run', 'merge')
optdb.register('add_destroy_handler', AddDestroyHandler(),
49.5, 'fast_run', 'inplace')
# final pass just to make sure
optdb.register('merge3', gof.MergeOptimizer(),
100, 'fast_run', 'merge')
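# --- hedged illustration (not part of the original module) ---
# A Query such as OPT_FAST_RUN is resolved into a concrete optimizer pipeline
# by querying the database registered above; Mode.__get_optimizer below does
# exactly this whenever its stored optimizer is a Query instance.
def _example_query_optdb():
    """Hedged sketch: resolve a predefined Query against the optimizer DB."""
    return optdb.query(OPT_FAST_RUN)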
class Mode(object):
"""
The Mode represents a way to optimize and then link a computation graph.
Parameters
----------
optimizer : a structure of type Optimizer
An Optimizer may simplify the math, put similar computations together,
improve numerical stability and various other improvements.
linker : a structure of type Linker
A Linker decides which implementations to use (C or Python, for example)
and how to string them together to perform the computation.
See Also
--------
predefined_linkers
predefined_optimizers
predefined_modes
"""
def __init__(self, linker=None, optimizer='default'):
if linker is None:
linker = config.linker
if optimizer == 'default':
optimizer = config.optimizer
Mode.__setstate__(self, (linker, optimizer))
# self.provided_optimizer - typically the `optimizer` arg.
# But if the `optimizer` arg is keyword corresponding to a predefined
# Query, then this stores the query
# self._optimizer - typically same as provided_optimizer??
# self.__get_optimizer - returns self._optimizer (possibly querying
# optdb with self._optimizer)
# self.optimizer - property that returns __get_optimizer()
def __getstate__(self):
return (self.provided_linker, self.provided_optimizer)
def __setstate__(self, state):
linker, optimizer = state
self.provided_linker = linker
self.provided_optimizer = optimizer
if isinstance(linker, string_types) or linker is None:
linker = predefined_linkers[linker]
self.linker = linker
if isinstance(optimizer, string_types) or optimizer is None:
optimizer = predefined_optimizers[optimizer]
if isinstance(optimizer, gof.Query):
self.provided_optimizer = optimizer
self._optimizer = optimizer
self.call_time = 0
self.fn_time = 0
linker.mode = self # TODO: WHY IS THIS HERE?
def __str__(self):
return "%s(linker = %s, optimizer = %s)" % (self.__class__.__name__,
self.provided_linker,
self.provided_optimizer)
def __get_optimizer(self):
if isinstance(self._optimizer, gof.Query):
return optdb.query(self._optimizer)
else:
return self._optimizer
optimizer = property(__get_optimizer)
def get_linker_optimizer(self, linker, optimizer):
if isinstance(linker, string_types) or linker is None:
linker = predefined_linkers[linker]
if isinstance(optimizer, string_types) or optimizer is None:
optimizer = predefined_optimizers[optimizer]
return (linker, optimizer)
def including(self, *tags):
link, opt = self.get_linker_optimizer(self.provided_linker,
self.provided_optimizer)
# N.B. opt might be a Query instance, not sure what else it might be...
# string? Optimizer? OptDB? who knows???
return self.clone(optimizer=opt.including(*tags))
def register(self, *optimizations):
"""Adds new optimization instances to a mode.
This method adds new optimization instances to a compilation mode. It
works like the `including()` method but takes as inputs optimization
instances to add instead of tags.
Parameters
----------
optimizations :
Every element of `optimizations` is a tuple containing an
optimization instance and a floating point value indicating the
position at which to insert the optimization in the mode.
Returns
-------
Mode
Copy of the current Mode which includes the provided
optimizations.
"""
link, opt = self.get_linker_optimizer(self.provided_linker,
self.provided_optimizer)
return self.clone(optimizer=opt.register(*optimizations))
def excluding(self, *tags):
link, opt = self.get_linker_optimizer(self.provided_linker,
self.provided_optimizer)
return self.clone(optimizer=opt.excluding(*tags))
def requiring(self, *tags):
link, opt = self.get_linker_optimizer(self.provided_linker,
self.provided_optimizer)
return self.clone(optimizer=opt.requiring(*tags))
def clone(self, link_kwargs=None, optimizer="", **kwargs):
"""
Create a new instance of this Mode.
Keyword arguments can be provided for the linker,
in which case its `clone` method will be called with these
arguments.
"""
if link_kwargs is None:
link_kwargs = {}
new_linker = self.linker.clone(**link_kwargs)
if optimizer == "":
optimizer = self.provided_optimizer
new_mode = type(self)(linker=new_linker,
optimizer=optimizer)
return new_mode
# If a string is passed as the mode argument in function or
# FunctionMaker, the Mode will be taken from this dictionary using the
# string as the key
# Use VM_linker to allow lazy evaluation by default.
FAST_COMPILE = Mode(theano.gof.vm.VM_Linker(use_cloop=False, c_thunks=False),
'fast_compile')
if theano.config.cxx:
FAST_RUN = Mode('cvm', 'fast_run')
else:
FAST_RUN = Mode('vm', 'fast_run')
predefined_modes = {'FAST_COMPILE': FAST_COMPILE,
'FAST_RUN': FAST_RUN,
}
instantiated_default_mode = None
def get_mode(orig_string):
if orig_string is None:
string = config.mode
else:
string = orig_string
if not isinstance(string, string_types):
return string # it is hopefully already a mode...
global instantiated_default_mode
# The default mode is cached. However, config.mode can change
# If instantiated_default_mode has the right class, use it.
if orig_string is None and instantiated_default_mode:
if string in predefined_modes:
default_mode_class = predefined_modes[string].__class__.__name__
else:
default_mode_class = string
if (instantiated_default_mode.__class__.__name__ ==
default_mode_class):
return instantiated_default_mode
if string in ['Mode', 'DebugMode', 'NanGuardMode']:
if string == 'DebugMode':
# need to import later to break circular dependency.
from .debugmode import DebugMode
# DebugMode uses its own linker.
ret = DebugMode(optimizer=config.optimizer)
elif string == 'NanGuardMode':
# need to import later to break circular dependency.
from .nanguardmode import NanGuardMode
# NanGuardMode uses its own linker.
ret = NanGuardMode(True, True, True, optimizer=config.optimizer)
else:
# TODO: Can't we look up the name and invoke it rather than using eval here?
ret = eval(string +
'(linker=config.linker, optimizer=config.optimizer)')
elif string in predefined_modes:
ret = predefined_modes[string]
else:
raise Exception("No predefined mode exist for string: %s" % string)
if orig_string is None:
# Build and cache the default mode
if theano.config.optimizer_excluding:
ret = ret.excluding(*theano.config.optimizer_excluding.split(':'))
if theano.config.optimizer_including:
ret = ret.including(*theano.config.optimizer_including.split(':'))
if theano.config.optimizer_requiring:
ret = ret.requiring(*theano.config.optimizer_requiring.split(':'))
instantiated_default_mode = ret
return ret
def get_default_mode():
return get_mode(None)
def register_mode(name, mode):
"""
Add a `Mode` which can be referred to by `name` in `function`.
"""
if name in predefined_modes:
raise ValueError('Mode name already taken: %s' % name)
predefined_modes[name] = mode
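# --- hedged usage sketch (not part of the original module) ---
# A minimal illustration of obtaining and customising a compilation mode.
# The tag names below are illustrative; any tag known to the optimizer DB
# can be excluded or required in the same way.
def _example_custom_mode():
    """Hedged sketch: build a customised Mode from a predefined one."""
    mode = get_mode('FAST_RUN')
    # excluding/requiring return new Mode instances; the original is untouched.
    return mode.excluding('inplace').requiring('stable')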
|
the-stack_0_2275 | '''
Written by Heng Fan
在ILSVRC_crops生成剪切之后的图片,成对,每一帧都有一个x和一个z。
如:
000000.00.crop.x.jpg
000000.00.crop.z.jpg
'''
import numpy as np
import os
import glob
import xml.etree.ElementTree as ET
import cv2
import datetime
'''
# default setting for cropping
'''
examplar_size = 127.0 # size of the exemplar (template) z
# instance_size = 255.0
instance_size = 271.0 # size of the search instance x
context_amount = 0.5 # amount of context padding: (1/2)(w+h)
def get_subwindow_avg(im, pos, model_sz, original_sz):
'''
# obtain image patch, padding with avg channel if area goes outside of border
'''
avg_chans = [np.mean(im[:, :, 0]), np.mean(im[:, :, 1]), np.mean(im[:, :, 2])]
if original_sz is None:
original_sz = model_sz
sz = original_sz
im_sz = im.shape
# make sure the size is not too small
assert (im_sz[0] > 2) & (im_sz[1] > 2), "The size of image is too small!"
c = (sz + 1) / 2
# check out-of-bounds coordinates; out-of-image regions are padded with the per-channel mean
context_xmin = round(pos[1] - c) # floor(pos(2) - sz(2) / 2);
context_xmax = context_xmin + sz - 1
context_ymin = round(pos[0] - c) # floor(pos(1) - sz(1) / 2);
context_ymax = context_ymin + sz - 1
left_pad = max(0, 1 - context_xmin) # in python, index starts from 0
top_pad = max(0, 1 - context_ymin)
right_pad = max(0, context_xmax - im_sz[1])
bottom_pad = max(0, context_ymax - im_sz[0])
context_xmin = context_xmin + left_pad
context_xmax = context_xmax + left_pad
context_ymin = context_ymin + top_pad
context_ymax = context_ymax + top_pad
im_R = im[:, :, 0]
im_G = im[:, :, 1]
im_B = im[:, :, 2]
# padding
if (top_pad != 0) | (bottom_pad != 0) | (left_pad != 0) | (right_pad != 0):
im_R = np.pad(im_R, ((int(top_pad), int(bottom_pad)), (int(left_pad), int(right_pad))), 'constant',
constant_values=avg_chans[0])
im_G = np.pad(im_G, ((int(top_pad), int(bottom_pad)), (int(left_pad), int(right_pad))), 'constant',
constant_values=avg_chans[1])
im_B = np.pad(im_B, ((int(top_pad), int(bottom_pad)), (int(left_pad), int(right_pad))), 'constant',
constant_values=avg_chans[2])
im = np.stack((im_R, im_G, im_B), axis=2)
im_patch_original = im[int(context_ymin) - 1:int(context_ymax), int(context_xmin) - 1:int(context_xmax), :]
if model_sz != original_sz:
im_patch = cv2.resize(im_patch_original, (int(model_sz), int(model_sz)), interpolation=cv2.INTER_CUBIC)
else:
im_patch = im_patch_original
return im_patch
def get_crops(img, bbox, size_z, size_x, context_amount):
'''
# get examplar and search region crops
'''
cx = bbox[0] + bbox[2]/2
cy = bbox[1] + bbox[3]/2
w = bbox[2]
h = bbox[3]
# for examplar
wc_z = w + context_amount * (w + h)
hc_z = h + context_amount * (w + h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = size_z / s_z
im_crop_z = get_subwindow_avg(img, np.array([cy, cx]), size_z, round(s_z))
# for search region
d_search = (size_x - size_z) / 2
pad = d_search / scale_z
s_x = s_z + 2 * pad
scale_x = size_x / s_x
im_crop_x = get_subwindow_avg(img, np.array([cy, cx]), size_x, round(s_x))
return im_crop_z, im_crop_x
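# --- hedged usage sketch (not part of the original script) ---
# The frame and bounding box below are synthetic and only illustrate the
# call signature: bbox is [xmin, ymin, width, height] in pixels.
def _example_get_crops():
    img = np.zeros((480, 640, 3), dtype=np.uint8)
    bbox = np.array([300.0, 200.0, 80.0, 60.0])
    crop_z, crop_x = get_crops(img, bbox, examplar_size, instance_size, context_amount)
    # crop_z is the 127x127 exemplar patch, crop_x the 271x271 search region
    return crop_z.shape, crop_x.shape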
def generate_image_crops(vid_root_path, vid_curated_path):
'''
# save image crops to the vid_curated_path
'''
anno_str = "Annotations/VID/train/"
data_str = "Data/VID/train/"
vid_anno_path = os.path.join(vid_root_path, anno_str)
vid_data_path = os.path.join(vid_root_path, data_str)
cur_procesed_fraem = 0
start_time = datetime.datetime.now()
total_time = 0
# dirs of level1: e.g., a/, b/, ...
all_dirs_level1 = os.listdir(vid_anno_path)
for i in range(len(all_dirs_level1)):
all_dirs_level2 = os.listdir(os.path.join(vid_anno_path, all_dirs_level1[i]))
# dirs of level2: e.g., a/ILSVRC2015_train_00000000/, a/ILSVRC2015_train_00001000/, ...
for j in range(len(all_dirs_level2)):
frame_list = glob.glob(os.path.join(vid_anno_path, all_dirs_level1[i], all_dirs_level2[j], "*.xml"))
frame_list.sort()
# level3: frame level
for k in range(len(frame_list)):
frame_xml_name = os.path.join(vid_anno_path, all_dirs_level1[i], all_dirs_level2[j], frame_list[k])
frame_xml_tree = ET.parse(frame_xml_name)
frame_xml_root = frame_xml_tree.getroot()
# image file path
frame_img_name = (frame_list[k].replace(".xml", ".JPEG")).replace(vid_anno_path, vid_data_path)
img = cv2.imread(frame_img_name)
if img is None:
print("Cannot find %s!"%frame_img_name)
exit(0)
# image file name
frame_filename = frame_xml_root.find('filename').text
# process (all objects in) each frame
for object in frame_xml_root.iter("object"):
# get trackid
id = object.find("trackid").text
# get bounding box
bbox_node = object.find("bndbox")
xmax = float(bbox_node.find('xmax').text)
xmin = float(bbox_node.find('xmin').text)
ymax = float(bbox_node.find('ymax').text)
ymin = float(bbox_node.find('ymin').text)
width = xmax - xmin + 1
height = ymax - ymin + 1
bbox = np.array([xmin, ymin, width, height])
# print("processing %s, %s, %s, %s ..." % (all_dirs_level1[i], all_dirs_level2[j], frame_filename+".JPEG", id))
# get crops
im_crop_z, im_crop_x = get_crops(img, bbox, examplar_size, instance_size, context_amount)
# save crops
save_path = os.path.join(vid_curated_path, data_str, all_dirs_level1[i], all_dirs_level2[j])
if not os.path.exists(save_path):
os.makedirs(save_path)
savename_crop_z = os.path.join(save_path, '{}.{:02d}.crop.z.jpg'.format(frame_filename, int(id)))
savename_crop_x = os.path.join(save_path, '{}.{:02d}.crop.x.jpg'.format(frame_filename, int(id)))
cv2.imwrite(savename_crop_z, im_crop_z, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
cv2.imwrite(savename_crop_x, im_crop_x, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
cur_procesed_fraem = cur_procesed_fraem + 1
if cur_procesed_fraem % 1000 == 0:
end_time = datetime.datetime.now()
total_time = total_time + int((end_time-start_time).seconds)
print("finished processing %d frames in %d seconds (FPS: %d ) ..." % (cur_procesed_fraem, total_time, int(1000/(end_time-start_time).seconds)))
start_time = datetime.datetime.now()
def generate_image_crops4otb(vid_root_path, vid_curated_path):
'''
# save image crops to the vid_curated_path
'''
anno_str = "groundtruth_rect.txt"
data_str = "img"
vid_anno_path = os.path.join(vid_root_path, anno_str)
vid_data_path = os.path.join(vid_root_path, data_str)
cur_procesed_fraem = 0
start_time = datetime.datetime.now()
total_time = 0
bboxs = [list(map(float,x.split(','))) for x in open(vid_anno_path, 'r').readlines()]
img_list = glob.glob(os.path.join(vid_data_path,"*.jpg"))
for i in range(len(img_list)):
# image file path
img_path = img_list[i]
img = cv2.imread(img_path)
if img is None:
print("Cannot find %s!" % img_path)
exit(0)
img_name = img_path.split('\\')[-1]
# get bounding box
bbox = bboxs[i]
xmax = bbox[0]+bbox[2]
xmin = bbox[0]
ymax = bbox[1]+bbox[3]
ymin = bbox[1]
width = bbox[2]
height = bbox[3]
new_bbox = np.array([xmin, ymin, width, height])
# print("processing %s, %s, %s, %s ..." % (all_dirs_level1[i], all_dirs_level2[j], frame_filename+".JPEG", id))
# get crops
im_crop_z, im_crop_x = get_crops(img, new_bbox, examplar_size, instance_size, context_amount)
# save crops
save_path = os.path.join(vid_curated_path, data_str)
if not os.path.exists(save_path):
os.makedirs(save_path)
savename_crop_z = os.path.join(save_path, '{}.crop.z.jpg'.format(img_name))
savename_crop_x = os.path.join(save_path, '{}.crop.x.jpg'.format(img_name))
cv2.imwrite(savename_crop_z, im_crop_z, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
cv2.imwrite(savename_crop_x, im_crop_x, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
cur_procesed_fraem = cur_procesed_fraem + 1
if cur_procesed_fraem % 1000 == 0:
end_time = datetime.datetime.now()
total_time = total_time + int((end_time-start_time).seconds)
print("finished processing %d frames in %d seconds (FPS: %d ) ..." % (cur_procesed_fraem, total_time, int(1000/(end_time-start_time).seconds)))
start_time = datetime.datetime.now()
if __name__ == "__main__":
# path to your VID dataset
# vid_root_path = "/home/hfan/Dataset/ILSVRC2015"
# vid_curated_path = "/home/hfan/Dataset/ILSVRC2015_crops"
# Windows ILSVRC
vid_root_path = r"D:\workspace\MachineLearning\asimo\ILSVRC"
vid_curated_path = r"D:\workspace\MachineLearning\asimo\ILSVRC_crops"
# Windows OTB
vid_root_path = r"D:\workspace\MachineLearning\asimo\OTB_Train"
vid_curated_path = r"D:\workspace\MachineLearning\asimo\OTB_Train_crops"
# Linux
# vid_root_path = r"/home/zzx/vot/VGG/ILSVRC"
# vid_curated_path = r"/home/sjl/dataset/ILSVRC_crops"
if not os.path.exists(vid_curated_path):
os.mkdir(vid_curated_path)
# generate_image_crops(vid_root_path, vid_curated_path)
generate_image_crops4otb(vid_root_path, vid_curated_path)
|
the-stack_0_2276 | '''
Created on 22 Sep 2016
@author: andrew
'''
DATA_DIR = '/home/andrew/workspace/BKData/'
TESTS_DIR = DATA_DIR + 'tests/'
CIRCUIT_TEST_DIR = TESTS_DIR + 'circuit/'
CIRCUIT_ANGLE_TEST_DIR = CIRCUIT_TEST_DIR + 'angles/'
DEFAULT_CUTOFF = 1e-14
from yaferp.analysis import analyser
import cPickle
import scipy.sparse
import os
def loadCircuitAngles(fileName,boolJWorBK,cutoff=DEFAULT_CUTOFF):
if cutoff != None:
if boolJWorBK:
path = CIRCUIT_ANGLE_TEST_DIR + '/reduced/' + str(cutoff) + '/BK/' + fileName + '.angs'
else:
path = CIRCUIT_ANGLE_TEST_DIR + '/reduced/' + str(cutoff) +'/JW/' + fileName + '.angs'
fred = loadDict(path)
return fred
def saveDict(thing,dictPath):
with open(dictPath,'wb') as f:
cPickle.dump(thing,f,cPickle.HIGHEST_PROTOCOL)
def storeInDict(dictPath,theKey,theValue,rewrite=0):
thing = loadDict(dictPath)
if thing == None:
thing = {}
if(theKey in thing) and rewrite==0:
return thing
else:
thing[theKey] = theValue
saveDict(thing,dictPath)
return thing
def loadDict(dictPath):
if not os.path.isfile(dictPath):
return None
else:
with open(dictPath,'rb') as f:
it = cPickle.load(f)
return it
def calculateCircuitAngle(filename,boolJWorBK,cutoff=DEFAULT_CUTOFF,circuitType='normal',overwrite=0):
'''!!! this WILL FAIL if eigenvectors have not been previously generated !!! (it also fails if cutoff is None)'''
circ = analyser.generateCircuit(filename, boolJWorBK, cutoff, circuitType, overwrite)
eigvec = analyser.readEigenvector(filename, boolJWorBK, cutoff)
# print(eigvec)
if circuitType in ['ancilla','ancillaOptimised']:
testVec = scipy.sparse.kron([[1.],[0.]],eigvec)
else:
testVec = eigvec
ang = circ.angle(testVec)
return ang
def generateCircuitAngle(filename,boolJWorBK,cutoff=DEFAULT_CUTOFF,circuitType='normal',overwrite=0):
if cutoff != None:
if boolJWorBK:
outputPath = CIRCUIT_ANGLE_TEST_DIR + '/reduced/' + str(cutoff) + '/BK/' + filename + '.angs'
else:
outputPath = CIRCUIT_ANGLE_TEST_DIR + '/reduced/' + str(cutoff) +'/JW/' + filename + '.angs'
ang = calculateCircuitAngle(filename,boolJWorBK,cutoff,circuitType,overwrite)
storeInDict(outputPath,circuitType,ang,overwrite)
return ang
def generateManyCircuitAngles(filename,boolJWorBK,cutoff=DEFAULT_CUTOFF,listCircuitTypes='all',overwrite=0):
ALL_CIRCUIT_TYPES=['normal',
'optimised',
'interior',
'interiorOptimised',
'ancilla',
'ancillaOptimised']
if listCircuitTypes == 'all':
listCircuitTypes = ALL_CIRCUIT_TYPES
angles = {}
for circuitType in listCircuitTypes:
thisAngle = generateCircuitAngle(filename,boolJWorBK,cutoff,circuitType,overwrite)
angles[circuitType] = thisAngle
return angles
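# --- hedged usage sketch (not part of the original module) ---
# 'HeH+_sto-3g' is a placeholder molecule filename; any file whose
# eigenvectors were already generated by the analyser will do.
def _example_generate_angles():
    filename = 'HeH+_sto-3g'  # hypothetical input file
    # Jordan-Wigner mapping (boolJWorBK=0), two circuit types only.
    return generateManyCircuitAngles(filename, 0, DEFAULT_CUTOFF,
                                     ['normal', 'optimised'])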
|
the-stack_0_2277 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import random
from numbers import Number
from functools import partial
from operator import methodcaller
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
import numpy as np
import cv2
import imghdr
from PIL import Image
import paddlers
from .functions import normalize, horizontal_flip, permute, vertical_flip, center_crop, is_poly, \
horizontal_flip_poly, horizontal_flip_rle, vertical_flip_poly, vertical_flip_rle, crop_poly, \
crop_rle, expand_poly, expand_rle, resize_poly, resize_rle, de_haze, pca, select_bands, \
to_intensity, to_uint8, img_flip, img_simple_rotate
__all__ = [
"Compose",
"ImgDecoder",
"Resize",
"RandomResize",
"ResizeByShort",
"RandomResizeByShort",
"ResizeByLong",
"RandomHorizontalFlip",
"RandomVerticalFlip",
"Normalize",
"CenterCrop",
"RandomCrop",
"RandomScaleAspect",
"RandomExpand",
"Padding",
"MixupImage",
"RandomDistort",
"RandomBlur",
"RandomSwap",
"Defogging",
"DimReducing",
"BandSelecting",
"ArrangeSegmenter",
"ArrangeChangeDetector",
"ArrangeClassifier",
"ArrangeDetector",
"RandomFlipOrRotation",
]
interp_dict = {
'NEAREST': cv2.INTER_NEAREST,
'LINEAR': cv2.INTER_LINEAR,
'CUBIC': cv2.INTER_CUBIC,
'AREA': cv2.INTER_AREA,
'LANCZOS4': cv2.INTER_LANCZOS4
}
class Transform(object):
"""
Parent class of all data augmentation operations
"""
def __init__(self):
pass
def apply_im(self, image):
pass
def apply_mask(self, mask):
pass
def apply_bbox(self, bbox):
pass
def apply_segm(self, segms):
pass
def apply(self, sample):
if 'image' in sample:
sample['image'] = self.apply_im(sample['image'])
else: # image_tx
sample['image'] = self.apply_im(sample['image_t1'])
sample['image2'] = self.apply_im(sample['image_t2'])
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'])
if 'gt_bbox' in sample:
sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'])
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(self.apply_mask, sample['aux_masks']))
return sample
def __call__(self, sample):
if isinstance(sample, Sequence):
sample = [self.apply(s) for s in sample]
else:
sample = self.apply(sample)
return sample
class ImgDecoder(Transform):
"""
Decode image(s) in input.
Args:
to_rgb (bool, optional): If True, convert input images from BGR format to RGB format. Defaults to True.
"""
def __init__(self, to_rgb=True, to_uint8=True):
super(ImgDecoder, self).__init__()
self.to_rgb = to_rgb
self.to_uint8 = to_uint8
def read_img(self, img_path, input_channel=3):
img_format = imghdr.what(img_path)
name, ext = os.path.splitext(img_path)
if img_format == 'tiff' or ext == '.img':
try:
import gdal
except:
try:
from osgeo import gdal
except:
raise Exception(
    "Failed to import gdal! You can try using conda to install gdal"
)
dataset = gdal.Open(img_path)
if dataset == None:
raise Exception('Can not open', img_path)
im_data = dataset.ReadAsArray()
if im_data.ndim == 2:
im_data = to_intensity(im_data)  # single-band (SAR) image: convert to intensity
im_data = im_data[:, :, np.newaxis]
elif im_data.ndim == 3:
im_data = im_data.transpose((1, 2, 0))
return im_data
elif img_format in ['jpeg', 'bmp', 'png', 'jpg']:
if input_channel == 3:
return cv2.imread(img_path, cv2.IMREAD_ANYDEPTH |
cv2.IMREAD_ANYCOLOR | cv2.IMREAD_COLOR)
else:
return cv2.imread(img_path, cv2.IMREAD_ANYDEPTH |
cv2.IMREAD_ANYCOLOR)
elif ext == '.npy':
return np.load(img_path)
else:
raise Exception('Image format {} is not supported!'.format(ext))
def apply_im(self, im_path):
if isinstance(im_path, str):
try:
image = self.read_img(im_path)
except:
raise ValueError('Cannot read the image file {}!'.format(
im_path))
else:
image = im_path
if self.to_rgb and image.shape[-1] == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if self.to_uint8:
image = to_uint8(image)
return image
def apply_mask(self, mask):
try:
mask = np.asarray(Image.open(mask))
except:
raise ValueError("Cannot read the mask file {}!".format(mask))
if len(mask.shape) != 2:
raise Exception(
"Mask should be a 1-channel image, but recevied is a {}-channel image.".
format(mask.shape[2]))
return mask
def apply(self, sample):
"""
Args:
sample (dict): Input sample.
Returns:
dict: Decoded sample.
"""
if 'image' in sample:
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
if 'image_t1' in sample and not 'image' in sample:
if not ('image_t2' in sample and 'image2' not in sample):
raise ValueError("'image_t2' must be provided (and 'image2' absent) when 'image_t1' is used.")
sample['image'] = self.apply_im(sample['image_t1'])
sample['image2'] = self.apply_im(sample['image_t2'])
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'])
im_height, im_width, _ = sample['image'].shape
se_height, se_width = sample['mask'].shape
if im_height != se_height or im_width != se_width:
raise Exception(
"The height or width of the im is not same as the mask")
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(self.apply_mask, sample['aux_masks']))
# TODO: check the shape of auxiliary masks
sample['im_shape'] = np.array(
sample['image'].shape[:2], dtype=np.float32)
sample['scale_factor'] = np.array([1., 1.], dtype=np.float32)
return sample
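# --- hedged usage sketch (not part of the original transforms) ---
# The path below is a placeholder; GeoTIFF/IMG files are read through GDAL,
# common formats through OpenCV, and .npy arrays through NumPy, as handled above.
def _example_decode_sample():
    decoder = ImgDecoder(to_rgb=True, to_uint8=True)
    sample = decoder({'image': 'path/to/scene.tif'})  # placeholder path
    return sample['image'].shape, sample['im_shape']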
class Compose(Transform):
"""
Apply a series of data augmentation to the input.
All input images are in Height-Width-Channel ([H, W, C]) format.
Args:
transforms (List[paddlers.transforms.Transform]): List of data preprocess or augmentations.
Raises:
TypeError: Invalid type of transforms.
ValueError: Invalid length of transforms.
"""
def __init__(self, transforms):
super(Compose, self).__init__()
if not isinstance(transforms, list):
raise TypeError(
'Type of transforms is invalid. Must be a list, but received {}'
.format(type(transforms)))
if len(transforms) < 1:
raise ValueError(
'Length of transforms must not be less than 1, but received {}'
.format(len(transforms)))
self.transforms = transforms
self.decode_image = ImgDecoder()
self.arrange_outputs = None
self.apply_im_only = False
def __call__(self, sample):
if self.apply_im_only:
if 'mask' in sample:
mask_backup = copy.deepcopy(sample['mask'])
del sample['mask']
if 'aux_masks' in sample:
aux_masks = copy.deepcopy(sample['aux_masks'])
sample = self.decode_image(sample)
for op in self.transforms:
# skip batch transforms and mixup
if isinstance(op, (paddlers.transforms.BatchRandomResize,
paddlers.transforms.BatchRandomResizeByShort,
MixupImage)):
continue
sample = op(sample)
if self.arrange_outputs is not None:
if self.apply_im_only:
sample['mask'] = mask_backup
if 'aux_masks' in locals():
sample['aux_masks'] = aux_masks
sample = self.arrange_outputs(sample)
return sample
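# --- hedged usage sketch (not part of the original transforms) ---
# The image path is a placeholder; Compose decodes it with ImgDecoder before
# running the listed operators on the sample dict. Resize and Normalize are
# defined later in this module, so the names are resolved at call time.
def _example_compose_pipeline():
    pipeline = Compose([
        Resize(target_size=256),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    sample = {'image': 'path/to/image.jpg'}  # placeholder path
    return pipeline(sample)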
class Resize(Transform):
"""
Resize input.
- If target_size is an int, resize the image(s) to (target_size, target_size).
- If target_size is a list or tuple, resize the image(s) to target_size.
Attention: If interp is 'RANDOM', the interpolation method will be chosen randomly.
Args:
target_size (int, List[int] or Tuple[int]): Target size. If int, the height and width share the same target_size.
Otherwise, target_size represents [target height, target width].
interp ({'NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM'}, optional):
Interpolation method of resize. Defaults to 'LINEAR'.
keep_ratio (bool, optional): Whether to keep the aspect ratio. If True, the same scale is applied to width and
    height so that neither exceeds the corresponding target size. Defaults to False.
Raises:
TypeError: Invalid type of target_size.
ValueError: Invalid interpolation method.
"""
def __init__(self, target_size, interp='LINEAR', keep_ratio=False):
super(Resize, self).__init__()
if not (interp == "RANDOM" or interp in interp_dict):
raise ValueError("interp should be one of {}".format(
interp_dict.keys()))
if isinstance(target_size, int):
target_size = (target_size, target_size)
else:
if not (isinstance(target_size,
(list, tuple)) and len(target_size) == 2):
raise TypeError(
"target_size should be an int or a list of length 2, but received {}".
format(target_size))
# (height, width)
self.target_size = target_size
self.interp = interp
self.keep_ratio = keep_ratio
def apply_im(self, image, interp, target_size):
flag = image.shape[2] == 1
image = cv2.resize(image, target_size, interpolation=interp)
if flag:
image = image[:, :, np.newaxis]
return image
def apply_mask(self, mask, target_size):
mask = cv2.resize(mask, target_size, interpolation=cv2.INTER_NEAREST)
return mask
def apply_bbox(self, bbox, scale, target_size):
im_scale_x, im_scale_y = scale
bbox[:, 0::2] *= im_scale_x
bbox[:, 1::2] *= im_scale_y
bbox[:, 0::2] = np.clip(bbox[:, 0::2], 0, target_size[0])
bbox[:, 1::2] = np.clip(bbox[:, 1::2], 0, target_size[1])
return bbox
def apply_segm(self, segms, im_size, scale):
im_h, im_w = im_size
im_scale_x, im_scale_y = scale
resized_segms = []
for segm in segms:
if is_poly(segm):
# Polygon format
resized_segms.append([
resize_poly(poly, im_scale_x, im_scale_y) for poly in segm
])
else:
# RLE format
resized_segms.append(
resize_rle(segm, im_h, im_w, im_scale_x, im_scale_y))
return resized_segms
def apply(self, sample):
if self.interp == "RANDOM":
interp = random.choice(list(interp_dict.values()))
else:
interp = interp_dict[self.interp]
im_h, im_w = sample['image'].shape[:2]
im_scale_y = self.target_size[0] / im_h
im_scale_x = self.target_size[1] / im_w
target_size = (self.target_size[1], self.target_size[0])
if self.keep_ratio:
scale = min(im_scale_y, im_scale_x)
target_w = int(round(im_w * scale))
target_h = int(round(im_h * scale))
target_size = (target_w, target_h)
im_scale_y = target_h / im_h
im_scale_x = target_w / im_w
sample['image'] = self.apply_im(sample['image'], interp, target_size)
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'], interp,
target_size)
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'], target_size)
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(partial(
self.apply_mask, target_size=target_size),
sample['aux_masks']))
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = self.apply_bbox(
sample['gt_bbox'], [im_scale_x, im_scale_y], target_size)
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
sample['gt_poly'] = self.apply_segm(
sample['gt_poly'], [im_h, im_w], [im_scale_x, im_scale_y])
sample['im_shape'] = np.asarray(
sample['image'].shape[:2], dtype=np.float32)
if 'scale_factor' in sample:
scale_factor = sample['scale_factor']
sample['scale_factor'] = np.asarray(
[scale_factor[0] * im_scale_y, scale_factor[1] * im_scale_x],
dtype=np.float32)
return sample
class RandomResize(Transform):
"""
Resize input to random sizes.
Attention: If interp is 'RANDOM', the interpolation method will be chosen randomly.
Args:
target_sizes (List[int], List[list or tuple] or Tuple[list or tuple]):
Multiple target sizes, each target size is an int or list/tuple.
interp ({'NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM'}, optional):
Interpolation method of resize. Defaults to 'LINEAR'.
Raises:
TypeError: Invalid type of target_size.
ValueError: Invalid interpolation method.
See Also:
Resize input to a specific size.
"""
def __init__(self, target_sizes, interp='LINEAR'):
super(RandomResize, self).__init__()
if not (interp == "RANDOM" or interp in interp_dict):
raise ValueError("interp should be one of {}".format(
interp_dict.keys()))
self.interp = interp
assert isinstance(target_sizes, list), \
"target_size must be List"
for i, item in enumerate(target_sizes):
if isinstance(item, int):
target_sizes[i] = (item, item)
self.target_size = target_sizes
def apply(self, sample):
height, width = random.choice(self.target_size)
resizer = Resize((height, width), interp=self.interp)
sample = resizer(sample)
return sample
class ResizeByShort(Transform):
"""
Resize input with keeping the aspect ratio.
Attention: If interp is 'RANDOM', the interpolation method will be chosen randomly.
Args:
short_size (int): Target size of the shorter side of the image(s).
max_size (int, optional): The upper bound of longer side of the image(s). If max_size is -1, no upper bound is applied. Defaults to -1.
interp ({'NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM'}, optional): Interpolation method of resize. Defaults to 'LINEAR'.
Raises:
ValueError: Invalid interpolation method.
"""
def __init__(self, short_size=256, max_size=-1, interp='LINEAR'):
if not (interp == "RANDOM" or interp in interp_dict):
raise ValueError("interp should be one of {}".format(
interp_dict.keys()))
super(ResizeByShort, self).__init__()
self.short_size = short_size
self.max_size = max_size
self.interp = interp
def apply(self, sample):
im_h, im_w = sample['image'].shape[:2]
im_short_size = min(im_h, im_w)
im_long_size = max(im_h, im_w)
scale = float(self.short_size) / float(im_short_size)
if 0 < self.max_size < np.round(scale * im_long_size):
scale = float(self.max_size) / float(im_long_size)
target_w = int(round(im_w * scale))
target_h = int(round(im_h * scale))
sample = Resize(
target_size=(target_h, target_w), interp=self.interp)(sample)
return sample
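# Editor's note: illustrative sketch only, not part of the original API. It mirrors
# the scale computation in ResizeByShort.apply above: the shorter side is scaled
# to short_size unless that would push the longer side past max_size.
def _example_resize_by_short(im_h, im_w, short_size, max_size=-1):
    """Return the (h, w) pair ResizeByShort passes to Resize."""
    scale = short_size / min(im_h, im_w)
    if 0 < max_size < round(scale * max(im_h, im_w)):
        scale = max_size / max(im_h, im_w)
    return int(round(im_h * scale)), int(round(im_w * scale))
# e.g. _example_resize_by_short(400, 1000, short_size=256, max_size=512) -> (205, 512)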
class RandomResizeByShort(Transform):
"""
Resize input to random sizes with keeping the aspect ratio.
    Attention: If interp is 'RANDOM', the interpolation method will be chosen randomly.
Args:
short_sizes (List[int]): Target size of the shorter side of the image(s).
max_size (int, optional): The upper bound of longer side of the image(s). If max_size is -1, no upper bound is applied. Defaults to -1.
interp ({'NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM'}, optional): Interpolation method of resize. Defaults to 'LINEAR'.
Raises:
TypeError: Invalid type of target_size.
ValueError: Invalid interpolation method.
See Also:
ResizeByShort: Resize image(s) in input with keeping the aspect ratio.
"""
def __init__(self, short_sizes, max_size=-1, interp='LINEAR'):
super(RandomResizeByShort, self).__init__()
if not (interp == "RANDOM" or interp in interp_dict):
raise ValueError("interp should be one of {}".format(
interp_dict.keys()))
self.interp = interp
assert isinstance(short_sizes, list), \
"short_sizes must be List"
self.short_sizes = short_sizes
self.max_size = max_size
def apply(self, sample):
short_size = random.choice(self.short_sizes)
resizer = ResizeByShort(
short_size=short_size, max_size=self.max_size, interp=self.interp)
sample = resizer(sample)
return sample
class ResizeByLong(Transform):
def __init__(self, long_size=256, interp='LINEAR'):
super(ResizeByLong, self).__init__()
self.long_size = long_size
self.interp = interp
def apply(self, sample):
im_h, im_w = sample['image'].shape[:2]
im_long_size = max(im_h, im_w)
scale = float(self.long_size) / float(im_long_size)
target_h = int(round(im_h * scale))
target_w = int(round(im_w * scale))
sample = Resize(
target_size=(target_h, target_w), interp=self.interp)(sample)
return sample
class RandomFlipOrRotation(Transform):
"""
Flip or Rotate an image in different ways with a certain probability.
Args:
probs (list of float): Probabilities of flipping and rotation. Default: [0.35,0.25].
        probsf (list of float): Probabilities of the 5 flipping modes
            (horizontal, vertical, both horizontal and vertical, diagonal, anti-diagonal).
            Default: [0.3, 0.3, 0.2, 0.1, 0.1].
        probsr (list of float): Probabilities of the 3 rotation modes (90°, 180°, 270° clockwise). Default: [0.25,0.5,0.25].
    Examples:
        from paddlers import transforms as T
        # Define the data augmentation pipeline
        train_transforms = T.Compose([
            T.RandomFlipOrRotation(
                probs  = [0.3, 0.2],             # probability of a flip is 0.3, of a rotation is 0.2, of leaving the image unchanged is 0.5
                probsf = [0.3, 0.25, 0, 0, 0],   # when flipping: horizontal 0.3, vertical 0.25; horizontal-and-vertical, diagonal and anti-diagonal flips 0; unchanged 0.45
                probsr = [0, 0.65, 0]),          # when rotating: 90° clockwise 0, 180° 0.65, 270° 0; unchanged 0.35
T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
"""
def __init__(self,
probs=[0.35, 0.25],
probsf=[0.3, 0.3, 0.2, 0.1, 0.1],
probsr=[0.25, 0.5, 0.25]):
super(RandomFlipOrRotation, self).__init__()
# Change various probabilities into probability intervals, to judge in which mode to flip or rotate
self.probs = [probs[0], probs[0] + probs[1]]
self.probsf = self.get_probs_range(probsf)
self.probsr = self.get_probs_range(probsr)
def apply_im(self, image, mode_id, flip_mode=True):
if flip_mode:
image = img_flip(image, mode_id)
else:
image = img_simple_rotate(image, mode_id)
return image
def apply_mask(self, mask, mode_id, flip_mode=True):
if flip_mode:
mask = img_flip(mask, mode_id)
else:
mask = img_simple_rotate(mask, mode_id)
return mask
def get_probs_range(self, probs):
'''
Change various probabilities into cumulative probabilities
Args:
probs(list of float): probabilities of different mode, shape:[n]
Returns:
probability intervals(list of binary list): shape:[n, 2]
'''
ps = []
last_prob = 0
for prob in probs:
p_s = last_prob
cur_prob = prob / sum(probs)
last_prob += cur_prob
p_e = last_prob
ps.append([p_s, p_e])
return ps
def judge_probs_range(self, p, probs):
'''
Judge whether a probability value falls within the given probability interval
Args:
p(float): probability
probs(list of binary list): probability intervals, shape:[n, 2]
Returns:
mode id(int):the probability interval number where the input probability falls,
if return -1, the image will remain as it is and will not be processed
'''
for id, id_range in enumerate(probs):
if p > id_range[0] and p < id_range[1]:
return id
return -1
def apply(self, sample):
p_m = random.random()
if p_m < self.probs[0]:
mode_p = random.random()
mode_id = self.judge_probs_range(mode_p, self.probsf)
sample['image'] = self.apply_im(sample['image'], mode_id, True)
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'], mode_id, True)
elif p_m < self.probs[1]:
mode_p = random.random()
mode_id = self.judge_probs_range(mode_p, self.probsr)
sample['image'] = self.apply_im(sample['image'], mode_id, False)
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'], mode_id, False)
return sample
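# Editor's note: illustrative re-implementation (not part of the original API) of
# how RandomFlipOrRotation.get_probs_range turns per-mode probabilities into
# cumulative intervals that judge_probs_range matches a uniform draw against.
def _example_probs_to_intervals(probs):
    """Return normalised cumulative [start, end] intervals for each mode."""
    total, last, intervals = sum(probs), 0.0, []
    for p in probs:
        intervals.append([last, last + p / total])
        last += p / total
    return intervals
# _example_probs_to_intervals([0.3, 0.3, 0.2, 0.1, 0.1]) gives, up to float
# rounding, [[0, 0.3], [0.3, 0.6], [0.6, 0.8], [0.8, 0.9], [0.9, 1.0]].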
class RandomHorizontalFlip(Transform):
"""
Randomly flip the input horizontally.
Args:
prob(float, optional): Probability of flipping the input. Defaults to .5.
"""
def __init__(self, prob=0.5):
super(RandomHorizontalFlip, self).__init__()
self.prob = prob
def apply_im(self, image):
image = horizontal_flip(image)
return image
def apply_mask(self, mask):
mask = horizontal_flip(mask)
return mask
def apply_bbox(self, bbox, width):
oldx1 = bbox[:, 0].copy()
oldx2 = bbox[:, 2].copy()
bbox[:, 0] = width - oldx2
bbox[:, 2] = width - oldx1
return bbox
def apply_segm(self, segms, height, width):
flipped_segms = []
for segm in segms:
if is_poly(segm):
# Polygon format
flipped_segms.append(
[horizontal_flip_poly(poly, width) for poly in segm])
else:
# RLE format
flipped_segms.append(horizontal_flip_rle(segm, height, width))
return flipped_segms
def apply(self, sample):
if random.random() < self.prob:
im_h, im_w = sample['image'].shape[:2]
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'])
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(self.apply_mask, sample['aux_masks']))
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], im_w)
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im_h,
im_w)
return sample
class RandomVerticalFlip(Transform):
"""
Randomly flip the input vertically.
Args:
prob(float, optional): Probability of flipping the input. Defaults to .5.
"""
def __init__(self, prob=0.5):
super(RandomVerticalFlip, self).__init__()
self.prob = prob
def apply_im(self, image):
image = vertical_flip(image)
return image
def apply_mask(self, mask):
mask = vertical_flip(mask)
return mask
def apply_bbox(self, bbox, height):
oldy1 = bbox[:, 1].copy()
oldy2 = bbox[:, 3].copy()
        bbox[:, 1] = height - oldy2
        bbox[:, 3] = height - oldy1
return bbox
def apply_segm(self, segms, height, width):
flipped_segms = []
for segm in segms:
if is_poly(segm):
# Polygon format
flipped_segms.append(
[vertical_flip_poly(poly, height) for poly in segm])
else:
# RLE format
flipped_segms.append(vertical_flip_rle(segm, height, width))
return flipped_segms
def apply(self, sample):
if random.random() < self.prob:
im_h, im_w = sample['image'].shape[:2]
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'])
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(self.apply_mask, sample['aux_masks']))
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], im_h)
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im_h,
im_w)
return sample
class Normalize(Transform):
"""
Apply min-max normalization to the image(s) in input.
1. im = (im - min_value) * 1 / (max_value - min_value)
2. im = im - mean
3. im = im / std
Args:
mean(List[float] or Tuple[float], optional): Mean of input image(s). Defaults to [0.485, 0.456, 0.406].
std(List[float] or Tuple[float], optional): Standard deviation of input image(s). Defaults to [0.229, 0.224, 0.225].
min_val(List[float] or Tuple[float], optional): Minimum value of input image(s). Defaults to [0, 0, 0, ].
max_val(List[float] or Tuple[float], optional): Max value of input image(s). Defaults to [255., 255., 255.].
"""
def __init__(self,
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
min_val=None,
max_val=None):
super(Normalize, self).__init__()
channel = len(mean)
if min_val is None:
min_val = [0] * channel
if max_val is None:
max_val = [255.] * channel
from functools import reduce
if reduce(lambda x, y: x * y, std) == 0:
raise ValueError(
'Std should not contain 0, but received is {}.'.format(std))
if reduce(lambda x, y: x * y,
[a - b for a, b in zip(max_val, min_val)]) == 0:
raise ValueError(
'(max_val - min_val) should not contain 0, but received is {}.'.
format((np.asarray(max_val) - np.asarray(min_val)).tolist()))
self.mean = mean
self.std = std
self.min_val = min_val
self.max_val = max_val
def apply_im(self, image):
image = image.astype(np.float32)
mean = np.asarray(
self.mean, dtype=np.float32)[np.newaxis, np.newaxis, :]
std = np.asarray(self.std, dtype=np.float32)[np.newaxis, np.newaxis, :]
image = normalize(image, mean, std, self.min_val, self.max_val)
return image
def apply(self, sample):
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
return sample
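# Editor's note: illustrative per-pixel version (not part of the original API) of
# the three Normalize steps documented above, using the default parameters of the
# first (R) channel.
def _example_normalize_pixel(x, mean=0.485, std=0.229, min_val=0., max_val=255.):
    """Apply min-max scaling, mean subtraction and std division to one value."""
    x = (x - min_val) / (max_val - min_val)
    return (x - mean) / std
# e.g. _example_normalize_pixel(128.0) ~= 0.074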
class CenterCrop(Transform):
"""
Crop the input at the center.
1. Locate the center of the image.
2. Crop the sample.
Args:
crop_size(int, optional): target size of the cropped image(s). Defaults to 224.
"""
def __init__(self, crop_size=224):
super(CenterCrop, self).__init__()
self.crop_size = crop_size
def apply_im(self, image):
image = center_crop(image, self.crop_size)
return image
def apply_mask(self, mask):
mask = center_crop(mask, self.crop_size)
return mask
def apply(self, sample):
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'])
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(self.apply_mask, sample['aux_masks']))
return sample
class RandomCrop(Transform):
"""
Randomly crop the input.
1. Compute the height and width of cropped area according to aspect_ratio and scaling.
2. Locate the upper left corner of cropped area randomly.
3. Crop the image(s).
4. Resize the cropped area to crop_size by crop_size.
Args:
crop_size(int, List[int] or Tuple[int]): Target size of the cropped area. If None, the cropped area will not be
resized. Defaults to None.
aspect_ratio (List[float], optional): Aspect ratio of cropped region in [min, max] format. Defaults to [.5, 2.].
thresholds (List[float], optional): Iou thresholds to decide a valid bbox crop.
Defaults to [.0, .1, .3, .5, .7, .9].
scaling (List[float], optional): Ratio between the cropped region and the original image in [min, max] format.
Defaults to [.3, 1.].
num_attempts (int, optional): The number of tries before giving up. Defaults to 50.
allow_no_crop (bool, optional): Whether returning without doing crop is allowed. Defaults to True.
cover_all_box (bool, optional): Whether to ensure all bboxes are covered in the final crop. Defaults to False.
"""
def __init__(self,
crop_size=None,
aspect_ratio=[.5, 2.],
thresholds=[.0, .1, .3, .5, .7, .9],
scaling=[.3, 1.],
num_attempts=50,
allow_no_crop=True,
cover_all_box=False):
super(RandomCrop, self).__init__()
self.crop_size = crop_size
self.aspect_ratio = aspect_ratio
self.thresholds = thresholds
self.scaling = scaling
self.num_attempts = num_attempts
self.allow_no_crop = allow_no_crop
self.cover_all_box = cover_all_box
def _generate_crop_info(self, sample):
im_h, im_w = sample['image'].shape[:2]
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
            # copy so that appending 'no_crop' and shuffling do not mutate self.thresholds
            thresholds = list(self.thresholds)
if self.allow_no_crop:
thresholds.append('no_crop')
np.random.shuffle(thresholds)
for thresh in thresholds:
if thresh == 'no_crop':
return None
for i in range(self.num_attempts):
crop_box = self._get_crop_box(im_h, im_w)
if crop_box is None:
continue
iou = self._iou_matrix(
sample['gt_bbox'],
np.array(
[crop_box], dtype=np.float32))
if iou.max() < thresh:
continue
if self.cover_all_box and iou.min() < thresh:
continue
cropped_box, valid_ids = self._crop_box_with_center_constraint(
sample['gt_bbox'], np.array(
crop_box, dtype=np.float32))
if valid_ids.size > 0:
return crop_box, cropped_box, valid_ids
else:
for i in range(self.num_attempts):
crop_box = self._get_crop_box(im_h, im_w)
if crop_box is None:
continue
return crop_box, None, None
return None
def _get_crop_box(self, im_h, im_w):
scale = np.random.uniform(*self.scaling)
if self.aspect_ratio is not None:
min_ar, max_ar = self.aspect_ratio
aspect_ratio = np.random.uniform(
max(min_ar, scale**2), min(max_ar, scale**-2))
h_scale = scale / np.sqrt(aspect_ratio)
w_scale = scale * np.sqrt(aspect_ratio)
else:
h_scale = np.random.uniform(*self.scaling)
w_scale = np.random.uniform(*self.scaling)
crop_h = im_h * h_scale
crop_w = im_w * w_scale
if self.aspect_ratio is None:
if crop_h / crop_w < 0.5 or crop_h / crop_w > 2.0:
return None
crop_h = int(crop_h)
crop_w = int(crop_w)
crop_y = np.random.randint(0, im_h - crop_h)
crop_x = np.random.randint(0, im_w - crop_w)
return [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]
def _iou_matrix(self, a, b):
tl_i = np.maximum(a[:, np.newaxis, :2], b[:, :2])
br_i = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
area_i = np.prod(br_i - tl_i, axis=2) * (tl_i < br_i).all(axis=2)
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
area_o = (area_a[:, np.newaxis] + area_b - area_i)
return area_i / (area_o + 1e-10)
def _crop_box_with_center_constraint(self, box, crop):
cropped_box = box.copy()
cropped_box[:, :2] = np.maximum(box[:, :2], crop[:2])
cropped_box[:, 2:] = np.minimum(box[:, 2:], crop[2:])
cropped_box[:, :2] -= crop[:2]
cropped_box[:, 2:] -= crop[:2]
centers = (box[:, :2] + box[:, 2:]) / 2
valid = np.logical_and(crop[:2] <= centers,
centers < crop[2:]).all(axis=1)
valid = np.logical_and(
valid, (cropped_box[:, :2] < cropped_box[:, 2:]).all(axis=1))
return cropped_box, np.where(valid)[0]
def _crop_segm(self, segms, valid_ids, crop, height, width):
crop_segms = []
for id in valid_ids:
segm = segms[id]
if is_poly(segm):
# Polygon format
crop_segms.append(crop_poly(segm, crop))
else:
# RLE format
crop_segms.append(crop_rle(segm, crop, height, width))
return crop_segms
def apply_im(self, image, crop):
x1, y1, x2, y2 = crop
return image[y1:y2, x1:x2, :]
def apply_mask(self, mask, crop):
x1, y1, x2, y2 = crop
return mask[y1:y2, x1:x2, ...]
def apply(self, sample):
crop_info = self._generate_crop_info(sample)
if crop_info is not None:
crop_box, cropped_box, valid_ids = crop_info
im_h, im_w = sample['image'].shape[:2]
sample['image'] = self.apply_im(sample['image'], crop_box)
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'], crop_box)
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
crop_polys = self._crop_segm(
sample['gt_poly'],
valid_ids,
np.array(
crop_box, dtype=np.int64),
im_h,
im_w)
if [] in crop_polys:
delete_id = list()
valid_polys = list()
for idx, poly in enumerate(crop_polys):
                        if not poly:
delete_id.append(idx)
else:
valid_polys.append(poly)
valid_ids = np.delete(valid_ids, delete_id)
if not valid_polys:
return sample
sample['gt_poly'] = valid_polys
else:
sample['gt_poly'] = crop_polys
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)
sample['gt_class'] = np.take(
sample['gt_class'], valid_ids, axis=0)
if 'gt_score' in sample:
sample['gt_score'] = np.take(
sample['gt_score'], valid_ids, axis=0)
if 'is_crowd' in sample:
sample['is_crowd'] = np.take(
sample['is_crowd'], valid_ids, axis=0)
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'], crop_box)
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(partial(
self.apply_mask, crop=crop_box),
sample['aux_masks']))
if self.crop_size is not None:
sample = Resize(self.crop_size)(sample)
return sample
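# Editor's note: a worked example with hypothetical numbers (editor's addition) of
# the IoU test RandomCrop uses in _generate_crop_info. For a ground-truth box
# (10, 10, 50, 50) and a candidate crop box (30, 30, 80, 80):
#   intersection = (50 - 30) * (50 - 30)      = 400
#   union        = 40 * 40 + 50 * 50 - 400    = 3700
#   IoU          = 400 / 3700                 ~= 0.108
# so this crop only survives the `iou.max() < thresh` rejection for sampled
# thresholds of at most ~0.108, and must then also satisfy the box-centre
# constraint before being applied.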
class RandomScaleAspect(Transform):
"""
Crop input image(s) and resize back to original sizes.
Args:
min_scale (float): Minimum ratio between the cropped region and the original image.
If 0, image(s) will not be cropped. Defaults to .5.
aspect_ratio (float): Aspect ratio of cropped region. Defaults to .33.
"""
def __init__(self, min_scale=0.5, aspect_ratio=0.33):
super(RandomScaleAspect, self).__init__()
self.min_scale = min_scale
self.aspect_ratio = aspect_ratio
def apply(self, sample):
if self.min_scale != 0 and self.aspect_ratio != 0:
img_height, img_width = sample['image'].shape[:2]
sample = RandomCrop(
crop_size=(img_height, img_width),
aspect_ratio=[self.aspect_ratio, 1. / self.aspect_ratio],
scaling=[self.min_scale, 1.],
num_attempts=10,
allow_no_crop=False)(sample)
return sample
class RandomExpand(Transform):
"""
Randomly expand the input by padding according to random offsets.
Args:
upper_ratio(float, optional): The maximum ratio to which the original image is expanded. Defaults to 4..
        prob(float, optional): Probability of applying the expansion. Defaults to .5.
        im_padding_value(Number or Sequence[float], optional): Filling value(s) for the image. Defaults to 127.5.
label_padding_value(int, optional): Filling value for the mask. Defaults to 255.
See Also:
paddlers.transforms.Padding
"""
def __init__(self,
upper_ratio=4.,
prob=.5,
im_padding_value=127.5,
label_padding_value=255):
super(RandomExpand, self).__init__()
assert upper_ratio > 1.01, "expand ratio must be larger than 1.01"
self.upper_ratio = upper_ratio
self.prob = prob
assert isinstance(im_padding_value, (Number, Sequence)), \
"fill value must be either float or sequence"
self.im_padding_value = im_padding_value
self.label_padding_value = label_padding_value
def apply(self, sample):
if random.random() < self.prob:
im_h, im_w = sample['image'].shape[:2]
ratio = np.random.uniform(1., self.upper_ratio)
h = int(im_h * ratio)
w = int(im_w * ratio)
if h > im_h and w > im_w:
y = np.random.randint(0, h - im_h)
x = np.random.randint(0, w - im_w)
target_size = (h, w)
offsets = (x, y)
sample = Padding(
target_size=target_size,
pad_mode=-1,
offsets=offsets,
im_padding_value=self.im_padding_value,
label_padding_value=self.label_padding_value)(sample)
return sample
class Padding(Transform):
def __init__(self,
target_size=None,
pad_mode=0,
offsets=None,
im_padding_value=127.5,
label_padding_value=255,
size_divisor=32):
"""
Pad image to a specified size or multiple of size_divisor.
Args:
target_size(int, Sequence, optional): Image target size, if None, pad to multiple of size_divisor. Defaults to None.
            pad_mode({-1, 0, 1, 2}, optional): Pad mode. Currently only four modes are supported: [-1, 0, 1, 2]. If -1, pad with the
                specified offsets. If 0, only pad to the right and bottom. If 1, pad according to center. If 2, only pad to the left
                and top. Defaults to 0.
            im_padding_value(Number or Sequence[float], optional): Filling value(s) for the image. Defaults to 127.5.
            label_padding_value(int, optional): Filling value for the mask. Defaults to 255.
            size_divisor(int, optional): Image width and height after padding will be a multiple of size_divisor. Defaults to 32.
"""
super(Padding, self).__init__()
if isinstance(target_size, (list, tuple)):
if len(target_size) != 2:
raise ValueError(
'`target_size` should include 2 elements, but it is {}'.
format(target_size))
if isinstance(target_size, int):
target_size = [target_size] * 2
assert pad_mode in [
-1, 0, 1, 2
], 'currently only supports four modes [-1, 0, 1, 2]'
if pad_mode == -1:
assert offsets, 'if pad_mode is -1, offsets should not be None'
self.target_size = target_size
self.size_divisor = size_divisor
self.pad_mode = pad_mode
self.offsets = offsets
self.im_padding_value = im_padding_value
self.label_padding_value = label_padding_value
def apply_im(self, image, offsets, target_size):
x, y = offsets
h, w = target_size
im_h, im_w, channel = image.shape[:3]
canvas = np.ones((h, w, channel), dtype=np.float32)
canvas *= np.array(self.im_padding_value, dtype=np.float32)
canvas[y:y + im_h, x:x + im_w, :] = image.astype(np.float32)
return canvas
def apply_mask(self, mask, offsets, target_size):
x, y = offsets
im_h, im_w = mask.shape[:2]
h, w = target_size
canvas = np.ones((h, w), dtype=np.float32)
canvas *= np.array(self.label_padding_value, dtype=np.float32)
canvas[y:y + im_h, x:x + im_w] = mask.astype(np.float32)
return canvas
def apply_bbox(self, bbox, offsets):
return bbox + np.array(offsets * 2, dtype=np.float32)
def apply_segm(self, segms, offsets, im_size, size):
x, y = offsets
height, width = im_size
h, w = size
expanded_segms = []
for segm in segms:
if is_poly(segm):
# Polygon format
expanded_segms.append(
[expand_poly(poly, x, y) for poly in segm])
else:
# RLE format
expanded_segms.append(
expand_rle(segm, x, y, height, width, h, w))
return expanded_segms
def apply(self, sample):
im_h, im_w = sample['image'].shape[:2]
if self.target_size:
h, w = self.target_size
assert (
im_h <= h and im_w <= w
), 'target size ({}, {}) cannot be less than image size ({}, {})'\
.format(h, w, im_h, im_w)
else:
h = (np.ceil(im_h / self.size_divisor) *
self.size_divisor).astype(int)
w = (np.ceil(im_w / self.size_divisor) *
self.size_divisor).astype(int)
if h == im_h and w == im_w:
return sample
if self.pad_mode == -1:
offsets = self.offsets
elif self.pad_mode == 0:
offsets = [0, 0]
elif self.pad_mode == 1:
offsets = [(w - im_w) // 2, (h - im_h) // 2]
else:
offsets = [w - im_w, h - im_h]
sample['image'] = self.apply_im(sample['image'], offsets, (h, w))
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'], offsets, (h, w))
if 'mask' in sample:
sample['mask'] = self.apply_mask(sample['mask'], offsets, (h, w))
if 'aux_masks' in sample:
sample['aux_masks'] = list(
map(partial(
self.apply_mask, offsets=offsets, target_size=(h, w)),
sample['aux_masks']))
if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
sample['gt_bbox'] = self.apply_bbox(sample['gt_bbox'], offsets)
if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
sample['gt_poly'] = self.apply_segm(
sample['gt_poly'], offsets, im_size=[im_h, im_w], size=[h, w])
return sample
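# Editor's note: illustrative sketch (not part of the original API) of how the four
# pad modes above translate into the (x, y) offset of the original image inside
# the padded canvas of size (h, w).
def _example_padding_offsets(im_h, im_w, h, w, pad_mode, offsets=None):
    """Return the top-left (x, y) offset Padding.apply would use."""
    if pad_mode == -1:
        return offsets                             # caller-specified offsets
    if pad_mode == 0:
        return [0, 0]                              # pad right and bottom only
    if pad_mode == 1:
        return [(w - im_w) // 2, (h - im_h) // 2]  # centre the image
    return [w - im_w, h - im_h]                    # pad left and top only
# e.g. _example_padding_offsets(300, 400, 512, 512, pad_mode=1) -> [56, 106]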
class MixupImage(Transform):
def __init__(self, alpha=1.5, beta=1.5, mixup_epoch=-1):
"""
        Mixup two images and their gt_bbox/gt_score.
Args:
alpha (float, optional): Alpha parameter of beta distribution. Defaults to 1.5.
beta (float, optional): Beta parameter of beta distribution. Defaults to 1.5.
"""
super(MixupImage, self).__init__()
if alpha <= 0.0:
raise ValueError("alpha should be positive in {}".format(self))
if beta <= 0.0:
raise ValueError("beta should be positive in {}".format(self))
self.alpha = alpha
self.beta = beta
self.mixup_epoch = mixup_epoch
def apply_im(self, image1, image2, factor):
h = max(image1.shape[0], image2.shape[0])
w = max(image1.shape[1], image2.shape[1])
img = np.zeros((h, w, image1.shape[2]), 'float32')
img[:image1.shape[0], :image1.shape[1], :] = \
image1.astype('float32') * factor
img[:image2.shape[0], :image2.shape[1], :] += \
image2.astype('float32') * (1.0 - factor)
return img.astype('uint8')
def __call__(self, sample):
if not isinstance(sample, Sequence):
return sample
assert len(sample) == 2, 'mixup need two samples'
factor = np.random.beta(self.alpha, self.beta)
factor = max(0.0, min(1.0, factor))
if factor >= 1.0:
return sample[0]
if factor <= 0.0:
return sample[1]
image = self.apply_im(sample[0]['image'], sample[1]['image'], factor)
result = copy.deepcopy(sample[0])
result['image'] = image
# apply bbox and score
if 'gt_bbox' in sample[0]:
gt_bbox1 = sample[0]['gt_bbox']
gt_bbox2 = sample[1]['gt_bbox']
gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
result['gt_bbox'] = gt_bbox
if 'gt_poly' in sample[0]:
gt_poly1 = sample[0]['gt_poly']
gt_poly2 = sample[1]['gt_poly']
gt_poly = gt_poly1 + gt_poly2
result['gt_poly'] = gt_poly
if 'gt_class' in sample[0]:
gt_class1 = sample[0]['gt_class']
gt_class2 = sample[1]['gt_class']
gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
result['gt_class'] = gt_class
gt_score1 = np.ones_like(sample[0]['gt_class'])
gt_score2 = np.ones_like(sample[1]['gt_class'])
gt_score = np.concatenate(
(gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
result['gt_score'] = gt_score
if 'is_crowd' in sample[0]:
is_crowd1 = sample[0]['is_crowd']
is_crowd2 = sample[1]['is_crowd']
is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
result['is_crowd'] = is_crowd
if 'difficult' in sample[0]:
is_difficult1 = sample[0]['difficult']
is_difficult2 = sample[1]['difficult']
is_difficult = np.concatenate(
(is_difficult1, is_difficult2), axis=0)
result['difficult'] = is_difficult
return result
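# Editor's note: illustrative sketch (not part of the original API) of the pixel
# blend performed by MixupImage.apply_im for two equally sized uint8 images; the
# real method additionally zero-pads both images to their common maximum height
# and width, and the same factor weights the concatenated gt_score arrays.
def _example_mixup_blend(image1, image2, factor):
    """Blend two equally sized numpy images with the mixup factor."""
    blended = image1.astype('float32') * factor \
        + image2.astype('float32') * (1.0 - factor)
    return blended.astype('uint8')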
class RandomDistort(Transform):
"""
Random color distortion.
Args:
brightness_range(float, optional): Range of brightness distortion. Defaults to .5.
brightness_prob(float, optional): Probability of brightness distortion. Defaults to .5.
contrast_range(float, optional): Range of contrast distortion. Defaults to .5.
contrast_prob(float, optional): Probability of contrast distortion. Defaults to .5.
saturation_range(float, optional): Range of saturation distortion. Defaults to .5.
saturation_prob(float, optional): Probability of saturation distortion. Defaults to .5.
hue_range(float, optional): Range of hue distortion. Defaults to .5.
hue_prob(float, optional): Probability of hue distortion. Defaults to .5.
random_apply (bool, optional): whether to apply in random (yolo) or fixed (SSD)
order. Defaults to True.
count (int, optional): the number of doing distortion. Defaults to 4.
shuffle_channel (bool, optional): whether to swap channels randomly. Defaults to False.
"""
def __init__(self,
brightness_range=0.5,
brightness_prob=0.5,
contrast_range=0.5,
contrast_prob=0.5,
saturation_range=0.5,
saturation_prob=0.5,
hue_range=18,
hue_prob=0.5,
random_apply=True,
count=4,
shuffle_channel=False):
super(RandomDistort, self).__init__()
self.brightness_range = [1 - brightness_range, 1 + brightness_range]
self.brightness_prob = brightness_prob
self.contrast_range = [1 - contrast_range, 1 + contrast_range]
self.contrast_prob = contrast_prob
self.saturation_range = [1 - saturation_range, 1 + saturation_range]
self.saturation_prob = saturation_prob
self.hue_range = [1 - hue_range, 1 + hue_range]
self.hue_prob = hue_prob
self.random_apply = random_apply
self.count = count
self.shuffle_channel = shuffle_channel
def apply_hue(self, image):
low, high = self.hue_range
if np.random.uniform(0., 1.) < self.hue_prob:
return image
        # it works, but results differ from the HSV version
delta = np.random.uniform(low, high)
u = np.cos(delta * np.pi)
w = np.sin(delta * np.pi)
bt = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]])
tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.321],
[0.211, -0.523, 0.311]])
ityiq = np.array([[1.0, 0.956, 0.621], [1.0, -0.272, -0.647],
[1.0, -1.107, 1.705]])
t = np.dot(np.dot(ityiq, bt), tyiq).T
res_list = []
channel = image.shape[2]
for i in range(channel // 3):
sub_img = image[:, :, 3 * i:3 * (i + 1)]
sub_img = sub_img.astype(np.float32)
            sub_img = np.dot(sub_img, t)
res_list.append(sub_img)
if channel % 3 != 0:
i = channel % 3
res_list.append(image[:, :, -i:])
return np.concatenate(res_list, axis=2)
def apply_saturation(self, image):
low, high = self.saturation_range
delta = np.random.uniform(low, high)
if np.random.uniform(0., 1.) < self.saturation_prob:
return image
res_list = []
channel = image.shape[2]
for i in range(channel // 3):
sub_img = image[:, :, 3 * i:3 * (i + 1)]
sub_img = sub_img.astype(np.float32)
            # it works, but results differ from the HSV version
gray = sub_img * np.array(
[[[0.299, 0.587, 0.114]]], dtype=np.float32)
gray = gray.sum(axis=2, keepdims=True)
gray *= (1.0 - delta)
sub_img *= delta
sub_img += gray
res_list.append(sub_img)
if channel % 3 != 0:
i = channel % 3
res_list.append(image[:, :, -i:])
return np.concatenate(res_list, axis=2)
def apply_contrast(self, image):
low, high = self.contrast_range
if np.random.uniform(0., 1.) < self.contrast_prob:
return image
delta = np.random.uniform(low, high)
image = image.astype(np.float32)
image *= delta
return image
def apply_brightness(self, image):
low, high = self.brightness_range
if np.random.uniform(0., 1.) < self.brightness_prob:
return image
delta = np.random.uniform(low, high)
image = image.astype(np.float32)
image += delta
return image
def apply(self, sample):
if self.random_apply:
functions = [
self.apply_brightness, self.apply_contrast,
self.apply_saturation, self.apply_hue
]
distortions = np.random.permutation(functions)[:self.count]
for func in distortions:
sample['image'] = func(sample['image'])
if 'image2' in sample:
sample['image2'] = func(sample['image2'])
return sample
sample['image'] = self.apply_brightness(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_brightness(sample['image2'])
mode = np.random.randint(0, 2)
if mode:
sample['image'] = self.apply_contrast(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_contrast(sample['image2'])
sample['image'] = self.apply_saturation(sample['image'])
sample['image'] = self.apply_hue(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_saturation(sample['image2'])
sample['image2'] = self.apply_hue(sample['image2'])
if not mode:
sample['image'] = self.apply_contrast(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_contrast(sample['image2'])
if self.shuffle_channel:
if np.random.randint(0, 2):
sample['image'] = sample['image'][..., np.random.permutation(3)]
if 'image2' in sample:
sample['image2'] = sample['image2'][
..., np.random.permutation(3)]
return sample
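# Editor's note: illustrative sketch (not part of the original API) of the
# saturation adjustment in apply_saturation above: each 3-channel group is blended
# with its Rec.601 luma, so delta=1 keeps the image unchanged and delta=0 gives
# pure grayscale. `rgb` is assumed to be a float numpy array of shape (H, W, 3).
def _example_adjust_saturation(rgb, delta):
    """Blend an RGB image with its grayscale version by `delta`."""
    gray = (rgb * [0.299, 0.587, 0.114]).sum(axis=2, keepdims=True)
    return rgb * delta + gray * (1.0 - delta)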
class RandomBlur(Transform):
"""
Randomly blur input image(s).
Args:
prob (float): Probability of blurring.
"""
def __init__(self, prob=0.1):
super(RandomBlur, self).__init__()
self.prob = prob
def apply_im(self, image, radius):
image = cv2.GaussianBlur(image, (radius, radius), 0, 0)
return image
def apply(self, sample):
if self.prob <= 0:
n = 0
elif self.prob >= 1:
n = 1
else:
n = int(1.0 / self.prob)
if n > 0:
if np.random.randint(0, n) == 0:
radius = np.random.randint(3, 10)
if radius % 2 != 1:
radius = radius + 1
if radius > 9:
radius = 9
sample['image'] = self.apply_im(sample['image'], radius)
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'], radius)
return sample
class Defogging(Transform):
"""
Defog input image(s).
Args:
gamma (bool, optional): Use gamma correction or not. Defaults to False.
"""
def __init__(self, gamma=False):
super(Defogging, self).__init__()
self.gamma = gamma
def apply_im(self, image):
image = de_haze(image, self.gamma)
return image
def apply(self, sample):
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
return sample
class DimReducing(Transform):
"""
Use PCA to reduce input image(s) dimension.
Args:
dim (int, optional): Reserved dimensions. Defaults to 3.
whiten (bool, optional): PCA whiten or not. Defaults to True.
"""
def __init__(self, dim=3, whiten=True):
super(DimReducing, self).__init__()
self.dim = dim
self.whiten = whiten
def apply_im(self, image):
image = pca(image, self.dim, self.whiten)
return image
def apply(self, sample):
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
return sample
class BandSelecting(Transform):
"""
Select the band of the input image(s).
Args:
band_list (list, optional): Bands of selected (Start with 1). Defaults to [1, 2, 3].
"""
def __init__(self, band_list=[1, 2, 3]):
super(BandSelecting, self).__init__()
self.band_list = band_list
def apply_im(self, image):
image = select_bands(image, self.band_list)
return image
def apply(self, sample):
sample['image'] = self.apply_im(sample['image'])
if 'image2' in sample:
sample['image2'] = self.apply_im(sample['image2'])
return sample
class _PadBox(Transform):
def __init__(self, num_max_boxes=50):
"""
Pad zeros to bboxes if number of bboxes is less than num_max_boxes.
Args:
num_max_boxes (int, optional): the max number of bboxes. Defaults to 50.
"""
self.num_max_boxes = num_max_boxes
super(_PadBox, self).__init__()
def apply(self, sample):
gt_num = min(self.num_max_boxes, len(sample['gt_bbox']))
num_max = self.num_max_boxes
pad_bbox = np.zeros((num_max, 4), dtype=np.float32)
if gt_num > 0:
pad_bbox[:gt_num, :] = sample['gt_bbox'][:gt_num, :]
sample['gt_bbox'] = pad_bbox
if 'gt_class' in sample:
pad_class = np.zeros((num_max, ), dtype=np.int32)
if gt_num > 0:
pad_class[:gt_num] = sample['gt_class'][:gt_num, 0]
sample['gt_class'] = pad_class
if 'gt_score' in sample:
pad_score = np.zeros((num_max, ), dtype=np.float32)
if gt_num > 0:
pad_score[:gt_num] = sample['gt_score'][:gt_num, 0]
sample['gt_score'] = pad_score
        # in training, for example in op ExpandImage,
        # the bbox and gt_class are expanded, but difficult is not,
        # so judge by its length
if 'difficult' in sample:
pad_diff = np.zeros((num_max, ), dtype=np.int32)
if gt_num > 0:
pad_diff[:gt_num] = sample['difficult'][:gt_num, 0]
sample['difficult'] = pad_diff
if 'is_crowd' in sample:
pad_crowd = np.zeros((num_max, ), dtype=np.int32)
if gt_num > 0:
pad_crowd[:gt_num] = sample['is_crowd'][:gt_num, 0]
sample['is_crowd'] = pad_crowd
return sample
class _NormalizeBox(Transform):
def __init__(self):
super(_NormalizeBox, self).__init__()
def apply(self, sample):
height, width = sample['image'].shape[:2]
for i in range(sample['gt_bbox'].shape[0]):
sample['gt_bbox'][i][0] = sample['gt_bbox'][i][0] / width
sample['gt_bbox'][i][1] = sample['gt_bbox'][i][1] / height
sample['gt_bbox'][i][2] = sample['gt_bbox'][i][2] / width
sample['gt_bbox'][i][3] = sample['gt_bbox'][i][3] / height
return sample
class _BboxXYXY2XYWH(Transform):
"""
Convert bbox XYXY format to XYWH format.
"""
def __init__(self):
super(_BboxXYXY2XYWH, self).__init__()
def apply(self, sample):
bbox = sample['gt_bbox']
bbox[:, 2:4] = bbox[:, 2:4] - bbox[:, :2]
bbox[:, :2] = bbox[:, :2] + bbox[:, 2:4] / 2.
sample['gt_bbox'] = bbox
return sample
class _Permute(Transform):
def __init__(self):
super(_Permute, self).__init__()
def apply(self, sample):
sample['image'] = permute(sample['image'], False)
if 'image2' in sample:
sample['image2'] = permute(sample['image2'], False)
return sample
class RandomSwap(Transform):
"""
Randomly swap multi-temporal images.
Args:
prob (float, optional): Probability of swapping the input images. Default: 0.2.
"""
def __init__(self, prob=0.2):
super(RandomSwap, self).__init__()
self.prob = prob
def apply(self, sample):
if 'image2' not in sample:
raise ValueError('image2 is not found in the sample.')
if random.random() < self.prob:
sample['image'], sample['image2'] = sample['image2'], sample[
'image']
return sample
class ArrangeSegmenter(Transform):
def __init__(self, mode):
super(ArrangeSegmenter, self).__init__()
if mode not in ['train', 'eval', 'test', 'quant']:
raise ValueError(
"mode should be defined as one of ['train', 'eval', 'test', 'quant']!"
)
self.mode = mode
def apply(self, sample):
if 'mask' in sample:
mask = sample['mask']
image = permute(sample['image'], False)
if self.mode == 'train':
mask = mask.astype('int64')
return image, mask
if self.mode == 'eval':
mask = np.asarray(Image.open(mask))
mask = mask[np.newaxis, :, :].astype('int64')
return image, mask
if self.mode == 'test':
return image,
class ArrangeChangeDetector(Transform):
def __init__(self, mode):
super(ArrangeChangeDetector, self).__init__()
if mode not in ['train', 'eval', 'test', 'quant']:
raise ValueError(
"mode should be defined as one of ['train', 'eval', 'test', 'quant']!"
)
self.mode = mode
def apply(self, sample):
if 'mask' in sample:
mask = sample['mask']
image_t1 = permute(sample['image'], False)
image_t2 = permute(sample['image2'], False)
if self.mode == 'train':
mask = mask.astype('int64')
masks = [mask]
if 'aux_masks' in sample:
masks.extend(
map(methodcaller('astype', 'int64'), sample['aux_masks']))
return (
image_t1,
image_t2, ) + tuple(masks)
if self.mode == 'eval':
mask = np.asarray(Image.open(mask))
mask = mask[np.newaxis, :, :].astype('int64')
return image_t1, image_t2, mask
if self.mode == 'test':
return image_t1, image_t2,
class ArrangeClassifier(Transform):
def __init__(self, mode):
super(ArrangeClassifier, self).__init__()
if mode not in ['train', 'eval', 'test', 'quant']:
raise ValueError(
"mode should be defined as one of ['train', 'eval', 'test', 'quant']!"
)
self.mode = mode
def apply(self, sample):
image = permute(sample['image'], False)
if self.mode in ['train', 'eval']:
return image, sample['label']
else:
return image
class ArrangeDetector(Transform):
def __init__(self, mode):
super(ArrangeDetector, self).__init__()
if mode not in ['train', 'eval', 'test', 'quant']:
raise ValueError(
"mode should be defined as one of ['train', 'eval', 'test', 'quant']!"
)
self.mode = mode
def apply(self, sample):
if self.mode == 'eval' and 'gt_poly' in sample:
del sample['gt_poly']
return sample
|
the-stack_0_2278 | from datetime import datetime
from enum import Enum
from functools import wraps
from typing import Union
import json
import pytz
class DatetimeFormats(Enum):
FULLDATETIME = '%Y-%m-%dT%H:%M:%S.%f%z'
DATE = '%Y-%m-%d'
YMDHMSmS = '%Y-%m-%dT%H:%M:%S.%f%z'
YMDHMS = '%Y-%m-%dT%H:%M:%S%z'
YMD = '%Y-%m-%d'
def str_to_datetime(date: str, format_str: str) -> Union[datetime, None]:
"""Change date format to datetime, based on `format_str` provided.
If `date` is already a datetime object, return it. Make the date aware."""
if date is None:
return date
if not isinstance(date, datetime):
if format_str.find('%z') != -1:
# Localization needed. Check if provided.
if date[-1] == 'Z':
date = date[:-1]
format_str = format_str[:-2]
elif date.find('+') == -1:
format_str = format_str[:-2]
date = datetime.strptime(date, format_str)
try: # Localize to utc if not yet localized
return pytz.utc.localize(date)
except ValueError: # Already localized
return date
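# Editor's note: illustrative helper (not part of the original module) showing the
# three input shapes str_to_datetime accepts for a '%z'-bearing format: a 'Z'
# suffix, an explicit UTC offset, and no offset at all (then localised to UTC).
def _example_parse_variants():
    fmt = DatetimeFormats.YMDHMSmS.value
    return [str_to_datetime('2021-05-06T07:08:09.000Z', fmt),
            str_to_datetime('2021-05-06T07:08:09.000+0000', fmt),
            str_to_datetime('2021-05-06T07:08:09.000', fmt)]
# all three entries represent 2021-05-06 07:08:09 in UTC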
def _return_with_miliseconds(func):
"""If the date has milliseconds, return only 3 decimal places. Older servers
    need this to work. Newer ones can parse both 3 and 6 decimal places."""
@wraps(func)
def inner(*args, **kwargs):
res = func(*args, **kwargs)
if isinstance(res, str) and res.find('.') != -1:
plus_pos = res.find('+')
res = f"{res[:res.find('.') + 4]}{res[plus_pos:] if plus_pos != -1 else ''}"
return res
return inner
@_return_with_miliseconds
def datetime_to_str(date: datetime, format_str: str) -> Union[str, None]:
"""Get date string from datetime, based on `format_str` provided.
If `date` is already a string, return it. Make the date aware."""
if isinstance(date, str):
return date
try:
# We need the date to be aware, or the string won't be accepted by API
try:
return pytz.utc.localize(date).strftime(format_str)
except ValueError: # Already localized
return date.strftime(format_str)
except (TypeError, AttributeError):
return None
def _get_only_datetimeformat_map(string_to_date_map: dict) -> dict:
    """Return all entries that are of `DatetimeFormats` enum type."""
return {
key: value
for (key, value) in string_to_date_map.items()
if isinstance(value, DatetimeFormats)
}
def _solve_prefix_and_convert_date(func, name: str, date: str, string_to_date_map: dict,
only_datetimefomat: bool = True):
if only_datetimefomat:
string_to_date_map = _get_only_datetimeformat_map(string_to_date_map)
if f'_{name}' in string_to_date_map:
date_format = string_to_date_map[f'_{name}'].value if isinstance(
string_to_date_map[f'_{name}'], DatetimeFormats) else string_to_date_map[f'_{name}']
return func(date, date_format)
elif name in string_to_date_map:
date_format = string_to_date_map[name].value if isinstance(
string_to_date_map[name], DatetimeFormats) else string_to_date_map[name]
return func(date, date_format)
return date
def map_str_to_datetime(name: str, date: str, string_to_date_map: dict,
only_datetimefomat: bool = True) -> datetime:
"""Change date format to datetime, based on `string_to_date_map`
    conversion dict. All occurrences of the `DatetimeFormats` Enum in
`string_to_date_map` are converted to corresponding string values.
If name is not found in `string_to_date_map`, returns date without changes.
"""
return _solve_prefix_and_convert_date(str_to_datetime, name, date, string_to_date_map,
only_datetimefomat)
def map_datetime_to_str(name: str, date: datetime, string_to_date_map: dict,
only_datetimefomat: bool = True) -> str:
"""Change date format to string, based on `string_to_date_map`
    conversion dict. All occurrences of the `DatetimeFormats` Enum in
`string_to_date_map` are converted to corresponding string values.
If name is not found in `string_to_date_map`, returns date without changes.
"""
return _solve_prefix_and_convert_date(datetime_to_str, name, date, string_to_date_map,
only_datetimefomat)
def bulk_str_to_datetime(source: dict, string_to_date_map: dict,
only_datetimefomat: bool = True) -> dict:
"""Change all dates from `source` found in `string_to_date_map`
to datetime format. If parameter is not found in `string_to_date_map`,
it is returned without changes."""
for key, val in source.items():
source[key] = map_str_to_datetime(key, val, string_to_date_map, only_datetimefomat)
return source
def bulk_datetime_to_str(source: dict, string_to_date_map: dict,
only_datetimefomat: bool = True) -> dict:
"""Change all dates from `source` found in `string_to_date_map`
to string format. If parameter is not found in `string_to_date_map`,
it is returned without changes."""
for key, val in source.items():
source[key] = map_datetime_to_str(key, val, string_to_date_map, only_datetimefomat)
return source
def override_datetime_format(original_format: str, expected_format: str, fields: tuple,
to_unpack=None):
"""A decorator designed to override the datetime format
of some dates in responses from REST server as they can be
a bit crazy sometimes (e.g. two different formats for one object)
Args:
original_format: original format of a datetime
expected_format: the format you want to convert to
fields: fields of the object - e.g. dateModified, dateCreated
to_unpack: when response returns a list of objects
probably they need to be unpacked
"""
def decorator_datetime(func):
@wraps(func)
def wrapped(*args, **kwargs):
response = func(*args, **kwargs)
response_json = response.json()
try:
iterable = response_json[to_unpack] if to_unpack else [response_json]
except KeyError:
iterable = []
for obj in iterable:
for field in fields:
datetime_obj = str_to_datetime(obj[field], original_format)
obj[field] = datetime_to_str(datetime_obj, expected_format)
response.encoding, response._content = 'utf-8', json.dumps(response_json).encode(
'utf-8')
return response
return wrapped
return decorator_datetime
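# Editor's note: a self-contained usage sketch (not part of the original module).
# _FakeResponse stands in for a requests.Response-like object (.json(), .encoding,
# ._content) so the decorator can be exercised without a real server; the field
# names and formats below are hypothetical.
class _FakeResponse:
    def __init__(self, payload):
        self.encoding, self._content = 'utf-8', json.dumps(payload).encode('utf-8')
    def json(self):
        return json.loads(self._content)
@override_datetime_format(
    original_format=DatetimeFormats.YMDHMS.value,
    expected_format=DatetimeFormats.YMD.value,
    fields=('dateCreated',))
def _example_fetch():
    return _FakeResponse({'dateCreated': '2021-05-06T07:08:09+0000'})
# _example_fetch().json() -> {'dateCreated': '2021-05-06'}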
|
the-stack_0_2279 | """Support for Aurora Forecast sensor."""
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import PERCENTAGE
from . import AuroraEntity
from .const import COORDINATOR, DOMAIN
async def async_setup_entry(hass, entry, async_add_entries):
"""Set up the sensor platform."""
coordinator = hass.data[DOMAIN][entry.entry_id][COORDINATOR]
entity = AuroraSensor(
coordinator=coordinator,
name=f"{coordinator.name} Aurora Visibility %",
icon="mdi:gauge",
)
async_add_entries([entity])
class AuroraSensor(SensorEntity, AuroraEntity):
"""Implementation of an aurora sensor."""
@property
def state(self):
"""Return % chance the aurora is visible."""
return self.coordinator.data
@property
def unit_of_measurement(self):
"""Return the unit of measure."""
return PERCENTAGE
|
the-stack_0_2280 | import csv
import os
from statistics import mean, median, quantiles
def process(fqp, resultsfile):
# gather the max per line of file of round 1
prev_fqp = fqp.replace("Round2", "Round1")
r1max = []
with open(prev_fqp, "r") as csvfile:
datareader = csv.reader(csvfile, delimiter=',')
titles = next(datareader)
total_pos = [_ for _, y in enumerate(titles) if y == "Total"]
for row in datareader:
r1max.append(max([float(row[_]) for _ in total_pos]))
print(r1max)
# parse file of round 2
threads = -1
category = -1
senders = -1
totals = []
with open(fqp, "r") as csvfile:
datareader = csv.reader(csvfile, delimiter=',')
titles = next(datareader)
total_pos = [_ for _, y in enumerate(titles) if y == "Total"]
node_pos = [_ for _, y in enumerate(titles) if y.startswith("Node")]
for row in datareader:
if threads == -1:
threads = int(row[1])
category = row[0][0]
senders = [row[_] for _ in node_pos].count("sending")
prev_max = r1max.pop(0)
totals.extend([float(row[_])+prev_max for _ in total_pos])
nodes = len(node_pos)
## calculate statistics
mind = min(totals)
q1 = quantiles(totals)[0]
medi = median(totals)
avrg = mean(totals)
q3 = quantiles(totals)[2]
maxd = max(totals)
## write results
if not DEBUG:
with open(resultsfile, "a") as f:
f.write(f"{category},{nodes},{threads},{senders},{mind},{q1},{medi},{avrg},{q3},{maxd}\n")
with open(resultsfile.replace(".csv", "all_totals.csv"), "a") as f:
f.write(f"{category},{nodes},{threads},{senders},"+",".join(map(str, totals))+"\n")
print(f"{category},{nodes},{threads},{senders},{mind},{q1},{medi},{avrg},{q3},{maxd}")
# values:
# experiment = "threads"
# experiment = "nodes"
# experiment = "messages"
experiment = "messages"
## file where to write the aggregation results
## all totals will be written to experiment+"all_totals.csv" based on this filename
resultfile = experiment+".csv"
## basefolder where the experiment results can be found
basefolder = "C:\\epc2a\\"+experiment
## output to console instead of writing to file
DEBUG = False
if not DEBUG:
with open(resultfile, "w") as f:
f.write("category,nodes,threads,senders,mind,q1,medi,avrg,q3,maxd\n")
with open(resultfile.replace(".csv", "all_totals.csv"), "w") as f:
f.write("category,nodes,threads,senders,totals...\n")
for r, ds, fs in os.walk(basefolder):
for fn in [_ for _ in fs if _.endswith("Round2.csv")]:
fqp = r+"\\"+fn
process(fqp, resultfile)
|
the-stack_0_2281 | # Main differences in this ablation:
# - there is no optimism
# - the novelty Q is trained only between episodes
# - the novelty Q is trained on _logged_ novelty rewards, not live ones
import time
import os
import math
import pickle
import queue
from typing import Any
import numpy as np
import matplotlib.pyplot as plt
import jax
from jax import numpy as jnp, random, lax
import flax
from flax import nn, optim, struct
from dm_control import suite
import dmcontrol_gridworld
import replay_buffer
import q_learning
import tabular_density as density
import utils
from observation_domains import DOMAINS
import jax_specs
import point
R_MAX = 100
@struct.dataclass
class ExplorationState():
"""The pure-JAX components that can be jitted/vmapped.
"""
novq_state: q_learning.QLearnerState
target_novq_state: q_learning.QLearnerState
density_state: density.DensityState
temperature: float
update_temperature: float
prior_count: float
optimistic_updates: bool
target_network: bool
# density_fns: Any
@struct.dataclass
class AgentState():
"""A container for the entire state; not jittable.
"""
exploration_state: ExplorationState
policy_state: Any = struct.field(pytree_node=False)
replay: Any = struct.field(pytree_node=False)
policy_replay: Any = struct.field(pytree_node=False)
n_candidates: int
n_update_candidates: int
prioritized_update: bool
update_target_every: int
warmup_steps: int
optimistic_actions: bool
steps_since_tupdate: int = 0
# policy_fns: Any = struct.field(pytree_node=False)
@jax.jit
def compute_novelty_reward(exploration_state, states, actions):
"""Returns a novelty reward in [0, 1] for each (s, a) pair."""
counts = density.get_count_batch(
exploration_state.density_state, states, actions)
ones = jnp.ones(jnp.array(counts).shape)
rewards = (counts + 1e-8) ** (-0.5)
options = jnp.stack([ones, rewards], axis=1)
# Clip rewards to be at most 1 (when count is 0)
return jnp.min(options, axis=1)
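# Editor's note: illustrative values (editor's addition) for the count-based rule
# above, r = min(1, (count + 1e-8) ** -0.5): unseen pairs get the capped reward 1,
# and the reward decays with the square root of the visit count.
def _example_novelty_rewards():
    """Return a small count -> reward table for the rule above."""
    return {count: min(1.0, (count + 1e-8) ** -0.5) for count in (0, 1, 4, 25)}
    # -> {0: 1.0, 1: ~1.0, 4: ~0.5, 25: ~0.2}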
@jax.profiler.trace_function
@jax.partial(jax.jit, static_argnums=(3, 4))
def train_step_candidates(exploration_state: ExplorationState,
transitions,
candidate_next_actions,
use_target_network,
use_optimistic_updates):
"""The jittable component of the exploration Q function training step."""
# these transitions come from agent_state.replay, not policy_replay,
# so they contain *novelty* rewards
states, actions, next_states, novelty_reward = transitions
discount = exploration_state.novq_state.discount
temp = exploration_state.update_temperature
if use_optimistic_updates:
next_values = predict_optimistic_values_batch(
exploration_state.novq_state,
exploration_state.density_state,
exploration_state.prior_count,
next_states, candidate_next_actions)
next_values_target = predict_optimistic_values_batch(
exploration_state.target_novq_state,
exploration_state.density_state,
exploration_state.prior_count,
next_states, candidate_next_actions)
else:
next_values = q_learning.predict_action_values_batch(
exploration_state.novq_state,
next_states,
candidate_next_actions)
next_values_target = q_learning.predict_action_values_batch(
exploration_state.target_novq_state,
next_states,
candidate_next_actions)
# double DQN rule:
# - select next action according to current Q
# - evaluate it according to target Q
next_value_probs = nn.softmax(next_values / temp, axis=1)
next_value_elements = (next_value_probs * next_values_target)
expected_next_values = next_value_elements.sum(axis=1)
expected_next_values = expected_next_values.reshape(novelty_reward.shape)
# compute targets and update
q_targets = novelty_reward + discount * expected_next_values
# clip targets to be within the feasible set
q_targets = jnp.minimum(q_targets, R_MAX)
novq_state, losses = q_functions.train_step(
exploration_state.novq_state,
states, actions, q_targets)
return exploration_state.replace(novq_state=novq_state), losses
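# Editor's note: illustrative arithmetic only (editor's sketch, plain Python). It
# collapses the double-DQN detail above (probabilities from the online Q, values
# from the target Q) into a single value list to show how the Boltzmann-weighted
# backup target is formed; the default numbers are hypothetical.
def _example_soft_target(next_values=(1.0, 3.0), reward=1.0, discount=0.97, temp=1.0):
    """Return the clipped soft Bellman target for one transition."""
    exps = [math.exp(v / temp) for v in next_values]
    probs = [e / sum(exps) for e in exps]
    expected = sum(p * v for p, v in zip(probs, next_values))
    return min(reward + discount * expected, R_MAX)
# with the defaults: softmax([1, 3]) ~= [0.12, 0.88], expected ~= 2.76,
# target ~= 1 + 0.97 * 2.76 ~= 3.68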
@jax.profiler.trace_function
def train_step(agent_state, transitions):
"""A full (optimistic) training step for the exploration Q function."""
states, actions, next_states, rewards = transitions
# candidate actions should be (bsize x n_update_candidates x *action_shape)
with jax.profiler.TraceContext("get candidates"):
policy_state, candidate_next_actions = policy.action_fn(
agent_state.policy_state, next_states,
int(agent_state.n_update_candidates), True)
agent_state = agent_state.replace(policy_state=policy_state)
# somehow if I don't cast these to bool JAX will recompile the jitted
# function train_step_candidates on every call...
with jax.profiler.TraceContext("train_step_candidates"):
exploration_state, losses = train_step_candidates(
agent_state.exploration_state,
transitions,
candidate_next_actions,
bool(agent_state.exploration_state.target_network),
bool(agent_state.exploration_state.optimistic_updates))
agent_state = agent_state.replace(exploration_state=exploration_state)
return agent_state, losses
def update_target_q(agent_state: AgentState):
exploration_state = agent_state.exploration_state.replace(
target_novq_state=agent_state.exploration_state.novq_state)
agent_state = agent_state.replace(exploration_state=exploration_state,
steps_since_tupdate=0)
return agent_state
def uniform_update(agent_state, rng, n=10):
for _ in range(n):
transitions = tuple((jnp.array(el)
for el in agent_state.replay.sample(128)))
agent_state, losses = train_step(agent_state, transitions)
agent_state = agent_state.replace(
steps_since_tupdate=agent_state.steps_since_tupdate + 1)
if agent_state.steps_since_tupdate >= agent_state.update_target_every:
agent_state = update_target_q(agent_state)
return agent_state
@jax.profiler.trace_function
def update_exploration(agent_state, rng, transition_id):
s, a, sp, r = agent_state.replay.get_transitions(transition_id)
# update density on new observations
with jax.profiler.TraceContext("update density"):
exploration_state = agent_state.exploration_state
density_state = density.update_batch(exploration_state.density_state,
jnp.expand_dims(s, axis=0),
jnp.expand_dims(a, axis=0))
exploration_state = exploration_state.replace(
density_state=density_state)
agent_state = agent_state.replace(exploration_state=exploration_state)
return agent_state
def compute_weight(prior_count, count):
root_real_count = count ** 0.5
# root_prior_count = prior_count ** 0.5
# return root_real_count / (root_real_count + root_prior_count)
root_total_count = (count + prior_count) ** 0.5
return root_real_count / root_total_count
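# Editor's note: an illustrative check (not part of the original module) of how the
# pseudo-count weight above interpolates between the optimistic ceiling R_MAX for
# unvisited pairs and the learned Q value as the count grows. prior_count=1.0 is a
# hypothetical setting, not necessarily the project default.
def _example_optimism_schedule(q_value=2.0, prior_count=1.0):
    """Return (count, optimistic value) pairs for a few visit counts."""
    values = []
    for count in (0, 1, 10, 100):
        weight = compute_weight(prior_count, count)
        values.append((count, weight * q_value + (1 - weight) * R_MAX))
    return values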
@jax.profiler.trace_function
@jax.jit
def predict_optimistic_value(novq_state, density_state, prior_count,
state, action):
expanded_state = jnp.expand_dims(state, axis=0)
expanded_action = jnp.expand_dims(action, axis=0)
predicted_value = q_learning.predict_value(novq_state,
expanded_state,
expanded_action)
predicted_value = predicted_value.reshape(tuple())
count = density.get_count(density_state,
state, action)
weight = compute_weight(prior_count, count)
optimistic_value = weight * predicted_value + (1 - weight) * R_MAX
return optimistic_value
predict_optimistic_value_batch = jax.vmap( # noqa: E305
predict_optimistic_value, in_axes=(None, None, None, 0, 0))
predict_optimistic_values = jax.vmap(
predict_optimistic_value, in_axes=(None, None, None, None, 0))
predict_optimistic_values_batch = jax.vmap( # noqa: E305
predict_optimistic_values, in_axes=(None, None, None, 0, 0))
@jax.profiler.trace_function
@jax.jit
def select_candidate_optimistic(exploration_state, rng,
state, candidate_actions):
optimistic_values = predict_optimistic_values(
exploration_state.novq_state,
exploration_state.density_state,
exploration_state.prior_count,
state, candidate_actions).reshape(-1)
return q_learning.sample_boltzmann(
rng, optimistic_values, candidate_actions,
exploration_state.temperature)
@jax.profiler.trace_function
def sample_exploration_action(agent_state: AgentState, rng, s, train=True):
# during test, take only one action sample from the task policy
# -> will follow the task policy
n = agent_state.n_candidates if train else 1
with jax.profiler.TraceContext("sample candidate actions"):
s_batch = jnp.expand_dims(s, axis=0)
policy_state, candidate_actions = policy.action_fn(
agent_state.policy_state, s_batch, n, train)
# policy.action_fn deals with batches and we only have one element
candidate_actions = candidate_actions[0]
agent_state = agent_state.replace(policy_state=policy_state)
with jax.profiler.TraceContext("select from candidates"):
if agent_state.optimistic_actions:
a, h = select_candidate_optimistic(agent_state.exploration_state,
rng, s, candidate_actions)
else:
a, _, h = q_learning.sample_action_boltzmann(
agent_state.exploration_state.novq_state, rng,
s, candidate_actions,
agent_state.exploration_state.temperature)
flag = 'train' if train else 'test'
logger.update(f'{flag}/explore_entropy', h)
return agent_state, a
def update_agent(agent_state: AgentState, rng, transition):
# add transition to replay
transition_id = agent_state.replay.append(*transition)
# update the density with the observed transition
agent_state = update_exploration(agent_state, rng, transition_id)
return agent_state
def run_episode(agent_state: AgentState, rng, env,
train=True, max_steps=None):
timestep = env.reset()
score, novelty_score = 0, 0
i = 0
while not timestep.last():
rng, action_rng = random.split(rng)
s = utils.flatten_observation(timestep.observation)
# put some random steps in the replay buffer
if len(agent_state.replay) < agent_state.warmup_steps:
action_spec = jax_specs.convert_dm_spec(env.action_spec())
a = utils.sample_uniform_actions(action_spec, action_rng, 1)[0]
flag = 'train' if train else 'test'
logger.update(f'{flag}/policy_entropy', 0)
logger.update(f'{flag}/explore_entropy', 0)
else:
agent_state, a = sample_exploration_action(
agent_state, action_rng, s, train)
timestep = env.step(a)
sp = utils.flatten_observation(timestep.observation)
r = timestep.reward
novelty_reward = compute_novelty_reward(agent_state.exploration_state,
jnp.expand_dims(s, axis=0),
jnp.expand_dims(a, axis=0))
score += r
novelty_score += float(novelty_reward)
if train:
novelty_transition = (s, a, sp, novelty_reward)
task_transition = (s, a, sp, r)
agent_state.policy_replay.append(*task_transition)
rng, update_rng = random.split(rng)
agent_state = update_agent(agent_state, update_rng, novelty_transition)
i += 1
if max_steps is not None and i >= max_steps:
break
return agent_state, env, score, novelty_score
# ----- Visualizations for gridworld ---------------------------------
def display_state(agent_state: AgentState, ospec, aspec,
max_steps=100, bins=20,
rendering='local', savedir=None, episode=None):
exploration_state = agent_state.exploration_state
policy_state = agent_state.policy_state
# min_count_map = dmcontrol_gridworld.render_function(
# jax.partial(density.get_count_batch, exploration_state.density_state),
# env, reduction=jnp.min)
count_map = utils.render_function(
jax.partial(density.get_count_batch, exploration_state.density_state),
agent_state.replay,
ospec, aspec, reduction=jnp.max, bins=bins)
novq_map = utils.render_function(
jax.partial(q_learning.predict_value, exploration_state.novq_state),
agent_state.replay,
ospec, aspec, reduction=jnp.max, bins=bins)
optimistic_novq_map = utils.render_function(
jax.partial(predict_optimistic_value_batch,
exploration_state.novq_state,
exploration_state.density_state,
exploration_state.prior_count),
agent_state.replay,
ospec, aspec, reduction=jnp.max, bins=bins)
novelty_reward_map = utils.render_function(
jax.partial(compute_novelty_reward, exploration_state),
agent_state.replay,
ospec, aspec, reduction=jnp.max, bins=bins)
traj_map = replay_buffer.render_trajectory(
agent_state.replay, max_steps, ospec, bins=bins)
subfigs = [
# (min_count_map, "Visit count (min)"),
(count_map, "Visit count (max)"),
(novq_map, "Novelty value (max)"),
(optimistic_novq_map, "Optimistic novelty value (max)"),
(novelty_reward_map, "Novelty reward (max)"),
(traj_map, "Last trajectory"),
]
q_policies = ['policies.deep_q_policy', 'policies.tabular_q_policy']
if policy.__name__ in q_policies:
taskq_map = utils.render_function(
jax.partial(q_learning.predict_value, policy_state.q_state),
agent_state.replay,
ospec, aspec, reduction=jnp.max, bins=bins)
subfigs.append((taskq_map, "Task value (max)"))
# dump the raw data for later rendering
raw_path = f"{savedir}/data/{episode}.pkl"
os.makedirs(os.path.dirname(raw_path), exist_ok=True)
with open(raw_path, 'wb') as f:
pickle.dump(subfigs, f, protocol=4)
fig, axs = plt.subplots(1, len(subfigs))
for ax, subfig in zip(axs, subfigs):
render, title = subfig
img = ax.imshow(render)
fig.colorbar(img, ax=ax)
ax.set_title(title)
fig.set_size_inches(4 * len(subfigs), 3)
fig_path = f"{savedir}/{episode}.png"
utils.display_figure(fig, rendering, savepath=fig_path)
# -------------------------------------------------------------------
def main(args):
rng = random.PRNGKey(args.seed)
if args.env == 'gridworld':
env = dmcontrol_gridworld.GridWorld(args.env_size, args.max_steps)
observation_spec = env.observation_spec()
else:
env = suite.load(args.env, args.task)
observation_spec = DOMAINS[args.env][args.task]
action_spec = env.action_spec()
j_action_spec = jax_specs.convert_dm_spec(action_spec)
j_observation_spec = jax_specs.convert_dm_spec(observation_spec)
state_shape = utils.flatten_spec_shape(j_observation_spec)
action_shape = action_spec.shape
batch_size = 128
# drawing only one candidate action sample from the policy
# will result in following the policy directly
n_candidates = 64 if args.use_exploration else 1
novq_state = q_functions.init_fn(args.seed,
observation_spec,
action_spec,
# env_size=env.size,
discount=0.97,
max_value=R_MAX)
density_state = density.new(observation_spec, action_spec,
state_bins=args.n_state_bins,
action_bins=args.n_action_bins)
replay = replay_buffer.LowPrecisionTracingReplay(
state_shape, action_shape, min_s=0, max_s=1, n_bins=2)
policy_replay = replay_buffer.LowPrecisionTracingReplay(
state_shape, action_shape, min_s=0, max_s=1, n_bins=2)
policy_state = policy.init_fn(observation_spec, action_spec, args.seed,
lr=args.policy_lr,
update_rule=args.policy_update)
exploration_state = ExplorationState(
novq_state=novq_state,
target_novq_state=novq_state,
density_state=density_state,
temperature=args.temperature,
update_temperature=args.update_temperature,
prior_count=args.prior_count,
optimistic_updates=args.optimistic_updates,
target_network=args.target_network)
agent_state = AgentState(exploration_state=exploration_state,
policy_state=policy_state,
replay=replay,
policy_replay=policy_replay,
n_candidates=n_candidates,
n_update_candidates=args.n_update_candidates,
prioritized_update=args.prioritized_update,
update_target_every=args.update_target_every,
warmup_steps=args.warmup_steps,
optimistic_actions=args.optimistic_actions,)
for episode in range(1, 1000):
# run an episode
rng, episode_rng = random.split(rng)
agent_state, env, score, novelty_score = run_episode(
agent_state, episode_rng, env, train=True, max_steps=args.max_steps)
logger.update('train/episode', episode)
logger.update('train/score', score)
logger.update('train/novelty_score', novelty_score)
# update the task policy
# TODO: pull this loop inside the policy.update_fn
n_updates = args.max_steps // 2
policy_state = agent_state.policy_state
for _ in range(n_updates):
transitions = agent_state.policy_replay.sample(batch_size)
transitions = tuple((jnp.array(el) for el in transitions))
policy_state = policy.update_fn(
policy_state, transitions)
agent_state = agent_state.replace(policy_state=policy_state)
rng, update_rng = random.split(rng)
agent_state = uniform_update(agent_state, update_rng, n=n_updates)
# output / visualize
if episode % args.eval_every == 0:
rng, episode_rng = random.split(rng)
_, _, test_score, test_novelty_score = run_episode(
agent_state, episode_rng, env,
train=False, max_steps=args.max_steps)
logger.update('test/episode', episode)
logger.update('test/score', test_score)
logger.update('test/novelty_score', test_novelty_score)
logger.write_all()
if args.vis != 'none':
# savepath = f"{args.save_dir}/{episode}"
display_state(agent_state, observation_spec, action_spec,
max_steps=args.max_steps, bins=args.n_state_bins,
rendering=args.vis, savedir=args.save_dir,
episode=episode)
if episode % args.save_replay_every == 0:
replay_path = f"{args.save_dir}/replay.pkl"
replay_buffer.save(agent_state.replay, replay_path)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--name', default='default')
parser.add_argument('--env', default='gridworld')
parser.add_argument('--task', default='default')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--env_size', type=int, default=20)
parser.add_argument('--max_steps', type=int, default=1000)
parser.add_argument('--debug', action='store_true', default=False)
parser.add_argument('--vis', default='disk')
parser.add_argument('--eval_every', type=int, default=10)
parser.add_argument('--save_replay_every', type=int, default=10)
parser.add_argument('--policy', type=str, default='deep')
parser.add_argument('--policy_update', type=str, default='ddqn')
parser.add_argument('--policy_lr', type=float, default=1e-3)
parser.add_argument('--novelty_q_function', type=str, default='deep')
parser.add_argument('--temperature', type=float, default=1e-1)
parser.add_argument('--update_temperature', type=float, default=None)
parser.add_argument('--prior_count', type=float, default=1e-3)
parser.add_argument('--n_update_candidates', type=int, default=64)
parser.add_argument('--n_state_bins', type=int, default=4)
parser.add_argument('--n_action_bins', type=int, default=2)
parser.add_argument('--optimistic_updates', dest='optimistic_updates',
action='store_true', default=False)
parser.add_argument('--optimistic_actions', dest='optimistic_actions',
action='store_true', default=False)
parser.add_argument('--target_network', action='store_true', default=True)
parser.add_argument('--no_target_network', dest='target_network',
action='store_false')
parser.add_argument('--update_target_every', type=int, default=10)
parser.add_argument('--warmup_steps', type=int, default=128)
parser.add_argument('--no_exploration', dest='use_exploration',
action='store_false', default=True)
parser.add_argument('--prioritized_update', dest='prioritized_update',
action='store_true', default=False)
parser.add_argument('--no_prioritized_update', dest='prioritized_update',
action='store_false')
args = parser.parse_args()
print(args)
if args.update_temperature is None:
print("Using --temperature as --update_temperature.")
args.update_temperature = args.temperature
args.save_dir = f"results/slow/{args.name}"
os.makedirs(args.save_dir, exist_ok=True)
import experiment_logging
experiment_logging.setup_default_logger(args.save_dir)
from experiment_logging import default_logger as logger
import json
with open(args.save_dir + '/args.json', 'w') as argfile:
json.dump(args.__dict__, argfile, indent=4)
if args.novelty_q_function == 'deep':
import deep_q_functions as q_functions
elif args.novelty_q_function == 'sigmoid':
import sigmoid_q_functions as q_functions
elif args.novelty_q_function == 'tabular':
import tabular_q_functions as q_functions
else:
raise Exception("Argument --novelty_q_function was invalid.")
if args.policy == 'deep':
import policies.deep_q_policy as policy
elif args.policy == 'uniform':
import policies.uniform_policy as policy
elif args.policy == 'tabular':
import policies.tabular_q_policy as policy
else:
raise Exception("Argument --policy was invalid.")
jit = not args.debug
if jit:
main(args)
else:
with jax.disable_jit():
main(args)
|
the-stack_0_2282 | #
# @lc app=leetcode id=445 lang=python
#
# [445] Add Two Numbers II
#
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
from LeetCode.Python.BaseListNode import MakeListNodes, PrintListNode, ListNode
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
"""
:type l1: ListNodeclass ListNode:
def __init__(self, x):
self.val = x
self.next = None
:type l2: ListNode
:rtype: ListNode
"""
l1_list = list()
l2_list = list()
while l1 is not None or l2 is not None:
if l1 is not None:
l1_list.append(l1)
l1 = l1.next
if l2 is not None:
l2_list.append(l2)
l2 = l2.next
carry = 0
result = None
while l1_list or l2_list or carry:
p = l1_list.pop().val if l1_list else 0
q = l2_list.pop().val if l2_list else 0
sum_tmp = p + q + carry
carry = int(sum_tmp // 10)
result_tmp = ListNode(sum_tmp % 10)
result_tmp.next = result
result = result_tmp
return result
if __name__ == '__main__':
s = Solution()
l_1 = MakeListNodes([2, 4, 3])
l_2 = MakeListNodes([5, 6, 4])
PrintListNode(s.addTwoNumbers(l_1, l_2))
PrintListNode(s.addTwoNumbers(ListNode(5), ListNode(5)))
l_1 = MakeListNodes([2, 4])
l_2 = MakeListNodes([5])
PrintListNode(s.addTwoNumbers(l_1, l_2))
|
the-stack_0_2284 | import pyglet, math
from pyglet.window import key
from . import bullet, physicalobject, resources
class Player(physicalobject.PhysicalObject):
"""Physical object that responds to user input"""
def __init__(self, *args, **kwargs):
super(Player, self).__init__(img=resources.player_image, *args, **kwargs)
# Create a child sprite to show when the ship is thrusting
self.engine_sprite = pyglet.sprite.Sprite(img=resources.engine_image, *args, **kwargs)
self.engine_sprite.visible = False
# Set some easy-to-tweak constants
self.thrust = 300.0
self.rotate_speed = 200.0
self.bullet_speed = 700.0
# Player should not collide with own bullets
self.reacts_to_bullets = False
# Tell the game handler about any event handlers
self.key_handler = key.KeyStateHandler()
self.event_handlers = [self, self.key_handler]
def update(self, dt):
# Do all the normal physics stuff
super(Player, self).update(dt)
if self.key_handler[key.LEFT]:
self.rotation -= self.rotate_speed * dt
if self.key_handler[key.RIGHT]:
self.rotation += self.rotate_speed * dt
if self.key_handler[key.UP]:
# Note: pyglet's rotation attributes are in "negative degrees"
angle_radians = -math.radians(self.rotation)
force_x = math.cos(angle_radians) * self.thrust * dt
force_y = math.sin(angle_radians) * self.thrust * dt
self.velocity_x += force_x
self.velocity_y += force_y
# If thrusting, update the engine sprite
self.engine_sprite.rotation = self.rotation
self.engine_sprite.x = self.x
self.engine_sprite.y = self.y
self.engine_sprite.visible = True
else:
# Otherwise, hide it
self.engine_sprite.visible = False
def on_key_press(self, symbol, modifiers):
if symbol == key.SPACE:
self.fire()
def fire(self):
# Note: pyglet's rotation attributes are in "negative degrees"
angle_radians = -math.radians(self.rotation)
# Create a new bullet just in front of the player
ship_radius = self.image.width / 2
bullet_x = self.x + math.cos(angle_radians) * ship_radius
bullet_y = self.y + math.sin(angle_radians) * ship_radius
new_bullet = bullet.Bullet(bullet_x, bullet_y, batch=self.batch)
# Give it some speed
bullet_vx = self.velocity_x + math.cos(angle_radians) * self.bullet_speed
bullet_vy = self.velocity_y + math.sin(angle_radians) * self.bullet_speed
new_bullet.velocity_x, new_bullet.velocity_y = bullet_vx, bullet_vy
# Add it to the list of objects to be added to the game_objects list
self.new_objects.append(new_bullet)
# Play the bullet sound
resources.bullet_sound.play()
def delete(self):
# We have a child sprite which must be deleted when this object
# is deleted from batches, etc.
self.engine_sprite.delete()
super(Player, self).delete()
|
the-stack_0_2287 | import numpy as np
def predict_one_vs_all(all_theta, X):
m = X.shape[0]
num_labels = all_theta.shape[0]
# You need to return the following variable correctly;
p = np.zeros(m)
# Add ones to the X data matrix
X = np.c_[np.ones(m), X]
# ===================== Your Code Here =====================
# Instructions : Complete the following code to make predictions using
# your learned logistic regression parameters (one vs all).
# You should set p to a vector of predictions (from 1 to
# num_labels)
#
# Hint : This code can be done all vectorized using the max function
# In particular, the max function can also return the index of the
# max element, for more information see 'np.argmax' function.
#
results = np.dot(X, all_theta.T)
p = np.argmax(results, axis=1)
p = np.array([x if x != 0 else 10 for x in p])
print('p {}'.format(p))
return p
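
# Hedged usage sketch (added for illustration; not part of the original exercise).
# The shapes are assumptions: all_theta is (num_labels, n + 1) including the bias
# column, X is (m, n) without it, and index 0 is remapped to label 10 as in the
# code above.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    demo_theta = rng.normal(size=(10, 401))   # e.g. 10 classes, 400 features + bias
    demo_X = rng.normal(size=(5, 400))        # 5 example rows
    preds = predict_one_vs_all(demo_theta, demo_X)  # prints and returns the labels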
|
the-stack_0_2289 | from typing import Dict, List, Optional, Tuple
from blspy import AugSchemeMPL, G2Element, PrivateKey
from kiwi.consensus.constants import ConsensusConstants
from kiwi.util.hash import std_hash
from kiwi.types.announcement import Announcement
from kiwi.types.blockchain_format.coin import Coin
from kiwi.types.blockchain_format.program import Program
from kiwi.types.blockchain_format.sized_bytes import bytes32
from kiwi.types.coin_spend import CoinSpend
from kiwi.types.condition_opcodes import ConditionOpcode
from kiwi.types.condition_with_args import ConditionWithArgs
from kiwi.types.spend_bundle import SpendBundle
from kiwi.util.clvm import int_from_bytes, int_to_bytes
from kiwi.util.condition_tools import conditions_by_opcode, conditions_for_solution, pkm_pairs_for_conditions_dict
from kiwi.util.ints import uint32, uint64
from kiwi.wallet.derive_keys import master_sk_to_wallet_sk
from kiwi.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
DEFAULT_HIDDEN_PUZZLE_HASH,
calculate_synthetic_secret_key,
puzzle_for_pk,
solution_for_conditions,
)
DEFAULT_SEED = b"seed" * 8
assert len(DEFAULT_SEED) == 32
class WalletTool:
next_address = 0
pubkey_num_lookup: Dict[bytes, uint32] = {}
def __init__(self, constants: ConsensusConstants, sk: Optional[PrivateKey] = None):
self.constants = constants
self.current_balance = 0
self.my_utxos: set = set()
if sk is not None:
self.private_key = sk
else:
self.private_key = AugSchemeMPL.key_gen(DEFAULT_SEED)
self.generator_lookups: Dict = {}
self.puzzle_pk_cache: Dict = {}
self.get_new_puzzle()
def get_next_address_index(self) -> uint32:
self.next_address = uint32(self.next_address + 1)
return self.next_address
def get_private_key_for_puzzle_hash(self, puzzle_hash: bytes32) -> PrivateKey:
if puzzle_hash in self.puzzle_pk_cache:
child = self.puzzle_pk_cache[puzzle_hash]
private = master_sk_to_wallet_sk(self.private_key, uint32(child))
# pubkey = private.get_g1()
return private
else:
for child in range(self.next_address):
pubkey = master_sk_to_wallet_sk(self.private_key, uint32(child)).get_g1()
if puzzle_hash == puzzle_for_pk(bytes(pubkey)).get_tree_hash():
return master_sk_to_wallet_sk(self.private_key, uint32(child))
raise ValueError(f"Do not have the keys for puzzle hash {puzzle_hash}")
def puzzle_for_pk(self, pubkey: bytes) -> Program:
return puzzle_for_pk(pubkey)
def get_new_puzzle(self) -> bytes32:
next_address_index: uint32 = self.get_next_address_index()
pubkey = master_sk_to_wallet_sk(self.private_key, next_address_index).get_g1()
self.pubkey_num_lookup[bytes(pubkey)] = next_address_index
puzzle = puzzle_for_pk(bytes(pubkey))
self.puzzle_pk_cache[puzzle.get_tree_hash()] = next_address_index
return puzzle
def get_new_puzzlehash(self) -> bytes32:
puzzle = self.get_new_puzzle()
return puzzle.get_tree_hash()
def sign(self, value: bytes, pubkey: bytes) -> G2Element:
privatekey: PrivateKey = master_sk_to_wallet_sk(self.private_key, self.pubkey_num_lookup[pubkey])
return AugSchemeMPL.sign(privatekey, value)
def make_solution(self, condition_dic: Dict[ConditionOpcode, List[ConditionWithArgs]]) -> Program:
ret = []
for con_list in condition_dic.values():
for cvp in con_list:
ret.append([cvp.opcode.value] + cvp.vars)
return solution_for_conditions(Program.to(ret))
def generate_unsigned_transaction(
self,
amount: uint64,
new_puzzle_hash: bytes32,
coins: List[Coin],
condition_dic: Dict[ConditionOpcode, List[ConditionWithArgs]],
fee: int = 0,
secret_key: Optional[PrivateKey] = None,
additional_outputs: Optional[List[Tuple[bytes32, int]]] = None,
) -> List[CoinSpend]:
spends = []
spend_value = sum([c.amount for c in coins])
if ConditionOpcode.CREATE_COIN not in condition_dic:
condition_dic[ConditionOpcode.CREATE_COIN] = []
if ConditionOpcode.CREATE_COIN_ANNOUNCEMENT not in condition_dic:
condition_dic[ConditionOpcode.CREATE_COIN_ANNOUNCEMENT] = []
output = ConditionWithArgs(ConditionOpcode.CREATE_COIN, [new_puzzle_hash, int_to_bytes(amount)])
condition_dic[output.opcode].append(output)
if additional_outputs is not None:
for o in additional_outputs:
out = ConditionWithArgs(ConditionOpcode.CREATE_COIN, [o[0], int_to_bytes(o[1])])
condition_dic[out.opcode].append(out)
amount_total = sum(int_from_bytes(cvp.vars[1]) for cvp in condition_dic[ConditionOpcode.CREATE_COIN])
change = spend_value - amount_total - fee
if change > 0:
change_puzzle_hash = self.get_new_puzzlehash()
change_output = ConditionWithArgs(ConditionOpcode.CREATE_COIN, [change_puzzle_hash, int_to_bytes(change)])
condition_dic[output.opcode].append(change_output)
secondary_coins_cond_dic: Dict[ConditionOpcode, List[ConditionWithArgs]] = dict()
secondary_coins_cond_dic[ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT] = []
for n, coin in enumerate(coins):
puzzle_hash = coin.puzzle_hash
if secret_key is None:
secret_key = self.get_private_key_for_puzzle_hash(puzzle_hash)
pubkey = secret_key.get_g1()
puzzle = puzzle_for_pk(bytes(pubkey))
if n == 0:
message_list = [c.name() for c in coins]
for outputs in condition_dic[ConditionOpcode.CREATE_COIN]:
message_list.append(Coin(coin.name(), outputs.vars[0], int_from_bytes(outputs.vars[1])).name())
message = std_hash(b"".join(message_list))
condition_dic[ConditionOpcode.CREATE_COIN_ANNOUNCEMENT].append(
ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [message])
)
primary_announcement_hash = Announcement(coin.name(), message).name()
secondary_coins_cond_dic[ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT].append(
ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [primary_announcement_hash])
)
main_solution = self.make_solution(condition_dic)
spends.append(CoinSpend(coin, puzzle, main_solution))
else:
spends.append(CoinSpend(coin, puzzle, self.make_solution(secondary_coins_cond_dic)))
return spends
def sign_transaction(self, coin_spends: List[CoinSpend]) -> SpendBundle:
signatures = []
solution: Program
puzzle: Program
for coin_spend in coin_spends: # type: ignore # noqa
secret_key = self.get_private_key_for_puzzle_hash(coin_spend.coin.puzzle_hash)
synthetic_secret_key = calculate_synthetic_secret_key(secret_key, DEFAULT_HIDDEN_PUZZLE_HASH)
err, con, cost = conditions_for_solution(
coin_spend.puzzle_reveal, coin_spend.solution, self.constants.MAX_BLOCK_COST_CLVM
)
if not con:
raise ValueError(err)
conditions_dict = conditions_by_opcode(con)
for _, msg in pkm_pairs_for_conditions_dict(
conditions_dict, bytes(coin_spend.coin.name()), self.constants.AGG_SIG_ME_ADDITIONAL_DATA
):
signature = AugSchemeMPL.sign(synthetic_secret_key, msg)
signatures.append(signature)
aggsig = AugSchemeMPL.aggregate(signatures)
spend_bundle = SpendBundle(coin_spends, aggsig)
return spend_bundle
def generate_signed_transaction(
self,
amount: uint64,
new_puzzle_hash: bytes32,
coin: Coin,
condition_dic: Dict[ConditionOpcode, List[ConditionWithArgs]] = None,
fee: int = 0,
additional_outputs: Optional[List[Tuple[bytes32, int]]] = None,
) -> SpendBundle:
if condition_dic is None:
condition_dic = {}
transaction = self.generate_unsigned_transaction(
amount, new_puzzle_hash, [coin], condition_dic, fee, additional_outputs=additional_outputs
)
assert transaction is not None
return self.sign_transaction(transaction)
def generate_signed_transaction_multiple_coins(
self,
amount: uint64,
new_puzzle_hash: bytes32,
coins: List[Coin],
condition_dic: Dict[ConditionOpcode, List[ConditionWithArgs]] = None,
fee: int = 0,
additional_outputs: Optional[List[Tuple[bytes32, int]]] = None,
) -> SpendBundle:
if condition_dic is None:
condition_dic = {}
transaction = self.generate_unsigned_transaction(
amount, new_puzzle_hash, coins, condition_dic, fee, additional_outputs=additional_outputs
)
assert transaction is not None
return self.sign_transaction(transaction)
|
the-stack_0_2293 | import glob
import sys
import os
import xml.etree.ElementTree as ET
from random import random
def main(filename):
# ratio to divide up the images
train = 0.7
val = 0.2
test = 0.1
    if abs((train + test + val) - 1.0) > 1e-9:  # avoid exact float comparison
print("probabilities must equal 1")
exit()
# get the labels
labels = []
imgnames = []
annotations = {}
with open(filename, 'r') as labelfile:
label_string = ""
for line in labelfile:
label_string += line.rstrip()
labels = label_string.split(',')
labels = [elem.replace(" ", "") for elem in labels]
# get image names
for filename in os.listdir("./JPEGImages"):
if filename.endswith(".jpg"):
            img = os.path.splitext(filename)[0]  # rstrip('.jpg') strips characters, not the suffix
imgnames.append(img)
print("Labels:", labels, "imgcnt:", len(imgnames))
# initialise annotation list
for label in labels:
annotations[label] = []
# Scan the annotations for the labels
for img in imgnames:
annote = "Annotations/" + img + '.xml'
if os.path.isfile(annote):
tree = ET.parse(annote)
root = tree.getroot()
annote_labels = []
for labelname in root.findall('*/name'):
labelname = labelname.text
annote_labels.append(labelname)
if labelname in labels:
annotations[labelname].append(img)
annotations[img] = annote_labels
else:
print("Missing annotation for ", annote)
exit()
# divvy up the images to the different sets
sampler = imgnames.copy()
train_list = []
val_list = []
test_list = []
while len(sampler) > 0:
dice = random()
elem = sampler.pop()
if dice <= test:
test_list.append(elem)
elif dice <= (test + val):
val_list.append(elem)
else:
train_list.append(elem)
print("Training set:", len(train_list), "validation set:", len(val_list), "test set:", len(test_list))
# create the dataset files
create_folder("./ImageSets/Main/")
with open("./ImageSets/Main/train.txt", 'w') as outfile:
for name in train_list:
outfile.write(name + "\n")
with open("./ImageSets/Main/val.txt", 'w') as outfile:
for name in val_list:
outfile.write(name + "\n")
with open("./ImageSets/Main/trainval.txt", 'w') as outfile:
for name in train_list:
outfile.write(name + "\n")
for name in val_list:
outfile.write(name + "\n")
with open("./ImageSets/Main/test.txt", 'w') as outfile:
for name in test_list:
outfile.write(name + "\n")
    # create the individual files for each label
for label in labels:
with open("./ImageSets/Main/"+ label +"_train.txt", 'w') as outfile:
for name in train_list:
if label in annotations[name]:
outfile.write(name + " 1\n")
else:
outfile.write(name + " -1\n")
with open("./ImageSets/Main/"+ label +"_val.txt", 'w') as outfile:
for name in val_list:
if label in annotations[name]:
outfile.write(name + " 1\n")
else:
outfile.write(name + " -1\n")
with open("./ImageSets/Main/"+ label +"_test.txt", 'w') as outfile:
for name in test_list:
if label in annotations[name]:
outfile.write(name + " 1\n")
else:
outfile.write(name + " -1\n")
def create_folder(foldername):
if os.path.exists(foldername):
print('folder already exists:', foldername)
else:
os.makedirs(foldername)
if __name__=='__main__':
if len(sys.argv) < 2:
print("usage: python generate_vocdata.py <labelfile>")
exit()
main(sys.argv[1])
|
the-stack_0_2296 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow.compiler.mlir.tfr.integration.node_expansion."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.compiler.mlir.tfr.resources import gen_composite_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
_lib_dir = os.path.dirname(gen_composite_ops.__file__)
_lib_name = os.path.basename(gen_composite_ops.__file__)[4:].replace(
'.py', '.so')
load_library.load_op_library(os.path.join(_lib_dir, _lib_name))
class NodeExpansionTest(test.TestCase):
def testAddN(self):
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t3 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq1 = gen_composite_ops.my_add_n([t1])
sq2 = gen_composite_ops.my_add_n([t1, t2])
sq3 = gen_composite_ops.my_add_n([t1, t2, t3])
self.assertAllEqual(sq1.numpy().reshape(-1), [1, 2, 3, 4])
self.assertAllEqual(sq2.numpy().reshape(-1), [2, 4, 6, 8])
self.assertAllEqual(sq3.numpy().reshape(-1), [3, 6, 9, 12])
def testBiasedDense(self):
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
sq = gen_composite_ops.my_biased_dense(t1, t2, t3)
self.assertAllEqual(sq.numpy().reshape(-1), [-3, 0, 5, 12])
def testBiasedDenseRelu(self):
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
sq = gen_composite_ops.my_biased_dense(t1, t2, t3, act='relu')
self.assertAllEqual(sq.numpy().reshape(-1), [0, 0, 5, 12])
def testWithKnownKernel(self):
def biasd_dense_elu(x, y, z):
dot = gen_composite_ops.my_biased_dense(x, y, z)
return nn_ops.elu(dot) # with known kernel, should not expand.
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
sq = biasd_dense_elu(t1, t2, t3)
self.assertAllClose(sq.numpy().reshape(-1), [-0.950213, 0, 5, 12])
# Regression test for an issue where VarHandleOp wasn't being properly
# imported into MLIR for "no-op" node expansion.
def testVarHandleOp(self):
x = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
# Note: we purposely make multiple calls to VarHandleOp to exercise the
    # cached kernel lookup path that was exhibiting the VarHandleOp import
# issue.
unused_ = gen_resource_variable_ops.VarHandleOp(
dtype=dtypes.float32, shape=[3, 2])
handle = gen_resource_variable_ops.VarHandleOp(
dtype=dtypes.float32, shape=[3, 2])
gen_resource_variable_ops.AssignVariableOp(resource=handle, value=x)
self.assertAllEqual(
x,
gen_resource_variable_ops.ReadVariableOp(
resource=handle, dtype=dtypes.float32))
if __name__ == '__main__':
os.environ['TF_MLIR_TFR_LIB_DIR'] = 'tensorflow/compiler/mlir/tfr/resources'
ops.enable_eager_execution()
test.main()
|
the-stack_0_2297 | import typing
from typing import Optional, Any
import gym
import gym_minigrid.minigrid
import numpy as np
import torch
from babyai.utils.format import InstructionsPreprocessor
from gym_minigrid.minigrid import MiniGridEnv
from core.base_abstractions.sensor import Sensor, prepare_locals_for_super
from core.base_abstractions.task import Task, SubTaskType
# fmt: off
ALL_VOCAB_TOKENS = [
"a", "after", "and", "ball", "behind", "blue", "box",
"door", "front", "go", "green", "grey", "in", "key",
"left", "next", "of", "on", "open", "pick", "purple",
"put", "red", "right", "the", "then", "to", "up", "yellow",
"you", "your",
]
# fmt: on
class EgocentricMiniGridSensor(Sensor[MiniGridEnv, Task[MiniGridEnv]]):
def __init__(
self,
agent_view_size: int,
view_channels: int = 1,
uuid: str = "minigrid_ego_image",
**kwargs: Any
):
self.agent_view_size = agent_view_size
self.view_channels = view_channels
self.num_objects = (
typing.cast(
int, max(map(abs, gym_minigrid.minigrid.OBJECT_TO_IDX.values())) # type: ignore
)
+ 1
)
self.num_colors = (
typing.cast(int, max(map(abs, gym_minigrid.minigrid.COLOR_TO_IDX.values()))) # type: ignore
+ 1
)
self.num_states = (
typing.cast(int, max(map(abs, gym_minigrid.minigrid.STATE_TO_IDX.values()))) # type: ignore
+ 1
)
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.Space:
return gym.spaces.Box(
low=0,
high=max(self.num_objects, self.num_colors, self.num_states) - 1,
shape=(self.agent_view_size, self.agent_view_size, self.view_channels),
dtype=int,
)
def get_observation(
self,
env: MiniGridEnv,
task: Optional[SubTaskType],
*args,
minigrid_output_obs: Optional[np.ndarray] = None,
**kwargs: Any
) -> Any:
if minigrid_output_obs is not None and minigrid_output_obs["image"].shape == (
self.agent_view_size,
self.agent_view_size,
):
img = minigrid_output_obs["image"][:, :, : self.view_channels]
else:
env.agent_view_size = self.agent_view_size
img = env.gen_obs()["image"][:, :, : self.view_channels]
assert img.dtype == np.uint8
return img
class MiniGridMissionSensor(Sensor[MiniGridEnv, Task[MiniGridEnv]]):
def __init__(self, instr_len: int, uuid: str = "minigrid_mission", **kwargs: Any):
self.instr_preprocessor = InstructionsPreprocessor(
model_name="TMP_SENSOR", load_vocab_from=None
)
# We initialize the vocabulary with a fixed collection of tokens
# and then ensure that the size cannot exceed this number. This
# guarantees that sensors on all processes will produce the same
# values.
for token in ALL_VOCAB_TOKENS:
_ = self.instr_preprocessor.vocab[token]
self.instr_preprocessor.vocab.max_size = len(ALL_VOCAB_TOKENS)
self.instr_len = instr_len
observation_space = self._get_observation_space()
super().__init__(**prepare_locals_for_super(locals()))
def _get_observation_space(self) -> gym.Space:
return gym.spaces.Box(
low=0,
high=self.instr_preprocessor.vocab.max_size,
shape=(self.instr_len,),
dtype=int,
)
def get_observation(
self,
env: MiniGridEnv,
task: Optional[SubTaskType],
*args,
minigrid_output_obs: Optional[np.ndarray] = None,
**kwargs: Any
) -> Any:
if minigrid_output_obs is None:
minigrid_output_obs = env.gen_obs()
out = self.instr_preprocessor([minigrid_output_obs]).view(-1)
n: int = out.shape[0]
if n > self.instr_len:
out = out[: self.instr_len]
elif n < self.instr_len:
out = torch.nn.functional.pad(
input=out, pad=[0, self.instr_len - n], value=0,
)
return out.long().numpy()
|
the-stack_0_2298 | from flask import Flask, request, jsonify
app = Flask(__name__)
@app.route('/webhooks/stripe', methods=['POST'])
def receive_stripe_webhook():
"""Receives a webhook payload from Stripe.
"""
# Try to parse a webhook payload, get upset if we couldn't
# parse any JSON in the body:
stripe_payload = request.json
if not stripe_payload:
return jsonify(message="Could not parse webhook payload"), 400
event = stripe_payload.get('type')
if not event:
return jsonify(message="Could not determine event type"), 400
if event == 'charge.succeeded':
# Pull fields out of payload:
data_object = stripe_payload.get('data').get('object')
customer_id = data_object.get('customer')
amount = data_object.get('amount')
# Here we just log the transaction, but at this point we can do
# anything! (Provision accounts, push to a database, etc.)
print(f'Customer {customer_id} made a purchase of {amount} cents!')
return jsonify(message="Webhook received"), 200 |
the-stack_0_2299 | import json
import sys
from wsgiref.simple_server import make_server
from . import NAME, VERSION, Kaa, KaaServer
from .openapi import OpenApi
from .server import Server
class Cli():
def __init__(self):
self.host = '127.0.0.1'
self.port = 8086
self.argv = sys.argv[:]
def execute(self):
try:
subcommand = self.argv[1]
except IndexError:
subcommand = 'help'
if subcommand == 'version':
msg = self.__get_version()
elif subcommand == 'help':
msg = self.__get_help()
elif subcommand == 'serve':
self.__serve()
return
elif subcommand == 'openapi':
server:KaaServer = Server().get_server()
kaa:Kaa = server.get_kaa({
'REQUEST_METHOD': '',
'PATH_INFO': '',
'REMOTE_ADDR': '',
'QUERY_STRING': ''
}, None)
msg = json.dumps(OpenApi().generate(kaa))
else:
msg = 'Invalid command. Try help'
sys.stdout.write(msg + '\n')
def __get_name(self):
return NAME
def __get_version(self):
return VERSION
def __get_help(self):
commands = [
('version', 'Returns Kaa version'),
('serve', 'Starts a server for development'),
('openapi', 'Generates openapi json')
]
return '\n'.join(['{}\t\t{}'.format(*cmd) for cmd in commands])
def __serve(self):
self.__set_host_port()
sys.stdout.write('{} version {}\n'.format(self.__get_name(), self.__get_version()))
sys.stdout.write('Server started at {}:{}\n\n'.format(self.host, self.port))
server:KaaServer = Server().get_server()
make_server(
host=self.host,
port=int(self.port),
app=lambda env, start_response: server.get_kaa(env, start_response).serve()
).serve_forever()
def __set_host_port(self):
try:
porthost = self.argv[2].split(':')
if len(porthost) == 1:
self.port = porthost[0]
elif len(porthost) == 2:
self.host = porthost[0]
self.port = porthost[1]
else:
sys.stdout.write('Invalid host:port' + '\n')
sys.exit(1)
except IndexError:
pass
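
# Editorial note (not in the original): based on the subcommands handled in
# Cli.execute() above, typical invocations look something like:
#
#     python -m kaa.cli serve              # defaults to 127.0.0.1:8086
#     python -m kaa.cli serve 0.0.0.0:8080 # explicit host:port
#     python -m kaa.cli openapi            # print the generated OpenAPI JSON
#
# The entry-point module name is an assumption; only the argument handling is
# taken from the code above.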
|
the-stack_0_2303 | from django.conf.urls import url
from . import views
app_name = 'twister'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^twist/$', views.TwistView.as_view(), name='twist'),
url(r'^domain/(?P<pk>.+)/$', views.DomainView.as_view(), name='domain'),
]
|
the-stack_0_2305 | from typing import List, NamedTuple, Tuple, Union
import geopandas as gpd
import gmsh
import numpy as np
import pandas as pd
import shapely.geometry as sg
from .common import FloatArray, IntArray, coord_dtype, flatten, separate
Z_DEFAULT = 0.0
POINT_DIM = 0
LINE_DIM = 1
PLANE_DIM = 2
class PolygonInfo(NamedTuple):
index: int
size: int
interior_indices: List[int]
interior_sizes: List[int]
polygon_id: int
class LineStringInfo(NamedTuple):
index: int
size: int
embedded_in: Union[int, None]
def polygon_info(
polygon: sg.Polygon, cellsize: float, index: int, polygon_id: int
) -> Tuple[PolygonInfo, FloatArray, FloatArray, int]:
exterior_coords = np.array(polygon.exterior.coords)[:-1]
size = len(exterior_coords)
vertices = [exterior_coords]
cellsizes = [np.full(size, cellsize)]
info = PolygonInfo(index, size, [], [], polygon_id)
index += size
for interior in polygon.interiors:
interior_coords = np.array(interior.coords)[:-1]
vertices.append(interior_coords)
size = len(interior_coords)
cellsizes.append(np.full(size, cellsize))
info.interior_indices.append(index)
info.interior_sizes.append(size)
index += size
return info, vertices, cellsizes, index
def linestring_info(
linestring: sg.LineString, cellsize: float, index: int, inside: Union[int, None]
) -> Tuple[LineStringInfo, FloatArray, FloatArray, int]:
vertices = np.array(linestring.coords)
size = len(vertices)
cellsizes = np.full(size, cellsize)
info = LineStringInfo(index, size, inside)
index += size
return info, vertices, cellsizes, index
def add_vertices(vertices, cellsizes, tags) -> None:
for (x, y), cellsize, tag in zip(vertices, cellsizes, tags):
gmsh.model.geo.addPoint(x, y, Z_DEFAULT, cellsize, tag)
def add_linestrings(
features: List[LineStringInfo], tags: IntArray
) -> Tuple[IntArray, IntArray]:
n_lines = sum(info.size - 1 for info in features)
line_indices = np.empty(n_lines, dtype=np.int64)
embedded_in = np.empty(n_lines, dtype=np.int64)
i = 0
for info in features:
point_tags = tags[info.index : info.index + info.size]
first = point_tags[0]
for second in point_tags[1:]:
line_index = gmsh.model.geo.addLine(first, second)
line_indices[i] = line_index
embedded_in[i] = info.embedded_in
first = second
i += 1
return line_indices, embedded_in
def add_curve_loop(point_tags: FloatArray) -> int:
tags = []
first = point_tags[-1]
for second in point_tags:
line_tag = gmsh.model.geo.addLine(first, second)
tags.append(line_tag)
first = second
curve_loop_tag = gmsh.model.geo.addCurveLoop(tags)
return curve_loop_tag
def add_polygons(
features: List[PolygonInfo], tags: IntArray
) -> Tuple[List[int], List[int]]:
plane_tags = []
for info in features:
# Add the exterior loop first
curve_loop_tags = [add_curve_loop(tags[info.index : info.index + info.size])]
# Now add holes
for start, size in zip(info.interior_indices, info.interior_sizes):
loop_tag = add_curve_loop(tags[start : start + size])
curve_loop_tags.append(loop_tag)
plane_tag = gmsh.model.geo.addPlaneSurface(curve_loop_tags, tag=info.polygon_id)
plane_tags.append(plane_tag)
return curve_loop_tags, plane_tags
def add_points(points: gpd.GeoDataFrame) -> Tuple[IntArray, IntArray]:
n_points = len(points)
indices = np.empty(n_points, dtype=np.int64)
embedded_in = points["__polygon_id"].values
# We have to add points one by one due to the Gmsh addPoint API
for i, row in enumerate(points.to_dict("records")):
point = row["geometry"]
# Rely on the automatic number of gmsh now to generate the indices
point_index = gmsh.model.geo.addPoint(
point.x, point.y, Z_DEFAULT, row["cellsize"]
)
indices[i] = point_index
return indices, embedded_in
def collect_polygons(
polygons: gpd.GeoDataFrame, index: int
) -> Tuple[int, FloatArray, IntArray, List[PolygonInfo]]:
vertices = []
cellsizes = []
features = []
for row in polygons.to_dict("records"):
info, coords, cells, index = polygon_info(
row["geometry"], row["cellsize"], index, row["__polygon_id"]
)
vertices.extend(coords)
cellsizes.extend(cells)
features.append(info)
return index, vertices, cellsizes, features
def collect_linestrings(
linestrings: gpd.GeoDataFrame, index: int
) -> Tuple[int, FloatArray, IntArray, List[LineStringInfo]]:
vertices = []
cellsizes = []
features = []
for row in linestrings.to_dict("records"):
info, coords, cells, index = linestring_info(
row["geometry"], row["cellsize"], index, row["__polygon_id"]
)
vertices.append(coords)
cellsizes.append(cells)
features.append(info)
return index, vertices, cellsizes, features
def collect_points(points: gpd.GeoDataFrame) -> FloatArray:
return np.stack((points["geometry"].x, points["geometry"].y), axis=1)
def embed_where(gdf: gpd.GeoDataFrame, polygons: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
tmp = gpd.sjoin(gdf, polygons, predicate="within", how="inner")
tmp["cellsize"] = tmp[["cellsize_left", "cellsize_right"]].min(axis=1)
return tmp[["cellsize", "__polygon_id", "geometry"]]
def add_geometry(
polygons: gpd.GeoDataFrame, linestrings: gpd.GeoDataFrame, points: gpd.GeoDataFrame
):
# Assign unique ids
polygons["__polygon_id"] = np.arange(1, len(polygons) + 1)
# Figure out in which polygon the points and linestrings will be embedded.
linestrings = embed_where(linestrings, polygons)
embedded_points = embed_where(points, polygons)
# Collect all coordinates, and store the length and type of every element
index, poly_vertices, poly_cellsizes, polygon_features = collect_polygons(
polygons, index=0
)
index, line_vertices, line_cellsizes, linestring_features = collect_linestrings(
linestrings, index
)
vertices = np.concatenate(poly_vertices + line_vertices)
cellsizes = np.concatenate(poly_cellsizes + line_cellsizes)
# Get the unique vertices, and generate the array of indices pointing to
# them for every feature
vertices, indices = np.unique(
vertices.reshape(-1).view(coord_dtype), return_inverse=True
)
vertex_tags = np.arange(1, len(vertices) + 1)
tags = vertex_tags[indices]
# Get the smallest cellsize per vertex
cellsizes = pd.Series(cellsizes).groupby(tags).min().values
# Add all unique vertices. This includes vertices for linestrings and polygons.
add_vertices(vertices, cellsizes, vertex_tags)
# Add all geometries to gmsh
add_polygons(polygon_features, tags)
linestring_indices, linestring_embedded = add_linestrings(linestring_features, tags)
gmsh.model.geo.synchronize()
# Now embed the points and linestrings in the polygons
for polygon_id, embed_indices in pd.Series(linestring_indices).groupby(
linestring_embedded
):
gmsh.model.mesh.embed(LINE_DIM, embed_indices, PLANE_DIM, polygon_id)
if len(embedded_points) > 0:
point_indices, point_embedded = add_points(embedded_points)
gmsh.model.geo.synchronize()
for polygon_id, embed_indices in pd.Series(point_indices).groupby(
point_embedded
):
gmsh.model.mesh.embed(POINT_DIM, embed_indices, PLANE_DIM, polygon_id)
gmsh.model.geo.synchronize()
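
# Editorial note (not in the original): the np.unique call in add_geometry() views the
# (n, 2) float coordinate array as a 1-D structured array so whole (x, y) pairs are
# deduplicated, and return_inverse maps every feature vertex back to its unique tag.
# A tiny illustration, assuming coord_dtype is a structured dtype of two float64 fields:
#
#     coords = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 0.0]])
#     unique, inverse = np.unique(coords.reshape(-1).view(coord_dtype), return_inverse=True)
#     # len(unique) == 2, inverse == [0, 1, 0]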
def add_field_points(points: gpd.GeoSeries) -> IntArray:
indices = np.empty(len(points), dtype=np.int64)
xy_coords = np.stack((points.x, points.y), axis=1)
for i, (x, y) in enumerate(xy_coords):
indices[i] = gmsh.model.geo.addPoint(x, y, Z_DEFAULT)
return indices
def add_field_linestring(
linestring: sg.LineString, minimum_cellsize: float
) -> IntArray:
n_vertices = int(np.ceil(linestring.length / minimum_cellsize))
indices = np.empty(n_vertices, dtype=np.int64)
for i, distance in enumerate(np.linspace(0.0, linestring.length, n_vertices)):
point = linestring.interpolate(distance)
indices[i] = gmsh.model.geo.addPoint(point.x, point.y, Z_DEFAULT)
return indices
def add_field_linestrings(
linestrings: gpd.GeoSeries, minimum_cellsize: float
) -> IntArray:
indices = [
add_field_linestring(linestring, minimum_cellsize) for linestring in linestrings
]
return np.concatenate(indices)
def add_field_polygons(polygons: gpd.GeoSeries, minimum_cellsize: float) -> IntArray:
indices = []
for exterior in polygons.exteriors:
indices.append(add_field_linestring(exterior, minimum_cellsize))
for interior in flatten(polygons.interiors):
indices.append(add_field_linestring(interior, minimum_cellsize))
return np.concatenate(indices)
def add_field_geometry(geometry: gpd.GeoSeries, minimum_cellsize: float) -> IntArray:
polygons, linestrings, points = separate(geometry)
point_nodes = add_field_points(points)
linestring_nodes = add_field_linestrings(linestrings, minimum_cellsize)
polygon_nodes = add_field_polygons(polygons, minimum_cellsize)
return np.concatenate((point_nodes, linestring_nodes, polygon_nodes))
|
the-stack_0_2306 | """
Download ACS data and parse for uploading
"""
import os.path
import json
import grequests
import pandas as pd
from ntd import update_dollars
from carto import replace_data
import settings
def process_result(i, y, var, indexes, frames):
"""Transform downloaded result to data frame by year"""
result = pd.DataFrame(i.json())
result.columns = result.iloc[0]
result = result.reindex(result.index.drop(0))
if 'metropolitan statistical area/micropolitan statistical area' in result.columns:
result.rename(
columns={'metropolitan statistical area/micropolitan statistical area':'GEOID'},
inplace=True
)
else:
result['GEOID'] = pd.Series(
result['state'] + result['county'] + result['tract']
).astype(str)
result['year'] = y
out = result.set_index(indexes)
df = out.groupby(level=out.index.names).last()
data = pd.to_numeric(df[var])
frames.append(data[data >= 0])
return frames
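
# Editorial note (not in the original): process_result() assumes the Census API's JSON
# layout, in which the first row is a header and each following row is one geography,
# for example (values made up):
#
#     [["B01003_001E", "state", "county", "tract"],
#      ["4321",        "01",    "001",    "020100"]]
#
# Row 0 becomes the column names, GEOID is built from state + county + tract, and the
# final filter keeps only non-negative values, dropping the API's negative sentinels
# for missing data.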
def combine_files(frame, geo, cols, index):
"""Merge downloaded ACS data with geo file"""
return pd.concat(frame, axis=1).reset_index().merge(
geo, on='GEOID', how='inner'
).drop(
columns=cols
).dropna(
subset=['pop']
).set_index(index)
def download_census():
"""Download and parse ACS data as defined in acs.json"""
geo = pd.read_csv('data/geojson/tracts/cbsa_crosswalk.csv', dtype={'GEOID': object})
counties = geo.drop_duplicates(
['STATEFP', 'COUNTYFP', 'msaid']
)
counties = counties.groupby('STATEFP')[['STATEFP', 'COUNTYFP', 'msaid']].apply(
lambda x: x.set_index('COUNTYFP').to_dict(orient='index')
).to_dict()
msa_geo = pd.read_csv('data/geojson/cbsa/cb_2017_us_cbsa_500k.csv', dtype={'GEOID': object})
indexes = ['GEOID', 'year']
msa_indexes = ['GEOID', 'year']
with open('data/census/acs.json', "r") as read_file:
meta = json.load(read_file)
acs = []
msa = []
for r in meta:
if 'var' in r:
filename = 'data/output/census/' + r['key'] + '.csv'
if os.path.isfile(filename):
csv = pd.read_csv(
filename, dtype={'GEOID': object}
).set_index(indexes)
df = csv.groupby(level=csv.index.names).last()
acs.append(df)
elif r['var'] != '99999999':
frames = []
errors = []
for y in range(2010, 2019):
for s in counties:
urls = errors
errors = []
for c in counties[s]:
urls.append('https://api.census.gov/data/' + str(y) + \
'/acs/acs5?get=' + r['var'] + '&for=tract:*&in=state:' + \
str(s).zfill(2) + '%20county:' + str(c).zfill(3) + '&key=' + \
settings.CENSUS_API)
rs = (grequests.get(u) for u in urls)
res = grequests.map(rs)
for i in res:
try:
frames = process_result(i, y, r['var'], indexes, frames)
except:
try:
print(i.text)
errors.append(i.url)
except:
pass
print('Loaded:', r['name'], y, s)
print('-----')
if errors:
print('Retrying', len(errors), 'errors')
ind = pd.Series(pd.concat(frames), name=r['key'])
ind.to_csv(filename, header=r['key'])
acs.append(ind)
filename = 'data/output/msa/' + r['key'] + '.csv'
if os.path.isfile(filename):
csv = pd.read_csv(
filename, dtype={'GEOID': object}
).set_index(indexes)
df = csv.groupby(level=csv.index.names).last()
msa.append(df)
elif r['var'] != '99999999':
frames = []
for y in range(2010, 2019):
urls = ['https://api.census.gov/data/' + str(y) + \
'/acs/acs5?get=' + r['var'] + \
'&for=metropolitan statistical area/micropolitan statistical area:*' + \
'&key=' + settings.CENSUS_API]
rs = (grequests.get(u) for u in urls)
res = grequests.map(rs)
frames = process_result(res[0], y, r['var'], indexes, frames)
ind = pd.Series(pd.concat(frames), name=r['key'])
ind.to_csv(filename, header=r['key'])
msa.append(ind)
combined = combine_files(
acs,
geo,
['STATEFP', 'COUNTYFP', 'TRACTCE', 'AFFGEOID', 'NAME', 'AWATER', 'LSAD', 'CBSAFP'],
indexes
)
msa_combo = combine_files(
msa,
msa_geo,
['AFFGEOID', 'NAME', 'AWATER', 'LSAD', 'CBSAFP'],
msa_indexes
)
for d in meta:
print(d['name'])
if 'upload' in d and d['upload']:
indexes.append(d['key'])
if 'msa' in d and d['msa']:
msa_indexes.append(d['key'])
if 'inflation' in d:
combined[d['key']] = update_dollars(pd.Series(combined[d['key']], name=d['key']))
msa_combo[d['key']] = update_dollars(pd.Series(msa_combo[d['key']], name=d['key']))
if 'var' not in d:
if 'sum' in d:
combined[d['key']] = 0
if 'msa' in d and d['msa']:
msa_combo[d['key']] = 0
for s in d['sum']:
combined[d['key']] = combined[d['key']] + combined[s]
if 'msa' in d and d['msa']:
msa_combo[d['key']] = msa_combo[d['key']] + msa_combo[s]
else:
combined[d['key']] = combined[d['numerator']].divide(
combined[d['denominator']],
fill_value=0
)
if 'msa' in d and d['msa']:
msa_combo[d['key']] = msa_combo[d['numerator']].divide(
msa_combo[d['denominator']],
fill_value=0
)
if 'scale' in d:
combined[d['key']] = combined[d['key']] * d['scale']
if 'msa' in d and d['msa']:
msa_combo[d['key']] = msa_combo[d['key']] * d['scale']
export_msa = msa_combo.reset_index()
export_msa_filtered = export_msa[
export_msa.GEOID.isin([str(i) for i in combined.msaid.unique().tolist()])
]
export_msa_filtered[msa_indexes].astype({'pop': 'int32'}).to_csv(
'data/output/census_msa.csv', index=False
)
replace_data('census_msa', msa_indexes, 'census_msa.csv')
indexes.append('msaid')
export = combined.reset_index()
export[indexes].to_csv('data/output/census.csv', index=False)
replace_data('census', indexes, 'census.csv')
if __name__ == "__main__":
download_census()
|
the-stack_0_2307 | import warnings
from collections import namedtuple
from contextlib import suppress
import boto3
from botocore.exceptions import ClientError
from dagster import Array, Field, Noneable, ScalarUnion, StringSource, check
from dagster.core.events import EngineEventData, MetadataEntry
from dagster.core.launcher.base import LaunchRunContext, RunLauncher
from dagster.grpc.types import ExecuteRunArgs
from dagster.serdes import ConfigurableClass
from dagster.utils import merge_dicts
from ..secretsmanager import get_secrets_from_arns, get_tagged_secrets
from .tasks import default_ecs_task_definition, default_ecs_task_metadata
from .utils import sanitize_family
Tags = namedtuple("Tags", ["arn", "cluster", "cpu", "memory"])
class EcsRunLauncher(RunLauncher, ConfigurableClass):
"""RunLauncher that starts a task in ECS for each Dagster job run."""
def __init__(
self,
inst_data=None,
task_definition=None,
container_name="run",
secrets=None,
secrets_tag="dagster",
include_sidecars=False,
):
self._inst_data = inst_data
self.ecs = boto3.client("ecs")
self.ec2 = boto3.resource("ec2")
self.secrets_manager = boto3.client("secretsmanager")
self.task_definition = task_definition
self.container_name = container_name
self.secrets = secrets or []
if all(isinstance(secret, str) for secret in self.secrets):
warnings.warn(
"Setting secrets as a list of ARNs is deprecated. "
"Secrets should instead follow the same structure as the ECS API: "
"https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Secret.html",
DeprecationWarning,
)
self.secrets = get_secrets_from_arns(self.secrets_manager, self.secrets)
else:
self.secrets = {secret["name"]: secret["valueFrom"] for secret in self.secrets}
self.secrets_tag = secrets_tag
self.include_sidecars = include_sidecars
if self.task_definition:
task_definition = self.ecs.describe_task_definition(taskDefinition=task_definition)
container_names = [
container.get("name")
for container in task_definition["taskDefinition"]["containerDefinitions"]
]
check.invariant(
container_name in container_names,
f"Cannot override container '{container_name}' in task definition "
f"'{self.task_definition}' because the container is not defined.",
)
self.task_definition = task_definition["taskDefinition"]["taskDefinitionArn"]
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return {
"task_definition": Field(
StringSource,
is_required=False,
description=(
"The task definition to use when launching new tasks. "
"If none is provided, each run will create its own task "
"definition."
),
),
"container_name": Field(
StringSource,
is_required=False,
default_value="run",
description=(
"The container name to use when launching new tasks. Defaults to 'run'."
),
),
"secrets": Field(
Array(
ScalarUnion(
scalar_type=str,
non_scalar_schema={"name": StringSource, "valueFrom": StringSource},
)
),
is_required=False,
description=(
"An array of AWS Secrets Manager secrets. These secrets will "
"be mounted as environment variabls in the container. See "
"https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Secret.html."
),
),
"secrets_tag": Field(
Noneable(StringSource),
is_required=False,
default_value="dagster",
description=(
"AWS Secrets Manager secrets with this tag will be mounted as "
"environment variables in the container. Defaults to 'dagster'."
),
),
"include_sidecars": Field(
bool,
is_required=False,
default_value=False,
description=(
"Whether each run should use the same sidecars as the task that launches it. "
"Defaults to False."
),
),
}
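
    # Editorial note (not in the original): a sketch of how these fields might appear
    # in an instance's dagster.yaml, assuming the standard run_launcher block; the
    # field names come from config_type() above, everything else is illustrative:
    #
    #     run_launcher:
    #       module: dagster_aws.ecs
    #       class: EcsRunLauncher
    #       config:
    #         container_name: "run"
    #         secrets_tag: "dagster"
    #         secrets:
    #           - name: "MY_TOKEN"
    #             valueFrom: "arn:aws:secretsmanager:<region>:<account>:secret:my-token"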
@staticmethod
def from_config_value(inst_data, config_value):
return EcsRunLauncher(inst_data=inst_data, **config_value)
def _set_ecs_tags(self, run_id, task_arn):
try:
tags = [{"key": "dagster/run_id", "value": run_id}]
self.ecs.tag_resource(resourceArn=task_arn, tags=tags)
except ClientError:
pass
def _set_run_tags(self, run_id, task_arn):
cluster = self._task_metadata().cluster
tags = {"ecs/task_arn": task_arn, "ecs/cluster": cluster}
self._instance.add_run_tags(run_id, tags)
def _get_run_tags(self, run_id):
run = self._instance.get_run_by_id(run_id)
tags = run.tags if run else {}
arn = tags.get("ecs/task_arn")
cluster = tags.get("ecs/cluster")
cpu = tags.get("ecs/cpu")
memory = tags.get("ecs/memory")
return Tags(arn, cluster, cpu, memory)
def launch_run(self, context: LaunchRunContext) -> None:
"""
Launch a run in an ECS task.
Currently, Fargate is the only supported launchType and awsvpc is the
only supported networkMode. These are the defaults that are set up by
docker-compose when you use the Dagster ECS reference deployment.
"""
run = context.pipeline_run
family = sanitize_family(
run.external_pipeline_origin.external_repository_origin.repository_location_origin.location_name
)
metadata = self._task_metadata()
pipeline_origin = context.pipeline_code_origin
image = pipeline_origin.repository_origin.container_image
task_definition = self._task_definition(family, metadata, image)["family"]
args = ExecuteRunArgs(
pipeline_origin=pipeline_origin,
pipeline_run_id=run.run_id,
instance_ref=self._instance.get_ref(),
)
command = args.get_command_args()
# Set cpu or memory overrides
# https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-cpu-memory-error.html
cpu_and_memory_overrides = {}
tags = self._get_run_tags(run.run_id)
if tags.cpu:
cpu_and_memory_overrides["cpu"] = tags.cpu
if tags.memory:
cpu_and_memory_overrides["memory"] = tags.memory
# Run a task using the same network configuration as this processes's
# task.
response = self.ecs.run_task(
taskDefinition=task_definition,
cluster=metadata.cluster,
overrides={
"containerOverrides": [
{
"name": self.container_name,
"command": command,
# containerOverrides expects cpu/memory as integers
**{k: int(v) for k, v in cpu_and_memory_overrides.items()},
}
],
# taskOverrides expects cpu/memory as strings
**cpu_and_memory_overrides,
},
networkConfiguration={
"awsvpcConfiguration": {
"subnets": metadata.subnets,
"assignPublicIp": metadata.assign_public_ip,
"securityGroups": metadata.security_groups,
}
},
launchType="FARGATE",
)
tasks = response["tasks"]
if not tasks:
failures = response["failures"]
exceptions = []
for failure in failures:
arn = failure.get("arn")
reason = failure.get("reason")
detail = failure.get("detail")
exceptions.append(Exception(f"Task {arn} failed because {reason}: {detail}"))
raise Exception(exceptions)
arn = tasks[0]["taskArn"]
self._set_run_tags(run.run_id, task_arn=arn)
self._set_ecs_tags(run.run_id, task_arn=arn)
self._instance.report_engine_event(
message="Launching run in ECS task",
pipeline_run=run,
engine_event_data=EngineEventData(
[
MetadataEntry("ECS Task ARN", value=arn),
MetadataEntry("ECS Cluster", value=metadata.cluster),
MetadataEntry("Run ID", value=run.run_id),
]
),
cls=self.__class__,
)
def can_terminate(self, run_id):
tags = self._get_run_tags(run_id)
if not (tags.arn and tags.cluster):
return False
tasks = self.ecs.describe_tasks(tasks=[tags.arn], cluster=tags.cluster).get("tasks")
if not tasks:
return False
status = tasks[0].get("lastStatus")
if status and status != "STOPPED":
return True
return False
def terminate(self, run_id):
tags = self._get_run_tags(run_id)
if not (tags.arn and tags.cluster):
return False
tasks = self.ecs.describe_tasks(tasks=[tags.arn], cluster=tags.cluster).get("tasks")
if not tasks:
return False
status = tasks[0].get("lastStatus")
if status == "STOPPED":
return False
self.ecs.stop_task(task=tags.arn, cluster=tags.cluster)
return True
def _task_definition(self, family, metadata, image):
"""
Return the launcher's task definition if it's configured.
Otherwise, a new task definition revision is registered for every run.
First, the process that calls this method finds its own task
        definition. Next, it creates a new task definition based on its own,
        overriding the image with the pipeline origin's image.
"""
if self.task_definition:
task_definition = self.ecs.describe_task_definition(taskDefinition=self.task_definition)
return task_definition["taskDefinition"]
secrets = merge_dicts(
(
get_tagged_secrets(self.secrets_manager, self.secrets_tag)
if self.secrets_tag
else {}
),
self.secrets,
)
secrets_dict = (
{"secrets": [{"name": key, "valueFrom": value} for key, value in secrets.items()]}
if secrets
else {}
)
task_definition = {}
with suppress(ClientError):
task_definition = self.ecs.describe_task_definition(taskDefinition=family)[
"taskDefinition"
]
container_definitions = task_definition.get("containerDefinitions", [{}])
for container_definition in container_definitions:
if (
container_definition.get("image") == image
and container_definition.get("name") == self.container_name
and container_definition.get("secrets") == secrets_dict.get("secrets", [])
):
return task_definition
return default_ecs_task_definition(
self.ecs,
family,
metadata,
image,
self.container_name,
secrets=secrets_dict,
include_sidecars=self.include_sidecars,
)
def _task_metadata(self):
return default_ecs_task_metadata(self.ec2, self.ecs)
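# Hedged illustration, not used by the launcher above: the shape of the ECS
# `overrides` payload it builds. ECS expects cpu/memory as integers inside
# containerOverrides but as strings at the task level; the function name and
# default values below are invented for the example.
def _example_run_task_overrides(container_name, command, cpu="256", memory="512"):
    cpu_and_memory = {"cpu": cpu, "memory": memory}
    return {
        "containerOverrides": [
            {
                "name": container_name,
                "command": command,
                # containerOverrides expects cpu/memory as integers
                **{k: int(v) for k, v in cpu_and_memory.items()},
            }
        ],
        # taskOverrides expects cpu/memory as strings
        **cpu_and_memory,
    }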
|
the-stack_0_2308 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Provides endpoint and web page for simple search API."""
import os
import json
from typing import Iterable, Iterator
from flask import Flask, render_template, abort, jsonify
from webargs.flaskparser import use_kwargs
from webargs import fields
FIELD_NAMES = ['job_history', 'company', 'email', 'city', 'country', 'name']
app = Flask(__name__)
def must_match_field_name(value):
return value in FIELD_NAMES
def prepare_data(data: Iterable[dict]) -> Iterator[dict]:
"""Make job_history list comma delimited for ease of processing/display.
"""
for datum in data:
datum['job_history'] = ', '.join(datum['job_history'])
yield datum
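# Hedged example (the record is invented): prepare_data flattens each
# job_history list into a single comma-delimited string.
def _demo_prepare_data():
    record = {'job_history': ['Engineer', 'Manager'], 'name': 'Ada'}
    return next(prepare_data([record]))['job_history']  # -> 'Engineer, Manager'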
def filtered_results(data: Iterable[dict],
query: str,
field: str) -> Iterator[dict]:
if not query:
yield from data
return
for datum in data:
if field:
# Case-insensitive for simplicity
if query.lower() in datum[field].lower():
yield datum
else:
for field_name in FIELD_NAMES:
if query.lower() in datum[field_name].lower():
yield datum
break
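# Hedged example (the record is invented, not part of mock-contacts.json):
# matching is case-insensitive and can be restricted to a single field.
def _demo_filtered_results():
    sample = [{
        'job_history': 'Engineer, Manager',
        'company': 'Acme',
        'email': '[email protected]',
        'city': 'Oslo',
        'country': 'Norway',
        'name': 'Ada Lovelace',
    }]
    # 'acme' matches the 'Acme' company despite the case difference.
    return list(filtered_results(sample, 'acme', 'company'))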
@app.route("/", methods=['get'])
def search():
return render_template('search.html')
@app.route("/search", methods=['get'])
@use_kwargs({
'query': fields.Str(missing=None),
'field': fields.Str(missing=None, validate=must_match_field_name),
'size': fields.Int(missing=20),
'offset': fields.Int(missing=0)
}, location="query")
def search_api(query, field, size, offset):
# File used in this example instead of further API call
# or database connection
json_path = os.path.join(app.root_path,
'static/json',
'mock-contacts.json')
data = json.load(open(json_path))
prepped_data = prepare_data(data)
results = list(filtered_results(prepped_data, query, field))
index_start = size * offset
if index_start > len(results):
abort(400)
index_stop = min(size + (size * offset), len(results))
body = {
'results': results[index_start:index_stop],
'total': len(results)
}
return jsonify(body)
@app.route("/rowchanged", methods=['post'])
@use_kwargs({
'rowindex': fields.Int(missing=None),
'oldvalue': fields.Str(missing=None),
'newvalue': fields.Str(missing=None),
'data': fields.Dict(missing=None),
}, location="json")
def rowchanged(rowindex, oldvalue, newvalue, data):
print(f"rowchanged(): row {rowindex}, '{oldvalue}' -> '{newvalue}'")
result = {
'resultcode': 'OK'
}
return jsonify(result)
if __name__ == '__main__':
app.run()
|
the-stack_0_2309 | from functools import partial
import trw
import torch.nn as nn
import torch
class Net(nn.Module):
def __init__(self):
super().__init__()
self.encoder_decoder = trw.layers.AutoencoderConvolutional(
2,
1,
[8, 16, 32],
[32, 16, 8, 1], # make sure we are cropping the decoded output by adding another layer
convolution_kernels=3,
squash_function=torch.sigmoid, # make sure the output is [0..1] domain
last_layer_is_output=True # do not apply the activation on the last layer
)
def forward(self, batch):
x = batch['images']
encoded_x, decoded_x = self.encoder_decoder.forward_with_intermediate(x)
return {
'regression': trw.train.OutputRegression(decoded_x, x),
}
def per_epoch_fn():
callbacks = [
trw.callbacks.CallbackEpochSummary(),
trw.callbacks.CallbackSkipEpoch(
nb_epochs=1,
callbacks=[trw.callbacks.CallbackReportingExportSamples(table_name='random_samples', max_samples=5, split_exclusions=['train'])]),
]
return callbacks
options = trw.train.Options(num_epochs=100)
trainer = trw.train.TrainerV2(
callbacks_per_epoch=per_epoch_fn(),
callbacks_post_training=[trw.callbacks.CallbackReportingExportSamples(max_samples=2000)],
)
results = trainer.fit(
options,
datasets=trw.datasets.create_mnist_dataset(normalize_0_1=True),
log_path='mnist_autoencoder',
model=Net(),
optimizers_fn=partial(trw.train.create_sgd_optimizers_fn, learning_rate=0.25))
|
the-stack_0_2310 | import time
import hashlib
import matplotlib.pyplot as plot
import bcrypt
import random
import argon2
# Random salt generation
def ransalt():
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
chars = []
for i in range(16):
chars.append(random.choice(ALPHABET))
return "".join(chars)
# Variables
string = input('Enter the benchmarking string: ').encode()  # hashlib and bcrypt need bytes
key = input('Please specify a key between 4 and 56 bytes: ').encode()
algo = ['MD5', 'SHA-1', 'SHA-224', 'SHA-256', 'SHA-384', 'SHA-512', 'Bcrypt', 'Scrypt', 'Argon2']
colors = ['b', 'c', 'y', 'm', 'r', 'k', 'g', 'orange', 'purple']
results = {}
# Getting iterations and step
iterations = int(input("Iterations: "))
while iterations < 1 or iterations > 1000000:
    iterations = int(input("Please enter a valid value for the number of iterations (1-1000000): "))
step = int(input("Step: "))
while step < 1 or step > 1000000:
    step = int(input("Please enter a valid value for the step (1-1000000): "))
print("\nbenchmarking...\n")
# MD5
Start = time.time()
for i in range (iterations):
for j in range ((i+1)*step):
hashlib.md5(string)
results[0,(i+1)*step] = (time.time() - Start)
print("\nMD5 benchmark done.\n")
# SHA-1
Start = time.time()
for i in range (iterations):
for j in range ((i+1)*step):
hashlib.sha1(string)
results[1, (i+1)*step] = (time.time() - Start)
print("\nSHA-1 benchmark done.\n")
# SHA-224
Start = time.time()
for i in range (iterations):
for j in range ((i+1)*step):
hashlib.sha224(string)
results[2, (i+1)*step] = (time.time() - Start)
print("\nSHA-224 benchmark done.\n")
# SHA-256
Start = time.time()
for i in range (iterations):
for j in range ((i+1)*step):
hashlib.sha256(string)
results[3, (i+1)*step] = (time.time() - Start)
print("\nSHA-256 benchmark done.\n")
# SHA-384
Start = time.time()
for i in range (iterations):
for j in range ((i+1)*step):
hashlib.sha384(string)
results[4, (i+1)*step] = (time.time() - Start)
print("\nSHA-384 benchmark done.\n")
# SHA-512
Start = time.time()
for i in range (iterations):
for j in range ((i+1)*step):
hashlib.sha512(string)
results[5, (i+1)*step] = (time.time() - Start)
print("\nSHA-512 benchmark done.\n")
# Bcrypt
Start = time.time()
tString = string  # the benchmarking string is already bytes
for i in range (iterations):
for j in range ((i+1)*step):
bcrypt.hashpw(tString, bcrypt.gensalt()) #random salt
results[6, (i+1)*step] = (time.time() - Start)
print("\nBcrypt benchmark done.\n")
# Scrypt
Start = time.time()
tString = string  # redundant but kept for exhibition purposes
for i in range (iterations):
    for j in range ((i+1)*step):
        hashlib.scrypt(key, salt=ransalt().encode(), n=16384, r=8, p=1)  # cost parameters chosen for the example
    results[7, (i+1)*step] = (time.time() - Start)
print("\nScrypt benchmark done.\n")
# Argon2
Start = time.time()
for i in range (iterations):
for j in range ((i+1)*step):
argon2.argon2_hash(string, ransalt())
results[8, (i+1)*step] = (time.time() - Start)
print("\nArgon2 benchmark done.\n")
# Generate plot and print results
print("\n---------- Report ----------\n")
for i in range(9):
print(algo[i])
for j in range (iterations):
print((j+1)*step, 'iterations in', results[i,(j+1)*step]*pow(10,3), 'ms')
        plot.plot((j+1)*step, results[i,(j+1)*step]*pow(10,3), 'o', color=colors[i], label=str(algo[i]) if j == 0 else "")
print('\n')
plot.xlabel('Iterations')
plot.ylabel('Execution time in milliseconds')
plot.title('PyBenchHash', fontsize=40, color='white')
plot.legend(loc=2)
plot.grid(True)
plot.show() |
the-stack_0_2311 | #!/usr/bin/env python3
import os
import math
from numbers import Number
from cereal import car, log
from common.numpy_fast import clip
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.drive_helpers import get_lag_adjusted_curvature
from selfdrive.controls.lib.longcontrol import LongControl
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.latcontrol_angle import LatControlAngle
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager, set_offroad_alert
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI, EON
from selfdrive.manager.process_config import managed_processes
SOFT_DISABLE_TIME = 3 # seconds
LDW_MIN_SPEED = 31 * CV.MPH_TO_MS
LANE_DEPARTURE_THRESHOLD = 0.1
REPLAY = "REPLAY" in os.environ
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ
IGNORE_PROCESSES = {"rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned",
"logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad",
"statsd", "shutdownd"} | \
{k for k, v in managed_processes.items() if not v.enabled}
ACTUATOR_FIELDS = set(car.CarControl.Actuators.schema.fields.keys())
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
ButtonEvent = car.CarState.ButtonEvent
SafetyModel = car.CarParams.SafetyModel
IGNORED_SAFETY_MODES = [SafetyModel.silent, SafetyModel.noOutput]
CSID_MAP = {"0": EventName.roadCameraError, "1": EventName.wideRoadCameraError, "2": EventName.driverCameraError}
class Controls:
def __init__(self, sm=None, pm=None, can_sock=None):
config_realtime_process(4 if TICI else 3, Priority.CTRL_HIGH)
# Setup sockets
self.pm = pm
if self.pm is None:
self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
'carControl', 'carEvents', 'carParams'])
self.camera_packets = ["roadCameraState", "driverCameraState"]
if TICI:
self.camera_packets.append("wideRoadCameraState")
params = Params()
self.joystick_mode = params.get_bool("JoystickDebugMode")
joystick_packet = ['testJoystick'] if self.joystick_mode else []
self.sm = sm
if self.sm is None:
ignore = ['driverCameraState', 'managerState'] if SIMULATION else None
self.sm = messaging.SubMaster(['deviceState', 'pandaStates', 'peripheralState', 'modelV2', 'liveCalibration',
'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
'managerState', 'liveParameters', 'radarState'] + self.camera_packets + joystick_packet,
ignore_alive=ignore, ignore_avg_freq=['radarState', 'longitudinalPlan'])
self.can_sock = can_sock
if can_sock is None:
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
if TICI:
self.log_sock = messaging.sub_sock('androidLog')
# wait for one pandaState and one CAN packet
print("Waiting for CAN messages...")
get_one_can(self.can_sock)
self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'])
# read params
self.is_metric = params.get_bool("IsMetric")
self.is_ldw_enabled = params.get_bool("IsLdwEnabled")
openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle")
passive = params.get_bool("Passive") or not openpilot_enabled_toggle
# detect sound card presence and ensure successful init
sounds_available = HARDWARE.get_sound_card_online()
car_recognized = self.CP.carName != 'mock'
controller_available = self.CI.CC is not None and not passive and not self.CP.dashcamOnly
self.read_only = not car_recognized or not controller_available or self.CP.dashcamOnly
if self.read_only:
safety_config = car.CarParams.SafetyConfig.new_message()
safety_config.safetyModel = car.CarParams.SafetyModel.noOutput
self.CP.safetyConfigs = [safety_config]
# Write CarParams for radard
cp_bytes = self.CP.to_bytes()
params.put("CarParams", cp_bytes)
put_nonblocking("CarParamsCache", cp_bytes)
self.CC = car.CarControl.new_message()
self.AM = AlertManager()
self.events = Events()
self.LoC = LongControl(self.CP)
self.VM = VehicleModel(self.CP)
if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
self.LaC = LatControlAngle(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'pid':
self.LaC = LatControlPID(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'indi':
self.LaC = LatControlINDI(self.CP, self.CI)
elif self.CP.lateralTuning.which() == 'lqr':
self.LaC = LatControlLQR(self.CP, self.CI)
self.initialized = False
self.state = State.disabled
self.enabled = False
self.active = False
self.can_rcv_error = False
self.soft_disable_timer = 0
self.v_cruise_kph = 255
self.v_cruise_kph_last = 0
self.mismatch_counter = 0
self.cruise_mismatch_counter = 0
self.can_rcv_error_counter = 0
self.last_blinker_frame = 0
self.distance_traveled = 0
self.last_functional_fan_frame = 0
self.events_prev = []
self.current_alert_types = [ET.PERMANENT]
self.logged_comm_issue = False
self.button_timers = {ButtonEvent.Type.decelCruise: 0, ButtonEvent.Type.accelCruise: 0}
self.last_actuators = car.CarControl.Actuators.new_message()
# TODO: no longer necessary, aside from process replay
self.sm['liveParameters'].valid = True
self.startup_event = get_startup_event(car_recognized, controller_available, len(self.CP.carFw) > 0)
if not sounds_available:
self.events.add(EventName.soundsUnavailable, static=True)
if not car_recognized:
self.events.add(EventName.carUnrecognized, static=True)
if len(self.CP.carFw) > 0:
set_offroad_alert("Offroad_CarUnrecognized", True)
else:
set_offroad_alert("Offroad_NoFirmware", True)
elif self.read_only:
self.events.add(EventName.dashcamMode, static=True)
elif self.joystick_mode:
self.events.add(EventName.joystickDebug, static=True)
self.startup_event = None
# controlsd is driven by can recv, expected at 100Hz
self.rk = Ratekeeper(100, print_delay_threshold=None)
self.prof = Profiler(False) # off by default
def update_events(self, CS):
"""Compute carEvents from carState"""
self.events.clear()
# Add startup event
if self.startup_event is not None:
self.events.add(self.startup_event)
self.startup_event = None
# Don't add any more events if not initialized
if not self.initialized:
self.events.add(EventName.controlsInitializing)
return
self.events.add_from_msg(CS.events)
self.events.add_from_msg(self.sm['driverMonitoringState'].events)
# Create events for battery, temperature, disk space, and memory
if EON and (self.sm['peripheralState'].pandaType != PandaType.uno) and \
self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
      # at zero percent battery, while discharging, OP should not be allowed
self.events.add(EventName.lowBattery)
if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
self.events.add(EventName.overheat)
if self.sm['deviceState'].freeSpacePercent < 7 and not SIMULATION:
# under 7% of space free no enable allowed
self.events.add(EventName.outOfSpace)
# TODO: make tici threshold the same
if self.sm['deviceState'].memoryUsagePercent > (90 if TICI else 65) and not SIMULATION:
self.events.add(EventName.lowMemory)
# TODO: enable this once loggerd CPU usage is more reasonable
#cpus = list(self.sm['deviceState'].cpuUsagePercent)[:(-1 if EON else None)]
#if max(cpus, default=0) > 95 and not SIMULATION:
# self.events.add(EventName.highCpuUsage)
# Alert if fan isn't spinning for 5 seconds
if self.sm['peripheralState'].pandaType in (PandaType.uno, PandaType.dos):
if self.sm['peripheralState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
self.events.add(EventName.fanMalfunction)
else:
self.last_functional_fan_frame = self.sm.frame
# Handle calibration status
cal_status = self.sm['liveCalibration'].calStatus
if cal_status != Calibration.CALIBRATED:
if cal_status == Calibration.UNCALIBRATED:
self.events.add(EventName.calibrationIncomplete)
else:
self.events.add(EventName.calibrationInvalid)
# Handle lane change
if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
direction = self.sm['lateralPlan'].laneChangeDirection
if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
(CS.rightBlindspot and direction == LaneChangeDirection.right):
self.events.add(EventName.laneChangeBlocked)
else:
if direction == LaneChangeDirection.left:
self.events.add(EventName.preLaneChangeLeft)
else:
self.events.add(EventName.preLaneChangeRight)
elif self.sm['lateralPlan'].laneChangeState in (LaneChangeState.laneChangeStarting,
LaneChangeState.laneChangeFinishing):
self.events.add(EventName.laneChange)
if not CS.canValid:
self.events.add(EventName.canError)
for i, pandaState in enumerate(self.sm['pandaStates']):
# All pandas must match the list of safetyConfigs, and if outside this list, must be silent or noOutput
if i < len(self.CP.safetyConfigs):
safety_mismatch = pandaState.safetyModel != self.CP.safetyConfigs[i].safetyModel or \
pandaState.safetyParam != self.CP.safetyConfigs[i].safetyParam or \
pandaState.unsafeMode != self.CP.unsafeMode
else:
safety_mismatch = pandaState.safetyModel not in IGNORED_SAFETY_MODES
if safety_mismatch or self.mismatch_counter >= 200:
self.events.add(EventName.controlsMismatch)
if log.PandaState.FaultType.relayMalfunction in pandaState.faults:
self.events.add(EventName.relayMalfunction)
# Check for HW or system issues
if len(self.sm['radarState'].radarErrors):
self.events.add(EventName.radarFault)
elif not self.sm.valid["pandaStates"]:
self.events.add(EventName.usbError)
elif not self.sm.all_alive_and_valid() or self.can_rcv_error:
self.events.add(EventName.commIssue)
if not self.logged_comm_issue:
invalid = [s for s, valid in self.sm.valid.items() if not valid]
not_alive = [s for s, alive in self.sm.alive.items() if not alive]
cloudlog.event("commIssue", invalid=invalid, not_alive=not_alive, can_error=self.can_rcv_error, error=True)
self.logged_comm_issue = True
else:
self.logged_comm_issue = False
if not self.sm['liveParameters'].valid:
self.events.add(EventName.vehicleModelInvalid)
if not self.sm['lateralPlan'].mpcSolutionValid:
self.events.add(EventName.plannerError)
if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs
self.events.add(EventName.sensorDataInvalid)
if not self.sm['liveLocationKalman'].posenetOK:
self.events.add(EventName.posenetInvalid)
if not self.sm['liveLocationKalman'].deviceStable:
self.events.add(EventName.deviceFalling)
if not REPLAY:
# Check for mismatch between openpilot and car's PCM
cruise_mismatch = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
self.cruise_mismatch_counter = self.cruise_mismatch_counter + 1 if cruise_mismatch else 0
if self.cruise_mismatch_counter > int(3. / DT_CTRL):
self.events.add(EventName.cruiseMismatch)
# Check for FCW
stock_long_is_braking = self.enabled and not self.CP.openpilotLongitudinalControl and CS.aEgo < -1.5
model_fcw = self.sm['modelV2'].meta.hardBrakePredicted and not CS.brakePressed and not stock_long_is_braking
planner_fcw = self.sm['longitudinalPlan'].fcw and self.enabled
if planner_fcw or model_fcw:
self.events.add(EventName.fcw)
if TICI:
for m in messaging.drain_sock(self.log_sock, wait_for_one=False):
try:
msg = m.androidLog.message
if any(err in msg for err in ("ERROR_CRC", "ERROR_ECC", "ERROR_STREAM_UNDERFLOW", "APPLY FAILED")):
csid = msg.split("CSID:")[-1].split(" ")[0]
evt = CSID_MAP.get(csid, None)
if evt is not None:
self.events.add(evt)
except UnicodeDecodeError:
pass
# TODO: fix simulator
if not SIMULATION:
if not NOSENSOR:
if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000):
          # Don't show in the first 1 km, to allow driving out of a garage. This event shows after 5 minutes
self.events.add(EventName.noGps)
if not self.sm.all_alive(self.camera_packets):
self.events.add(EventName.cameraMalfunction)
if self.sm['modelV2'].frameDropPerc > 20:
self.events.add(EventName.modeldLagging)
if self.sm['liveLocationKalman'].excessiveResets:
self.events.add(EventName.localizerMalfunction)
# Check if all manager processes are running
not_running = {p.name for p in self.sm['managerState'].processes if not p.running}
if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
self.events.add(EventName.processNotRunning)
# Only allow engagement with brake pressed when stopped behind another stopped car
speeds = self.sm['longitudinalPlan'].speeds
if len(speeds) > 1:
v_future = speeds[-1]
else:
v_future = 100.0
if CS.brakePressed and v_future >= self.CP.vEgoStarting \
and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
self.events.add(EventName.noTarget)
def data_sample(self):
"""Receive data from sockets and update carState"""
# Update carState from CAN
can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
CS = self.CI.update(self.CC, can_strs)
self.sm.update(0)
if not self.initialized:
all_valid = CS.canValid and self.sm.all_alive_and_valid()
if all_valid or self.sm.frame * DT_CTRL > 3.5 or SIMULATION:
if not self.read_only:
self.CI.init(self.CP, self.can_sock, self.pm.sock['sendcan'])
self.initialized = True
if REPLAY and self.sm['pandaStates'][0].controlsAllowed:
self.state = State.enabled
Params().put_bool("ControlsReady", True)
# Check for CAN timeout
if not can_strs:
self.can_rcv_error_counter += 1
self.can_rcv_error = True
else:
self.can_rcv_error = False
# When the panda and controlsd do not agree on controls_allowed
# we want to disengage openpilot. However the status from the panda goes through
# another socket other than the CAN messages and one can arrive earlier than the other.
# Therefore we allow a mismatch for two samples, then we trigger the disengagement.
if not self.enabled:
self.mismatch_counter = 0
# All pandas not in silent mode must have controlsAllowed when openpilot is enabled
if self.enabled and any(not ps.controlsAllowed for ps in self.sm['pandaStates']
if ps.safetyModel not in IGNORED_SAFETY_MODES):
self.mismatch_counter += 1
self.distance_traveled += CS.vEgo * DT_CTRL
return CS
def state_transition(self, CS):
"""Compute conditional state transitions and execute actions on state transitions"""
self.v_cruise_kph_last = self.v_cruise_kph
# if stock cruise is completely disabled, then we can use our own set speed logic
if not self.CP.pcmCruise:
self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.button_timers, self.enabled, self.is_metric)
elif CS.cruiseState.enabled:
self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
# decrement the soft disable timer at every step, as it's reset on
# entrance in SOFT_DISABLING state
self.soft_disable_timer = max(0, self.soft_disable_timer - 1)
self.current_alert_types = [ET.PERMANENT]
# ENABLED, PRE ENABLING, SOFT DISABLING
if self.state != State.disabled:
# user and immediate disable always have priority in a non-disabled state
if self.events.any(ET.USER_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.USER_DISABLE)
elif self.events.any(ET.IMMEDIATE_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.IMMEDIATE_DISABLE)
else:
# ENABLED
if self.state == State.enabled:
if self.events.any(ET.SOFT_DISABLE):
self.state = State.softDisabling
self.soft_disable_timer = int(SOFT_DISABLE_TIME / DT_CTRL)
self.current_alert_types.append(ET.SOFT_DISABLE)
# SOFT DISABLING
elif self.state == State.softDisabling:
if not self.events.any(ET.SOFT_DISABLE):
# no more soft disabling condition, so go back to ENABLED
self.state = State.enabled
elif self.soft_disable_timer > 0:
self.current_alert_types.append(ET.SOFT_DISABLE)
elif self.soft_disable_timer <= 0:
self.state = State.disabled
# PRE ENABLING
elif self.state == State.preEnabled:
if not self.events.any(ET.PRE_ENABLE):
self.state = State.enabled
else:
self.current_alert_types.append(ET.PRE_ENABLE)
# DISABLED
elif self.state == State.disabled:
if self.events.any(ET.ENABLE):
if self.events.any(ET.NO_ENTRY):
self.current_alert_types.append(ET.NO_ENTRY)
else:
if self.events.any(ET.PRE_ENABLE):
self.state = State.preEnabled
else:
self.state = State.enabled
self.current_alert_types.append(ET.ENABLE)
self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)
# Check if actuators are enabled
self.active = self.state == State.enabled or self.state == State.softDisabling
if self.active:
self.current_alert_types.append(ET.WARNING)
# Check if openpilot is engaged
self.enabled = self.active or self.state == State.preEnabled
def state_control(self, CS):
"""Given the state, this function returns an actuators packet"""
# Update VehicleModel
params = self.sm['liveParameters']
x = max(params.stiffnessFactor, 0.1)
sr = max(params.steerRatio, 0.1)
self.VM.update_params(x, sr)
lat_plan = self.sm['lateralPlan']
long_plan = self.sm['longitudinalPlan']
actuators = car.CarControl.Actuators.new_message()
actuators.longControlState = self.LoC.long_control_state
if CS.leftBlinker or CS.rightBlinker:
self.last_blinker_frame = self.sm.frame
# State specific actions
if not self.active:
self.LaC.reset()
self.LoC.reset(v_pid=CS.vEgo)
if not self.joystick_mode:
# accel PID loop
pid_accel_limits = self.CI.get_pid_accel_limits(self.CP, CS.vEgo, self.v_cruise_kph * CV.KPH_TO_MS)
t_since_plan = (self.sm.frame - self.sm.rcv_frame['longitudinalPlan']) * DT_CTRL
actuators.accel = self.LoC.update(self.active, CS, self.CP, long_plan, pid_accel_limits, t_since_plan)
# Steering PID loop and lateral MPC
lat_active = self.active and not CS.steerWarning and not CS.steerError and CS.vEgo > self.CP.minSteerSpeed
desired_curvature, desired_curvature_rate = get_lag_adjusted_curvature(self.CP, CS.vEgo,
lat_plan.psis,
lat_plan.curvatures,
lat_plan.curvatureRates)
actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(lat_active, CS, self.CP, self.VM, params, self.last_actuators,
desired_curvature, desired_curvature_rate)
else:
lac_log = log.ControlsState.LateralDebugState.new_message()
if self.sm.rcv_frame['testJoystick'] > 0 and self.active:
actuators.accel = 4.0*clip(self.sm['testJoystick'].axes[0], -1, 1)
steer = clip(self.sm['testJoystick'].axes[1], -1, 1)
# max angle is 45 for angle-based cars
actuators.steer, actuators.steeringAngleDeg = steer, steer * 45.
lac_log.active = True
lac_log.steeringAngleDeg = CS.steeringAngleDeg
lac_log.output = steer
lac_log.saturated = abs(steer) >= 0.9
# Send a "steering required alert" if saturation count has reached the limit
if lac_log.active and lac_log.saturated and not CS.steeringPressed:
dpath_points = lat_plan.dPathPoints
if len(dpath_points):
# Check if we deviated from the path
# TODO use desired vs actual curvature
left_deviation = actuators.steer > 0 and dpath_points[0] < -0.20
right_deviation = actuators.steer < 0 and dpath_points[0] > 0.20
if left_deviation or right_deviation:
self.events.add(EventName.steerSaturated)
# Ensure no NaNs/Infs
for p in ACTUATOR_FIELDS:
attr = getattr(actuators, p)
if not isinstance(attr, Number):
continue
if not math.isfinite(attr):
cloudlog.error(f"actuators.{p} not finite {actuators.to_dict()}")
setattr(actuators, p, 0.0)
return actuators, lac_log
def update_button_timers(self, buttonEvents):
# increment timer for buttons still pressed
for k in self.button_timers:
if self.button_timers[k] > 0:
self.button_timers[k] += 1
for b in buttonEvents:
if b.type.raw in self.button_timers:
self.button_timers[b.type.raw] = 1 if b.pressed else 0
def publish_logs(self, CS, start_time, actuators, lac_log):
"""Send actuators and hud commands to the car, send controlsstate and MPC logging"""
CC = car.CarControl.new_message()
CC.enabled = self.enabled
CC.active = self.active
CC.actuators = actuators
orientation_value = self.sm['liveLocationKalman'].orientationNED.value
if len(orientation_value) > 2:
CC.roll = orientation_value[0]
CC.pitch = orientation_value[1]
CC.cruiseControl.cancel = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
if self.joystick_mode and self.sm.rcv_frame['testJoystick'] > 0 and self.sm['testJoystick'].buttons[0]:
CC.cruiseControl.cancel = True
hudControl = CC.hudControl
hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
hudControl.speedVisible = self.enabled
hudControl.lanesVisible = self.enabled
hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead
hudControl.rightLaneVisible = True
hudControl.leftLaneVisible = True
recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0 # 5s blinker cooldown
ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED
model_v2 = self.sm['modelV2']
desire_prediction = model_v2.meta.desirePrediction
if len(desire_prediction) and ldw_allowed:
right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
l_lane_change_prob = desire_prediction[Desire.laneChangeLeft - 1]
r_lane_change_prob = desire_prediction[Desire.laneChangeRight - 1]
lane_lines = model_v2.laneLines
l_lane_close = left_lane_visible and (lane_lines[1].y[0] > -(1.08 + CAMERA_OFFSET))
r_lane_close = right_lane_visible and (lane_lines[2].y[0] < (1.08 - CAMERA_OFFSET))
hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)
if hudControl.rightLaneDepart or hudControl.leftLaneDepart:
self.events.add(EventName.ldw)
clear_event_types = set()
if ET.WARNING not in self.current_alert_types:
clear_event_types.add(ET.WARNING)
if self.enabled:
clear_event_types.add(ET.NO_ENTRY)
alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric, self.soft_disable_timer])
self.AM.add_many(self.sm.frame, alerts)
current_alert = self.AM.process_alerts(self.sm.frame, clear_event_types)
if current_alert:
hudControl.visualAlert = current_alert.visual_alert
if not self.read_only and self.initialized:
# send car controls over can
self.last_actuators, can_sends = self.CI.apply(CC)
self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
CC.actuatorsOutput = self.last_actuators
force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
(self.state == State.softDisabling)
# Curvature & Steering angle
params = self.sm['liveParameters']
steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetDeg)
curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo, params.roll)
# controlsState
dat = messaging.new_message('controlsState')
dat.valid = CS.canValid
controlsState = dat.controlsState
if current_alert:
controlsState.alertText1 = current_alert.alert_text_1
controlsState.alertText2 = current_alert.alert_text_2
controlsState.alertSize = current_alert.alert_size
controlsState.alertStatus = current_alert.alert_status
controlsState.alertBlinkingRate = current_alert.alert_rate
controlsState.alertType = current_alert.alert_type
controlsState.alertSound = current_alert.audible_alert
controlsState.canMonoTimes = list(CS.canMonoTimes)
controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
controlsState.enabled = self.enabled
controlsState.active = self.active
controlsState.curvature = curvature
controlsState.state = self.state
controlsState.engageable = not self.events.any(ET.NO_ENTRY)
controlsState.longControlState = self.LoC.long_control_state
controlsState.vPid = float(self.LoC.v_pid)
controlsState.vCruise = float(self.v_cruise_kph)
controlsState.upAccelCmd = float(self.LoC.pid.p)
controlsState.uiAccelCmd = float(self.LoC.pid.i)
controlsState.ufAccelCmd = float(self.LoC.pid.f)
controlsState.cumLagMs = -self.rk.remaining * 1000.
controlsState.startMonoTime = int(start_time * 1e9)
controlsState.forceDecel = bool(force_decel)
controlsState.canErrorCounter = self.can_rcv_error_counter
lat_tuning = self.CP.lateralTuning.which()
if self.joystick_mode:
controlsState.lateralControlState.debugState = lac_log
elif self.CP.steerControlType == car.CarParams.SteerControlType.angle:
controlsState.lateralControlState.angleState = lac_log
elif lat_tuning == 'pid':
controlsState.lateralControlState.pidState = lac_log
elif lat_tuning == 'lqr':
controlsState.lateralControlState.lqrState = lac_log
elif lat_tuning == 'indi':
controlsState.lateralControlState.indiState = lac_log
self.pm.send('controlsState', dat)
# carState
car_events = self.events.to_msg()
cs_send = messaging.new_message('carState')
cs_send.valid = CS.canValid
cs_send.carState = CS
cs_send.carState.events = car_events
self.pm.send('carState', cs_send)
# carEvents - logged every second or on change
if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
ce_send = messaging.new_message('carEvents', len(self.events))
ce_send.carEvents = car_events
self.pm.send('carEvents', ce_send)
self.events_prev = self.events.names.copy()
# carParams - logged every 50 seconds (> 1 per segment)
if (self.sm.frame % int(50. / DT_CTRL) == 0):
cp_send = messaging.new_message('carParams')
cp_send.carParams = self.CP
self.pm.send('carParams', cp_send)
# carControl
cc_send = messaging.new_message('carControl')
cc_send.valid = CS.canValid
cc_send.carControl = CC
self.pm.send('carControl', cc_send)
# copy CarControl to pass to CarInterface on the next iteration
self.CC = CC
def step(self):
start_time = sec_since_boot()
self.prof.checkpoint("Ratekeeper", ignore=True)
# Sample data from sockets and get a carState
CS = self.data_sample()
self.prof.checkpoint("Sample")
self.update_events(CS)
if not self.read_only and self.initialized:
# Update control state
self.state_transition(CS)
self.prof.checkpoint("State transition")
# Compute actuators (runs PID loops and lateral MPC)
actuators, lac_log = self.state_control(CS)
self.prof.checkpoint("State Control")
# Publish data
self.publish_logs(CS, start_time, actuators, lac_log)
self.prof.checkpoint("Sent")
self.update_button_timers(CS.buttonEvents)
def controlsd_thread(self):
while True:
self.step()
self.rk.monitor_time()
self.prof.display()
def main(sm=None, pm=None, logcan=None):
controls = Controls(sm, pm, logcan)
controls.controlsd_thread()
if __name__ == "__main__":
main()
|
the-stack_0_2313 | """
Methods for computing confidence intervals.
"""
import scipy.special as special
import numpy as np
import pandas as pd
import scipy.stats as stats
def z_effect(ci_low, ci_high):
"""
Compute an effect score for a z-score.
Parameters
----------
ci_low :
Lower bound of the confidence interval
ci_high :
Upper bound of the confidence interval
Returns
-------
score :
An effect score for a Z-score
Notes
    -----
    This is the absolute value of the interval endpoint closest to zero,
    or zero if the interval contains zero.
"""
if np.isnan(ci_low) or np.isnan(ci_high):
return 0
return 0 if (ci_low * ci_high < 0) else min(abs(ci_low), abs(ci_high))
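# Hedged examples with arbitrary interval endpoints: an interval straddling zero
# gives no effect, otherwise the endpoint closest to zero is returned.
def _demo_z_effect():
    assert z_effect(-0.5, 1.2) == 0     # interval contains zero
    assert z_effect(0.8, 1.5) == 0.8    # all-positive interval
    assert z_effect(-1.5, -0.7) == 0.7  # all-negative interval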
def ci_mi(g, dof, n, conf):
"""
Compute confidence interval for mutual information from the chi-squared
distribution.
Parameters
----------
g :
the G-test score
dof :
the number of degrees of freedom
n :
the size of the data sample
conf :
the confidence level
Returns
-------
ci_low :
The lower level of the confidence interval for MI
ci_high :
The upper level of the confidence interval for MI
References
----------
Smithson, M. (Ed.). (2003). Confidence Intervals. (07/140). Thousand Oaks,
CA: SAGE Publications, Inc. doi: http://dx.doi.org/10.4135/9781412983761
https://en.wikipedia.org/wiki/G-test
"""
p_low = 1-(1-conf)/2
p_high = (1-conf)/2
g_low = special.chndtrinc(g, dof, p_low)
g_high = special.chndtrinc(g, dof, p_high)
ci_low, ci_high = ((g_low+dof)/(2.0*n), (g_high+dof)/(2.0*n))
return ci_low, ci_high
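# Hedged usage sketch: the G-statistic, degrees of freedom, sample size and
# confidence level below are invented purely to show the call.
def _demo_ci_mi():
    g, dof, n = 12.3, 2, 500
    return ci_mi(g, dof, n, 0.95)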
def ci_norm(conf, stat, sigma):
"""
Confidence interval for a normal approximation.
Parameters
----------
conf :
the confidence level
stat :
the asymptotically normal statistic
sigma :
the standard deviation of the statistic
Returns
-------
ci_low :
The lower level of the confidence interval
ci_high :
The upper level of the confidence interval
"""
ci_low, ci_high = stats.norm.interval(conf, loc=stat, scale=sigma)
return ci_low, ci_high
def bootstrap_ci_ct(data, stat, num_samples=10000, conf=0.95):
"""
Bootstrap confidence interval computation on a contingency table
Parameters
----------
data :
Contingency table collected from independent samples
stat :
Statistic to bootstrap. Takes a contingency table as argument
num_samples :
Number of bootstrap samples to generate
conf :
Confidence level for the interval
Returns
-------
ci_low :
The lower level of the confidence interval
ci_high :
The upper level of the confidence interval
"""
if isinstance(data, pd.DataFrame):
data = data.values
dim = data.shape
data = data.flatten()
    data += 1  # add-one smoothing so no cell count is zero
n = data.sum()
# print 'Bootstrap on data of size {}'.format(n)
probas = (1.0*data)/n
# Obtain `num_samples' random samples of `n' multinomial values, sampled
# with replacement from {0, 1, ..., n-1}. For each sample, rebuild a
# contingency table and compute the stat.
temp = np.random.multinomial(n, probas, size=num_samples)
bs_stats = [row.reshape(dim) for row in temp]
bs_stats = [stat(ct) for ct in bs_stats]
alpha = 1-conf
ci_low = np.percentile(bs_stats, 100*alpha/2)
ci_high = np.percentile(bs_stats, 100*(1-alpha/2))
return ci_low, ci_high
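# Hedged example with an invented 2x2 table: bootstrap a simple statistic
# (share of counts in the first cell) and get its confidence interval.
def _demo_bootstrap_ci_ct():
    table = np.array([[30, 10], [20, 40]])
    first_cell_share = lambda ct: ct[0, 0] / float(ct.sum())
    return bootstrap_ci_ct(table, first_cell_share, num_samples=1000, conf=0.95)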
def bootstrap_ci_corr(x, y, stat, num_samples=10000, conf=0.95):
"""
Bootstrap confidence interval computation for correlation
Parameters
----------
x :
First dimension of the data
y :
Second dimension of the data
stat :
Statistic to bootstrap. Takes a two-dimensional array as input
num_samples :
Number of bootstrap samples to generate
conf :
Confidence level for the interval
Returns
-------
ci_low :
The lower level of the confidence interval
ci_high :
The upper level of the confidence interval
"""
    data = np.array(list(zip(x, y)))
n = len(data)
idxs = np.random.randint(0, n, (num_samples, n))
samples = [data[idx] for idx in idxs]
bs_stats = [stat(sample[:, 0], sample[:, 1]) for sample in samples]
alpha = 1-conf
ci_low = np.percentile(bs_stats, 100*alpha/2)
ci_high = np.percentile(bs_stats, 100*(1-alpha/2))
return ci_low, ci_high
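# Hedged example on synthetic data: bootstrap a confidence interval for the
# Pearson correlation, using numpy's corrcoef as the statistic.
def _demo_bootstrap_ci_corr():
    rng = np.random.RandomState(0)
    x = rng.normal(size=200)
    y = x + rng.normal(scale=0.5, size=200)
    pearson = lambda a, b: np.corrcoef(a, b)[0, 1]
    return bootstrap_ci_corr(x, y, pearson, num_samples=1000, conf=0.95)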
def bootstrap_ci_ct_cond(data, stat, num_samples=10000, conf=0.95):
"""
Bootstrap confidence interval computation on a 3-way contingency table
Parameters
----------
data :
Contingency table collected from independent samples
stat :
Statistic to bootstrap. Takes a 3-way contingency table as argument
num_samples :
Number of bootstrap samples to generate
conf :
Confidence level for the interval
Returns
-------
ci_low :
The lower level of the confidence interval
ci_high :
The upper level of the confidence interval
"""
data = np.array([ct.values if isinstance(ct, pd.DataFrame)
else ct for ct in data])
dim = data.shape
data = [ct.flatten()+1 for ct in data]
probas = [(1.0*ct)/ct.sum() for ct in data]
# Resample for each explanatory group
temp = np.dstack([np.random.multinomial(data[i].sum(),
probas[i],
size=num_samples)
for i in range(dim[0])])
bs_stats = [row.T.reshape(dim) for row in temp]
bs_stats = [stat(ct) for ct in bs_stats]
alpha = 1-conf
ci_low = np.percentile(bs_stats, 100*alpha/2)
ci_high = np.percentile(bs_stats, 100*(1-alpha/2))
return ci_low, ci_high
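# Hedged example: a 3-way table given as two invented 2x2 tables, one per
# explanatory group, with the same first-cell-share statistic as above.
def _demo_bootstrap_ci_ct_cond():
    tables = [np.array([[30, 10], [20, 40]]), np.array([[25, 15], [10, 50]])]
    stat = lambda cts: sum(ct[0, 0] for ct in cts) / float(sum(ct.sum() for ct in cts))
    return bootstrap_ci_ct_cond(tables, stat, num_samples=1000)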
|
the-stack_0_2314 | from django.test import TestCase
from mock import patch, MagicMock
from model_mommy import mommy
from dbaas.tests.helpers import DatabaseHelper
from logical.models import Database
from notification.tasks import check_database_is_alive
@patch('logical.models.Database.update_status', new=MagicMock())
@patch('notification.tasks.get_worker_name', new=MagicMock())
@patch('notification.tasks.TaskHistory.register')
class DatabaseStatusTestCase(TestCase):
def setUp(self):
self.task_history = mommy.make(
'TaskHistory',
task_name='notification.tasks.database_status',
)
def test_database_alive(self, task_register_mock):
database = DatabaseHelper.create(name='test', status=Database.ALIVE)
task_register_mock.return_value = self.task_history
check_database_is_alive(database, wait=0)
self.assertEqual(self.task_history.task_status, 'SUCCESS')
self.assertIn('Database test is Alive', self.task_history.details)
def test_database_initializing(self, task_register_mock):
database = DatabaseHelper.create(
name='test', status=Database.INITIALIZING
)
task_register_mock.return_value = self.task_history
check_database_is_alive(database, wait=0)
self.assertEqual(self.task_history.task_status, 'SUCCESS')
self.assertIn('Database test is Initializing',
self.task_history.details
)
def test_database_alert(self, task_register_mock):
database = DatabaseHelper.create(name='test', status=Database.ALERT)
task_register_mock.return_value = self.task_history
check_database_is_alive(database, wait=0)
self.assertEqual(self.task_history.task_status, 'ERROR')
self.assertIn('Database test is Alert', self.task_history.details)
def test_database_dead(self, task_register_mock):
database = DatabaseHelper.create(name='test', status=Database.DEAD)
task_register_mock.return_value = self.task_history
check_database_is_alive(database, wait=0)
self.assertEqual(self.task_history.task_status, 'ERROR')
self.assertIn('Database test is Dead', self.task_history.details)
|
the-stack_0_2315 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 15 12:48:54 2019
@author: James Kring
@email: [email protected]
"""
import sys
sys.path.insert(0, '/home/cth/cthgroup/Python/recon')
from recon_input import InputClass
import click
# =============================================================================
# Example Commandline Use: python recon_runner.py shot 'times'
# python recon_runner.py 14092626 '1.62 1.63 1.64'
#
# Input files will be saved in shot_number directory that is in the
# working_directory set below.
# =============================================================================
@click.command(context_settings=dict(ignore_unknown_options=True,
allow_extra_args=True,))
@click.pass_context
@click.argument('shotnumber')
@click.argument('times')
@click.option('--inputs', is_flag=True, help='Generates input files only')
def cmd_line(ctx, shotnumber, times, inputs):
d = dict()
for item in ctx.args:
d1 = [item.split('=')]
d.update(d1)
times = times.split()
n_times = []
for time in times:
n_times.append(float(time))
try:
with open('v3config.txt', 'r') as file:
lines = file.readlines()
marker = True
except:
marker = False
if marker:
for i, line in enumerate(lines):
if line.startswith('working_directory'):
working_directory1 = line.rstrip().split('=')[1]
elif line.startswith('vmec_template'):
vmec_template1 = line.rstrip().split('=')[1]
elif line.startswith('v3fit_template'):
v3fit_template1 = line.rstrip().split('=')[1]
elif line.startswith('v3fit_executable'):
v3fit_executable1 = line.rstrip().split('=')[1]
if 'v3fit_executable' not in d:
d['v3fit_executable']=v3fit_executable1
if 'directory' not in d:
d['directory']=working_directory1
if 'vmec_template' not in d:
d['vmec_template']=vmec_template1
if 'v3fit_template' not in d:
d['v3fit_template']=v3fit_template1
shot = InputClass(int(shotnumber), n_times,
**d)
else:
shot = InputClass(int(shotnumber), n_times, **d)
if inputs:
shot.generate_input_files()
else:
shot.generate_and_run()
if __name__ == '__main__':
cmd_line() |
the-stack_0_2316 | from code_pipeline.tests_generation import RoadTestFactory
from time import sleep
from swat_gen.road_gen import RoadGen
import logging as log
from code_pipeline.validation import TestValidator
from code_pipeline.tests_generation import RoadTestFactory
from scipy.interpolate import splprep, splev, interp1d, splrep
from shapely.geometry import LineString, Point, GeometryCollection
from numpy.ma import arange
class SwatTestGenerator:
"""
    This simple test generator creates roads using affine transformations of vectors.
    To generate the sequence of actions, e.g. "go straight", "turn right", "turn left",
    a Markov chain is used.
    This generator can quickly create a number of tests; however, their fault-revealing power
    isn't optimized and the roads can intersect.
"""
def __init__(self, time_budget=None, executor=None, map_size=None):
self.map_size = map_size
self.time_budget = time_budget
self.executor = executor
def start(self):
road = RoadGen(self.map_size, 5, 30, 10, 80)
while self.executor.get_remaining_time() > 0:
# Some debugging
log.info(
"Starting test generation. Remaining time %s",
self.executor.get_remaining_time(),
)
# generate the road points.
# class input values correspond to maximum distance to go stright and rotation angle
road.test_case_generate()
points = interpolate_road(road.road_points)
points = remove_invalid_cases(points, self.map_size)
the_test = RoadTestFactory.create_road_test(points)
# Some more debugging
log.info("Generated test using: %s", road.road_points)
#the_test = RoadTestFactory.create_road_test(road.road_points)
# Try to execute the test
test_outcome, description, execution_data = self.executor.execute_test(
the_test
)
# Print the result from the test and continue
log.info("test_outcome %s", test_outcome)
log.info("description %s", description)
if self.executor.road_visualizer:
sleep(5)
def interpolate_road(road):
#road.sort()
#print(road)
test_road = LineString([(t[0], t[1]) for t in road])
length = test_road.length
#print("Length", length)
old_x_vals = [t[0] for t in road]
old_y_vals = [t[1] for t in road]
if len(old_x_vals) == 2:
# With two points the only option is a straight segment
k = 1
elif len(old_x_vals) == 3:
# With three points we use an arc, using linear interpolation will result in invalid road tests
k = 2
else:
        # Otherwise, use cubic splines
k = 3
f2, u = splprep([old_x_vals, old_y_vals], s=0, k=k)
step_size = 1 / (length) * 10
xnew = arange(0, 1 + step_size, step_size)
x2, y2 = splev(xnew, f2)
nodes = list(zip(x2,y2))
return nodes
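# Hedged example with arbitrary control points: three points use the k=2 arc
# interpolation above and get resampled to roughly one node per 10 map units.
def _demo_interpolate_road():
    control_points = [[10.0, 10.0], [60.0, 40.0], [110.0, 10.0]]
    return interpolate_road(control_points)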
def remove_invalid_cases(points, map_size):
new_list = []
i = 0
while i < len(points):
if point_in_range_2(points[i], map_size) == 1:
new_list.append(points[i])
else:
return new_list
i+=1
return new_list
def point_in_range_2(a, map_size):
"""check if point is in the acceptable range"""
    if ((0 + 4) < a[0] and a[0] < (map_size - 4)) and ((0 + 4) < a[1] and a[1] < (map_size - 4)):
return 1
else:
return 0
if __name__ == "__main__":
tests = SwatTestGenerator(time_budget=250000, executor="mock", map_size=200)
tests.start()
|
the-stack_0_2318 | from botocore.exceptions import ClientError
# Stores found values to minimize AWS calls
PARAM_CACHE = {}
current_region = None
def get_special_param(client, func, param):
print('Getting info for func: {}, param: {}'.format(func, param))
if param in PARAM_CACHE:
return PARAM_CACHE[param]
if param == 'Bucket':
PARAM_CACHE[param] = get_bucket(client)
elif param == 'Attribute':
        # Return 'Attribute' directly because it doesn't need to reach out to AWS
return get_attribute(func)
elif param == 'Key':
PARAM_CACHE[param] = get_key(client)
return PARAM_CACHE[param]
def get_key(client, i=0):
try:
bucket = client.list_buckets()['Buckets'][i]['Name']
try:
key = client.list_objects_v2(
Bucket=bucket,
MaxKeys=1
).get('Contents', [{}])[0].get('Key')
return key
except KeyError:
            return get_key(client, i + 1)  # If this bucket is empty, try the next one
except ClientError as error:
if error.response['Error']['Code'] == 'AccessDeniedException':
return None
return None
def get_bucket(client):
try:
return client.list_buckets()['Buckets'][0]['Name']
except ClientError as error:
if error.response['Error']['Code'] == 'AccessDeniedException':
return None
return None
def get_attribute(func):
FUNC_ATTRIBUTES = {
'reset_image_attribute': 'launchPermission',
'reset_instance_attribute': 'kernel',
'reset_snapshot_attribute': 'createVolumePermission',
'describe_instance_attribute': 'instanceType',
'describe_image_attribute': 'description',
'describe_snapshot_attribute': 'productCodes',
'describe_vpc_attribute': 'enableDnsSupport',
}
return FUNC_ATTRIBUTES.get(func, None)
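# Hedged example: 'Attribute' is resolved from the local FUNC_ATTRIBUTES table,
# so no boto3 client or AWS call is needed (hence client=None here).
def _demo_attribute_lookup():
    return get_special_param(None, 'describe_vpc_attribute', 'Attribute')  # -> 'enableDnsSupport'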
|
the-stack_0_2319 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Quantization define"""
import mindspore as ms
import mindspore.nn as nn
from mindspore import Parameter, Tensor
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore.common.initializer import initializer
#------weight symmetric, activation asymmetric------#
class QuanConv(nn.Conv2d):
r"""Conv for quantization"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, pad_mode='same',
padding=0, dilation=1, group=1, has_bias=True):
super(QuanConv, self).__init__(in_channels, out_channels,
kernel_size, stride, pad_mode, padding, dilation, group, has_bias)
self.floor = P.Floor()
self.expand_dims = P.ExpandDims()
self.x_lower_bound = Tensor(0, ms.float32)
self.x_upper_bound = Tensor(2 ** 8 - 1, ms.float32)
self.w_lower_bound = Tensor(-2 ** 7 - 1, ms.float32)
self.w_upper_bound = Tensor(2 ** 7, ms.float32)
self.scale_a = Parameter(initializer('ones', [1]), name='scale_a')
self.scale_w = Parameter(initializer(
'ones', [out_channels]), name='scale_w')
self.zp_a = Parameter(initializer('ones', [1]), name='zp_a')
def construct(self, in_data):
r"""construct of QuantConv"""
x = self.floor(in_data / self.scale_a - self.zp_a + 0.5)
x = C.clip_by_value(x, self.x_lower_bound, self.x_upper_bound)
x = (x + self.zp_a) * self.scale_a
exp_dim_scale_w = self.scale_w
exp_dim_scale_w = self.expand_dims(exp_dim_scale_w, 1)
exp_dim_scale_w = self.expand_dims(exp_dim_scale_w, 2)
exp_dim_scale_w = self.expand_dims(exp_dim_scale_w, 3)
w = self.floor(self.weight / exp_dim_scale_w + 0.5)
w = C.clip_by_value(w, self.w_lower_bound, self.w_upper_bound)
w = w * exp_dim_scale_w
# forward
output = self.conv2d(x, w)
if self.has_bias:
output = self.bias_add(output, self.bias)
return output
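# Hedged usage sketch with arbitrary shapes; assumes a working MindSpore install
# and that the fake-quantized layer is simply called like any other Cell.
def _demo_quan_conv():
    import numpy as np
    net = QuanConv(in_channels=3, out_channels=8, kernel_size=3)
    dummy = Tensor(np.ones((1, 3, 16, 16)), ms.float32)
    return net(dummy).shape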
|
the-stack_0_2320 | from django.contrib.auth.models import User
from image_loader.image.models import MainImage, Image
from image_loader.plan.models import UserPlan, Plan
from rest_framework.test import APITestCase
from django.urls import reverse
from django.core.files import File
class TestAPI(APITestCase):
@classmethod
def setUpTestData(cls):
"""
Mock some objects
"""
enterprise_user = User.objects.create_user(
username="enterprise",
password="enterprise",
)
enterprise_plan = Plan.objects.create(
plan_name="Enterprise",
allowed_sizes="200 400",
acces_to_the_og=True,
ability_to_generate_expiring_links=True,
)
UserPlan.objects.create(
user=enterprise_user,
plan=enterprise_plan
)
basic_user = User.objects.create_user(
username="basic",
password="basic",
)
basic_plan = Plan.objects.create(
plan_name="Basic",
allowed_sizes="200",
acces_to_the_og=False,
ability_to_generate_expiring_links=False,
)
UserPlan.objects.create(
user=basic_user,
plan=basic_plan
)
def test_get_allowed_sizes(self):
"""
test if obj method returns correct data
"""
plan = Plan.objects.get(plan_name="Enterprise")
self.assertEqual(plan.get_allowed_sizes(), ["200", "400"])
plan = Plan.objects.get(plan_name="Basic")
self.assertEqual(plan.get_allowed_sizes(), ["200"])
def test_image_main_view_set_basic(self):
"""
image uploader, Basic Plan
"""
url = reverse("image:mainimage-list")
response = self.client.get(url)
self.assertEqual(response.status_code, 403) ## because of unauth
user = User.objects.get(username="basic")
self.client.force_authenticate(user)
response = self.client.get(url)
self.assertEqual(response.status_code, 200) ## auth, OK
data = {}
response = self.client.post(url, data)
        self.assertEqual(response.status_code, 400) ## because of empty image
data["image"] = File(open("media/test/test.jpg", "rb"))
response = self.client.post(url, data)
self.assertEqual(response.data["image_name"], "test")
images = response.data["images"]
self.assertEqual(len(images), 1) ## just image of size 200
data = {}
data["image"] = File(open("media/test/test.bmp", "rb"))
response = self.client.post(url, data)
self.assertEqual(response.status_code, 400) ## because of the incorrect extension
self.assertEqual(str(response.data["image"][0]), "Incorrect file!")
data["image"] = File(open("media/test/test.jpg", "rb"))
response = self.client.post(url, data)
self.assertEqual(response.status_code, 400) ## same file already exists
def test_image_main_views_set_and_detail_enterprise(self):
"""
image uploader, Enerprise Plan
"""
user = User.objects.get(username="enterprise")
self.client.force_authenticate(user)
url = reverse("image:mainimage-list")
data = {}
data["image"] = File(open("media/test/test.jpg", "rb"))
response = self.client.post(url, data)
self.assertEqual(len(response.data["images"]), 3) ## 200, 400 and original photo
url = reverse("image:mainimage-detail", kwargs={"image_name": "test"})
response = self.client.get(url)
self.assertEqual(response.data["image_name"], "test")
self.assertEqual(len(response.data["images"]), 3) ## 200, 400 and original photo
def test_generate_link_api_view(self):
"""
generating temporary links to images
"""
url = reverse("image:mainimage-list")
data = {}
data["image"] = File(open("media/test/test.jpg", "rb"))
user = User.objects.get(username="enterprise")
self.client.force_authenticate(user)
response = self.client.post(url, data)
self.assertEqual(response.status_code, 201)
url = reverse("image:generate-link")
data = {
"expires_after": 1000,
"size": 200,
"image_name": "test"
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
link = response.data["link"]
response = self.client.get(link)
self.assertEqual(response.status_code, 200) |
the-stack_0_2321 | from django.test import TestCase
from django.test.client import Client
from wagtail.wagtailredirects import models
def get_default_site():
from wagtail.wagtailcore.models import Site
return Site.objects.filter(is_default_site=True).first()
def get_default_host():
return get_default_site().root_url.split('://')[1]
class TestRedirects(TestCase):
def test_path_normalisation(self):
# Shortcut to normalise function (to keep things tidy)
normalise_path = models.Redirect.normalise_path
# Create a path
path = normalise_path('/Hello/world.html?foo=Bar&Baz=quux2')
        # Test against equivalent paths
self.assertEqual(path, normalise_path('/Hello/world.html?foo=Bar&Baz=quux2')) # The exact same URL
self.assertEqual(path, normalise_path('Hello/world.html?foo=Bar&Baz=quux2')) # Leading slash can be omitted
self.assertEqual(path, normalise_path('Hello/world.html/?foo=Bar&Baz=quux2')) # Trailing slashes are ignored
self.assertEqual(path, normalise_path('/Hello/world.html?foo=Bar&Baz=quux2#cool')) # Fragments are ignored
        self.assertEqual(path, normalise_path('/Hello/world.html?Baz=quux2&foo=Bar')) # Order of query string parameters is ignored
# Test against different paths
self.assertNotEqual(path, normalise_path('/hello/world.html?foo=Bar&Baz=quux2')) # 'hello' is lowercase
self.assertNotEqual(path, normalise_path('/Hello/world?foo=Bar&Baz=quux2')) # No '.html'
self.assertNotEqual(path, normalise_path('/Hello/world.html?foo=bar&Baz=Quux2')) # Query string parameters have wrong case
self.assertNotEqual(path, normalise_path('/Hello/world.html?foo=Bar&baz=quux2')) # ditto
self.assertNotEqual(path, normalise_path('/Hello/WORLD.html?foo=Bar&Baz=quux2')) # 'WORLD' is uppercase
self.assertNotEqual(path, normalise_path('/Hello/world.htm?foo=Bar&Baz=quux2')) # '.htm' is not the same as '.html'
# Normalise some rubbish to make sure it doesn't crash
normalise_path('This is not a URL')
normalise_path('//////hello/world')
normalise_path('!#@%$*')
normalise_path('C:\\Program Files (x86)\\Some random program\\file.txt')
def test_basic_redirect(self):
# Get a client
c = Client()
# Create a redirect
redirect = models.Redirect(old_path='/redirectme', redirect_link='/redirectto', site=get_default_site())
redirect.save()
# Navigate to it
r = c.get('/redirectme/', HTTP_HOST=get_default_host())
# Check that we were redirected
self.assertEqual(r.status_code, 301)
self.assertTrue(r.has_header('Location'))
def test_temporary_redirect(self):
# Get a client
c = Client()
# Create a redirect
redirect = models.Redirect(old_path='/redirectme', redirect_link='/redirectto', site=get_default_site(), is_permanent=False)
redirect.save()
# Navigate to it
r = c.get('/redirectme/', HTTP_HOST=get_default_host())
# Check that we were redirected temporarily
self.assertEqual(r.status_code, 302)
self.assertTrue(r.has_header('Location')) |
the-stack_0_2323 | import logging
import pytest
log = logging.getLogger("dexbot")
log.setLevel(logging.DEBUG)
@pytest.fixture()
def worker(strategybase):
return strategybase
@pytest.mark.mandatory
def test_init(worker):
pass
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_get_operational_balance(asset, worker, monkeypatch):
share = 0.1
def get_share(*args):
return share
symbol = worker.market[asset]['symbol']
balance = worker.balance(symbol)
op_balance = worker.get_operational_balance()
assert op_balance[asset] == balance['amount']
monkeypatch.setattr(worker, 'get_worker_share_for_asset', get_share)
op_balance = worker.get_operational_balance()
assert op_balance[asset] == balance['amount'] * share
|
the-stack_0_2324 | """
Please see
https://computationalmindset.com/en/neural-networks/ordinary-differential-equation-solvers.html#ode1
for details
"""
import numpy as np
import matplotlib.pyplot as plt
import torch
from torchdiffeq import odeint
ode_fn = lambda t, x: torch.sin(t) + 3. * torch.cos(2. * t) - x
an_sol = lambda t : (1./2.) * np.sin(t) - (1./2.) * np.cos(t) + \
(3./5.) * np.cos(2.*t) + (6./5.) * np.sin(2.*t) - \
(1./10.) * np.exp(-t)
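# The two callables above encode the IVP x'(t) = sin(t) + 3*cos(2*t) - x(t),
# x(0) = 0; an_sol is its exact solution (obtainable by the standard method for
# linear first-order ODEs), which the numerical result below is plotted against.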
t_begin=0.
t_end=10.
t_nsamples=100
t_space = np.linspace(t_begin, t_end, t_nsamples)
x_init = torch.Tensor([0.])
x_an_sol = an_sol(t_space)
x_num_sol = odeint(ode_fn, x_init, torch.Tensor(t_space))
plt.figure()
plt.plot(t_space, x_an_sol, '--', linewidth=2, label='analytical')
plt.plot(t_space, x_num_sol, linewidth=1, label='numerical')
plt.title('ODE 1st order IVP solved by TorchDiffEq')
plt.xlabel('t')
plt.ylabel('x')
plt.legend()
plt.show()
|
the-stack_0_2325 | #!/usr/bin/env python2
# coding: utf-8
import re
from collections import defaultdict
from pykit.dictutil import FixedKeysDict
from .block_id import BlockID
from .block_desc import BlockDesc
from .block_group_id import BlockGroupID
from .block_index import BlockIndex
from .replication_config import ReplicationConfig
class BlockGroupBaseError(Exception):
pass
class BlockNotFoundError(BlockGroupBaseError):
pass
class BlockExists(BlockGroupBaseError):
pass
class BlockTypeNotSupported(BlockGroupBaseError):
pass
class BlockTypeNotSupportReplica(BlockGroupBaseError):
pass
def _idcs(lst):
return list(lst)
def _blocks(blocks=None):
if blocks is None:
return {}
for idx, blk in blocks.items():
blocks[idx] = BlockDesc(blk)
return blocks
class BlockGroup(FixedKeysDict):
keys_default = dict(
block_group_id=BlockGroupID,
config=ReplicationConfig,
idcs=_idcs,
blocks=_blocks,
)
ident_keys = ('block_group_id',)
def __init__(self, *args, **kwargs):
super(BlockGroup, self).__init__(*args, **kwargs)
self.type_map = self.make_type_map()
def get_block_type(self, block_index):
mp = self.type_map
bi = BlockIndex(block_index)
try:
return mp[bi.i][bi.j]
except IndexError:
raise BlockTypeNotSupported('invalid index at {bi}'.format(bi=bi))
def make_type_map(self):
cnf = self['config']
nr_data, nr_parity = cnf['in_idc']
nr_in_idc, nr_xor_idc = cnf['cross_idc']
data_replica = cnf['data_replica']
rst = []
prefixes = ('d' * nr_in_idc
+ 'x' * nr_xor_idc)
for pref in prefixes:
o = [pref + '0'] * nr_data
o += [pref + 'p'] * nr_parity
for j in range(1, data_replica):
o += ['%s%d' % (pref, j)] * nr_data
rst.append(o)
return rst
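    # For illustration (config values assumed, not taken from a real cluster):
    # with cross_idc=[1, 1], in_idc=[2, 1] and data_replica=2, make_type_map
    # returns
    #   [['d0', 'd0', 'dp', 'd1', 'd1'],
    #    ['x0', 'x0', 'xp', 'x1', 'x1']]
    # i.e. one row per IDC: data blocks first, then parity, then the replicas.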
def mark_delete_block(self, block_index):
block = self.get_block(block_index, raise_error=True)
block.rm_ref()
if block.can_del():
block.mark_del()
return block
return None
def mark_delete_block_byid(self, block_id):
block = self.get_block_byid(block_id, raise_error=True)
block.rm_ref()
if block.can_del():
block.mark_del()
return block
return None
def unlink_block(self, block_index):
block = self.get_block(block_index, raise_error=True)
if not block.is_mark_del():
block.rm_ref()
if block.can_del():
del self['blocks'][str(block_index)]
return block
return None
def unlink_block_byid(self, block_id):
block = self.get_block_byid(block_id, raise_error=True)
if not block.is_mark_del():
block.rm_ref()
if block.can_del():
del self['blocks'][block_id.block_index]
return block
return None
def delete_block(self, block_index):
return self.unlink_block(block_index)
def delete_block_byid(self, block_id):
return self.unlink_block_byid(block_id)
def has(self, block):
bid = block['block_id']
bidx = bid.block_index
existent = self['blocks'].get(bidx)
return existent == block
def link_block(self, block_index):
block = self.get_block(block_index, raise_error=True)
block.add_ref()
return block
def link_block_byid(self, block_id):
block = self.get_block_byid(block_id, raise_error=True)
block.add_ref()
return block
def add_block(self, new_block, replace=False, allow_exist=False):
if self.has(new_block) and allow_exist:
return new_block
bid = new_block['block_id']
bidx = bid.block_index
prev = self['blocks'].get(bidx)
if not replace and prev is not None:
raise BlockExists(
'there is already a block at {bid}'.format(bid=bid))
self['blocks'][bidx] = new_block
if prev is None:
return None
else:
return BlockDesc(prev)
def get_free_block_indexes(self, block_type=None, get_all=False):
free_block_index = defaultdict(list)
cnf = self['config']
n = sum(cnf['cross_idc'])
m = sum(cnf['in_idc'])
for i in range(n):
for j in range(m):
bi = BlockIndex(i, j)
typ = self.get_block_type(bi)
idc = self.get_block_idc(bi)
if get_all:
# set the key 'idc' with default if key not set
free_block_index[idc]
if block_type is not None and typ != block_type:
continue
if self.get_block(bi, raise_error=False) is None:
free_block_index[idc].append(str(bi))
return free_block_index
def get_block(self, block_index, raise_error=True):
bi = BlockIndex(block_index)
b = self['blocks'].get(str(bi))
if raise_error and b is None:
raise BlockNotFoundError(
'block_index:{bi}'
' not found in block_group:{block_group_id}'.format(bi=bi, **self))
return b
def get_block_idc(self, block_index):
bi = BlockIndex(block_index)
return self['idcs'][bi.i]
def get_primary_index(self, block_index):
nr_data, nr_parity = self['config']['in_idc']
bi = BlockIndex(block_index)
j = bi.j
if j >= nr_data:
j -= nr_parity
j %= nr_data
return BlockIndex(bi.i, j)
def get_replica_indexes(self, block_index, include_me=True):
nr_data, nr_parity = self['config']['in_idc']
data_replica = self['config']['data_replica']
bi = BlockIndex(block_index)
typ = self.get_block_type(bi)
if typ.endswith('p'):
raise BlockTypeNotSupportReplica(
'block type {typ}'
' does not support replica'.format(typ=typ))
pbi = self.get_primary_index(block_index)
rst = [str(pbi)]
for j in range(1, data_replica):
rbi = BlockIndex(pbi.i,
pbi.j + nr_parity + j * nr_data)
rst.append(str(rbi))
# if not include_me and str(block_index) in rst:
if not include_me:
rst.remove(str(block_index))
return rst
def classify_blocks(self, idc_index, only_primary=True):
nr_data, nr_parity = self['config']['in_idc']
ec = []
replica = []
mark_del = []
for i in range(0, nr_data):
bi = BlockIndex(idc_index, i)
blk = self.get_block(bi, raise_error=False)
if blk is None:
continue
if blk.is_mark_del():
mark_del.append(blk)
continue
replica_idxes = self.get_replica_indexes(bi, include_me=False)
rblks = self.indexes_to_blocks(replica_idxes)
if None in rblks:
ec.append(blk)
continue
replica.append(blk)
if only_primary:
continue
replica.extend(rblks)
return {'ec': ec, 'replica': replica, 'mark_del': mark_del}
def indexes_to_blocks(self, indexes):
blks = []
for idx in indexes:
bi = BlockIndex(idx)
blk = self.get_block(bi, raise_error=False)
blks.append(blk)
return blks
def get_parity_indexes(self, idc_index):
indexes = []
nr_data, nr_parity = self['config']['in_idc']
for i in range(nr_data, nr_data + nr_parity):
bi = BlockIndex(idc_index, i)
indexes.append(bi)
return indexes
def get_parities(self, idc_index):
idxes = self.get_parity_indexes(idc_index)
blks = self.indexes_to_blocks(idxes)
return [blk for blk in blks if blk is not None]
def is_ec_block(self, block_id):
block_id = BlockID(block_id)
blk = self.get_block(block_id.block_index, raise_error=False)
if blk is None or blk['block_id'] != block_id:
raise BlockNotFoundError(
'block_id:{bid}'
' not found in block_group:{block_group_id}'.format(bid=block_id, **self))
if block_id.type.endswith('p'):
blk = self.get_block(block_id.block_index, raise_error=True)
return True
r_indexes = self.get_replica_indexes(block_id.block_index)
r_blks = [self.get_block(x, raise_error=False) for x in r_indexes]
return None in r_blks
def get_blocks(self):
blks = []
for idx in sorted(self['blocks'].keys()):
blk = self['blocks'][idx]
blks.append(blk)
return blks
def get_ec_blocks(self, idc_idx):
nr_data, nr_parity = self['config']['in_idc']
blks = []
for i in range(0, nr_data + nr_parity):
blk = self.get_block(BlockIndex(idc_idx, i), raise_error=False)
if blk is None:
continue
if self.is_ec_block(blk['block_id']):
blks.append(blk)
return blks
def get_ec_broken_blocks(self, idc_idx, broken_bids):
broken_blks = []
for blk in self.get_ec_blocks(idc_idx):
if blk['block_id'] in broken_bids:
broken_blks.append(blk)
return broken_blks
def get_ec_block_ids(self, idc_idx):
bids = []
for blk in self.get_ec_blocks(idc_idx):
bids.append(blk['block_id'])
return bids
def get_replica_blocks(self, block_id, include_me=True, raise_error=True):
block_id = BlockID(block_id)
r_indexes = self.get_replica_indexes(block_id.block_index, True)
is_exist = False
blks = []
for idx in r_indexes:
blk = self.get_block(idx, raise_error=False)
if blk is None:
continue
if blk['block_id'] == block_id:
is_exist = True
if not include_me:
continue
blks.append(blk)
if not is_exist:
if raise_error:
raise BlockNotFoundError(self['block_group_id'], block_id)
else:
return None
return blks
def get_block_byid(self, block_id, raise_error=True):
block_id = BlockID(block_id)
blk = self.get_block(block_id.block_index, raise_error=False)
if blk is None or blk['block_id'] != block_id:
if raise_error:
raise BlockNotFoundError(self['block_group_id'], block_id)
else:
return None
return blk
def get_idc_blocks(self, idc_idx, is_del=None, types=None):
blks = []
for idx in sorted(self['blocks'].keys()):
blk = self['blocks'][idx]
idx = BlockIndex(idx)
typ = self.get_block_type(idx)
if types is not None and typ not in types:
continue
if idx.i != idc_idx:
continue
if is_del is not None and blk['is_del'] != is_del:
continue
blks.append(blk)
return blks
def get_idc_blocks_no_replica(self, idc_idx, is_del=None):
types = ['d0', 'dp', 'x0', 'xp']
return self.get_idc_blocks(idc_idx, is_del=is_del, types=types)
def get_d0_idcs(self):
cross_idc = self["config"]["cross_idc"]
return self["idcs"][:cross_idc[0]]
def get_dtype_by_idc(self, idc):
cfg = self["config"]
assert idc in self["idcs"]
assert sum(cfg["cross_idc"]) == len(self["idcs"])
d0_idcs = self["idcs"][:cfg["cross_idc"][0]]
if idc in d0_idcs:
return "d0"
else:
return "x0"
def get_idc_block_ids(self, idc_idx, is_del=None, types=None):
blks = self.get_idc_blocks(idc_idx, is_del=is_del, types=types)
return [BlockID(b['block_id']) for b in blks]
def get_idc_block_ids_no_replica(self, idc_idx, is_del=None):
types = ['d0', 'dp', 'x0', 'xp']
return self.get_idc_block_ids(idc_idx, is_del=is_del, types=types)
@classmethod
def is_data(cls, block_id):
return block_id.type in ('d0', 'x0')
@classmethod
def is_replica(cls, block_id):
return re.match(r'd[1-9]', block_id.type) is not None
@classmethod
def is_parity(cls, block_id):
return block_id.type in ('dp', 'xp')
|
the-stack_0_2326 | # Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Qt build rules."""
load(
"//:build_defs.bzl",
"cc_binary_mozc",
"cc_library_mozc",
"select_mozc",
)
load(
"//:config.bzl",
"MACOS_BUNDLE_ID_PREFIX",
"MACOS_MIN_OS_VER",
"QT_BIN_PATH",
)
load("@build_bazel_rules_apple//apple:macos.bzl", "macos_application")
def cc_qt_library_mozc(name, deps = [], **kwargs):
cc_library_mozc(
name = name,
deps = deps + select_mozc(
default = ["//third_party/qt:qt_native"],
oss_linux = ["@io_qt//:qt"],
oss_macos = ["@io_qt//:qt_mac"],
),
**kwargs
)
def cc_qt_binary_mozc(name, deps = [], **kwargs):
cc_binary_mozc(
name = name,
deps = deps + select_mozc(
default = ["//third_party/qt:qt_native"],
oss_linux = ["@io_qt//:qt"],
oss_macos = ["@io_qt//:qt_mac"],
),
**kwargs
)
def qt_moc_mozc(name, srcs, outs):
native.genrule(
name = name,
srcs = srcs,
outs = outs,
cmd = select_mozc(
default = "$(location //third_party/qt:moc) -p $$(dirname $<) -o $@ $(SRCS)",
oss = QT_BIN_PATH + "moc -p $$(dirname $<) -o $@ $(SRCS)",
),
tools = select_mozc(
default = ["//third_party/qt:moc"],
oss = [],
),
)
def qt_uic_mozc(name, srcs, outs):
native.genrule(
name = name,
srcs = srcs,
outs = outs,
cmd = select_mozc(
default = "$(location //third_party/qt:uic) -o $@ $(SRCS)",
oss = QT_BIN_PATH + "uic -o $@ $(SRCS)",
),
tools = select_mozc(
default = ["//third_party/qt:uic"],
oss = [],
),
)
def qt_rcc_mozc(name, qrc_name, qrc_file, srcs, outs):
native.genrule(
name = name,
srcs = [qrc_file] + srcs,
outs = outs,
cmd = select_mozc(
default = "$(location //third_party/qt:rcc) -o $@ -name " + qrc_name + " " + qrc_file,
oss = QT_BIN_PATH + "rcc -o $@ -name " + qrc_name + " $(location " + qrc_file + ")",
),
tools = select_mozc(
default = ["//third_party/qt:rcc"],
oss = [],
),
)
def macos_qt_application_mozc(name, bundle_name, deps):
macos_application(
name = name,
tags = ["manual"],
additional_contents = select_mozc(
default = {},
oss = {"@io_qt//:libqcocoa": "Resources"},
),
app_icons = ["//data/images/mac:product_icon.icns"],
bundle_id = MACOS_BUNDLE_ID_PREFIX + ".Tool." + bundle_name,
bundle_name = bundle_name,
infoplists = ["//gui:mozc_tool_info_plist"],
minimum_os_version = MACOS_MIN_OS_VER,
resources = [
"//data/images/mac:candidate_window_logo.tiff",
"//gui:qt_conf",
],
visibility = ["//:__subpackages__"],
deps = deps + select_mozc(
default = [],
oss = [
"@io_qt//:QtCore_mac",
"@io_qt//:QtGui_mac",
"@io_qt//:QtPrintSupport_mac",
"@io_qt//:QtWidgets_mac",
],
),
)
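# Illustrative BUILD usage of the macros above (target and source names are
# assumptions, not taken from the Mozc tree):
#
#   qt_moc_mozc(
#       name = "example_moc",
#       srcs = ["example_window.h"],
#       outs = ["moc_example_window.cc"],
#   )
#
#   cc_qt_binary_mozc(
#       name = "example_tool",
#       srcs = ["example_main.cc", ":example_moc"],
#   )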
|
the-stack_0_2327 | from typing import Union
from pyrogram.types import Message, Audio, Voice
async def convert_count(count):
    if str(count) == "all":
        return "all"
    # "all" is handled before int() so non-numeric input does not raise ValueError.
    ordinals = {
        1: "First", 2: "Second", 3: "Third", 4: "Fourth", 5: "Fifth",
        6: "Sixth", 7: "Seventh", 8: "Eighth", 9: "Ninth", 10: "Tenth",
        11: "Eleventh", 12: "Twelfth", 13: "Thirteenth", 14: "Fourteenth",
        15: "Fifteenth",
    }
    return ordinals.get(int(count))
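# Illustrative behaviour of the helper above (examples assumed, not from the
# original source):
#   await convert_count(3)      -> "Third"
#   await convert_count("all")  -> "all"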
def get_url(message_1: Message) -> Union[str, None]:
messages = [message_1]
if message_1.reply_to_message:
messages.append(message_1.reply_to_message)
text = ""
offset = None
length = None
for message in messages:
if offset:
break
if message.entities:
for entity in message.entities:
if entity.type == "url":
text = message.text or message.caption
offset, length = entity.offset, entity.length
break
if offset in (None,):
return None
return text[offset:offset + length]
random_assistant = ["5", "1", "2", "3", "4"]
themes = ["LightBlue"]
def bytes(size: float) -> str:
"""humanize size"""
if not size:
return ""
power = 1024
t_n = 0
power_dict = {0: " ", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
t_n += 1
return "{:.2f} {}B".format(size, power_dict[t_n])
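# Quick illustrative check of the helper above (values assumed):
#   bytes(0)    -> ""
#   bytes(2048) -> "2.00 KiB"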
async def ass_det(assistant: int):
print(" 𝓡𝓲𝓭𝓱𝓪𝓶 𝓶𝓾𝓼𝓲𝓬𝓬")
|
the-stack_0_2328 | from absl import app, flags, logging
from absl.flags import FLAGS
import cv2
import os
import numpy as np
import tensorflow as tf
from modules.evaluations import get_val_data, perform_val
from modules.models import ArcFaceModel
from modules.utils import set_memory_growth, load_yaml, l2_norm
flags.DEFINE_string('cfg_path', './configs/arc_res50.yaml', 'config file path')
flags.DEFINE_string('gpu', '0', 'which gpu to use')
flags.DEFINE_string('img_path', '', 'path to input image')
def main(_argv):
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
logger = tf.get_logger()
logger.disabled = True
logger.setLevel(logging.FATAL)
cfg = load_yaml(FLAGS.cfg_path)
gpus = tf.config.list_logical_devices('GPU')
strategy = tf.distribute.MirroredStrategy(gpus)
with strategy.scope():
model = ArcFaceModel(size=cfg['input_size'],
backbone_type=cfg['backbone_type'],
training=False)
model.load_weights(cfg['pre_trained_model'])
print(model.summary())
print("[*] Loading LFW, AgeDB30 and CFP-FP...")
lfw, agedb_30, cfp_fp, lfw_issame, agedb_30_issame, cfp_fp_issame = \
get_val_data(cfg['test_dataset'])
print("[*] Perform Evaluation on LFW...")
acc_lfw, best_th = perform_val(
cfg['embd_shape'], cfg['batch_size'], model, lfw, lfw_issame,
is_ccrop=cfg['is_ccrop'])
print(" acc {:.4f}, th: {:.2f}".format(acc_lfw, best_th))
print("[*] Perform Evaluation on AgeDB30...")
acc_agedb30, best_th = perform_val(
cfg['embd_shape'], cfg['batch_size'], model, agedb_30,
agedb_30_issame, is_ccrop=cfg['is_ccrop'])
print(" acc {:.4f}, th: {:.2f}".format(acc_agedb30, best_th))
print("[*] Perform Evaluation on CFP-FP...")
acc_cfp_fp, best_th = perform_val(
cfg['embd_shape'], cfg['batch_size'], model, cfp_fp, cfp_fp_issame,
is_ccrop=cfg['is_ccrop'])
print(" acc {:.4f}, th: {:.2f}".format(acc_cfp_fp, best_th))
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
|
the-stack_0_2330 | # This file is execfile()d with the current directory set to its containing dir.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import shutil
# -- Path setup --------------------------------------------------------------
__location__ = os.path.dirname(__file__)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, "../src"))
# -- Run sphinx-apidoc -------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/readthedocs/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try: # for Sphinx >= 1.7
from sphinx.ext import apidoc
except ImportError:
from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/wai_data_tools")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
try:
import sphinx
cmd_line = f"sphinx-apidoc --implicit-namespaces -f -o {output_dir} {module_dir}"
args = cmd_line.split(" ")
if tuple(sphinx.__version__.split(".")) >= ("1", "7"):
# This is a rudimentary parse_version to avoid external dependencies
args = args[1:]
apidoc.main(args)
except Exception as e:
print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.autosummary",
"sphinx.ext.viewcode",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.ifconfig",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "wai_data_tools"
copyright = "2022, David Andersson"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# version: The short X.Y version.
# release: The full version, including alpha/beta/rc tags.
# If you don’t need the separation provided between version and release,
# just set them both to the same value.
try:
from wai_data_tools import __version__ as version
except ImportError:
version = ""
if not version or version.lower() == "unknown":
version = os.getenv("READTHEDOCS_VERSION", "unknown") # automatically set by RTD
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".venv"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"sidebar_width": "300px",
"page_width": "1200px"
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "wai_data_tools-doc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ("letterpaper" or "a4paper").
# "papersize": "letterpaper",
# The font size ("10pt", "11pt" or "12pt").
# "pointsize": "10pt",
# Additional stuff for the LaTeX preamble.
# "preamble": "",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "user_guide.tex", "wai_data_tools Documentation", "David Andersson", "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping --------------------------------------------------------
python_version = ".".join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
"sphinx": ("https://www.sphinx-doc.org/en/master", None),
"python": ("https://docs.python.org/" + python_version, None),
"matplotlib": ("https://matplotlib.org", None),
"numpy": ("https://numpy.org/doc/stable", None),
"sklearn": ("https://scikit-learn.org/stable", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
"setuptools": ("https://setuptools.readthedocs.io/en/stable/", None),
"pyscaffold": ("https://pyscaffold.org/en/stable", None),
}
print(f"loading configurations for {project} {version} ...", file=sys.stderr)
|
the-stack_0_2331 | import json
import urllib
import aiohttp
from aiocache import cached
client_session = None
@cached(ttl=3600)
async def get_ios_cfw():
"""Gets all apps on ios.cfw.guide
Returns
-------
dict
"ios, jailbreaks, devices"
"""
async with client_session.get("https://api.appledb.dev/main.json") as resp:
if resp.status == 200:
data = await resp.json()
return data
@cached(ttl=3600)
async def get_ipsw_firmware_info(version: str):
    """Gets firmware info for a given iOS version from api.ipsw.me
    Returns
    -------
    dict
        "firmware metadata for the requested version"
"""
async with client_session.get(f"https://api.ipsw.me/v4/ipsw/{version}") as resp:
if resp.status == 200:
data = await resp.json()
return data
return []
@cached(ttl=600)
async def get_dstatus_components():
async with client_session.get("https://discordstatus.com/api/v2/components.json") as resp:
if resp.status == 200:
components = await resp.json()
return components
@cached(ttl=600)
async def get_dstatus_incidents():
async with client_session.get("https://discordstatus.com/api/v2/incidents.json") as resp:
if resp.status == 200:
incidents = await resp.json()
return incidents
async def canister_search_package(query):
"""Search for a tweak in Canister's catalogue
Parameters
----------
query : str
"Query to search for"
Returns
-------
list
"List of packages that Canister found matching the query"
"""
async with client_session.get(f'https://api.canister.me/v1/community/packages/search?query={urllib.parse.quote(query)}&searchFields=identifier,name&responseFields=identifier,header,tintColor,name,price,description,packageIcon,repository.uri,repository.name,author,maintainer,latestVersion,nativeDepiction,depiction') as resp:
if resp.status == 200:
response = json.loads(await resp.text())
if response.get('status') == "Successful":
return response.get('data')
else:
return None
else:
return None
async def canister_search_repo(query):
"""Search for a repo in Canister's catalogue
Parameters
----------
query : str
"Query to search for"
Returns
-------
list
"List of repos that Canister found matching the query"
"""
async with client_session.get(f'https://api.canister.me/v1/community/repositories/search?query={urllib.parse.quote(query)}') as resp:
if resp.status == 200:
response = json.loads(await resp.text())
if response.get('status') == "Successful":
return response.get('data')
else:
return None
else:
return None
@cached(ttl=3600)
async def canister_fetch_repos():
async with client_session.get('https://api.canister.me/v1/community/repositories/search?ranking=1,2,3,4,5') as resp:
if resp.status == 200:
response = await resp.json(content_type=None)
return response.get("data")
return None
@cached(ttl=3600)
async def fetch_scam_urls():
async with client_session.get("https://raw.githubusercontent.com/SlimShadyIAm/Anti-Scam-Json-List/main/antiscam.json") as resp:
if resp.status == 200:
obj = json.loads(await resp.text())
return obj
async def init_client_session():
global client_session
client_session = aiohttp.ClientSession()
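# Illustrative usage (entry point assumed, not part of the original module):
#   import asyncio
#   async def _demo():
#       await init_client_session()
#       repos = await canister_fetch_repos()
#       print(len(repos) if repos else "no data")
#   asyncio.run(_demo())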
|
the-stack_0_2333 | import uuid
from django.db import models
from core.models.base import StandardModel
from core.models.base import CaseInsensitiveNamedModel
from core.models import Material
from core.models import Source
from core.models import SampleType
from core.models import Storage
from core.models import Project
from core.models import Note
from django.contrib.auth.models import User
from django.contrib.contenttypes import fields
from django.core.urlresolvers import reverse
from django.db import connection
from core import constants
from core import utils
from polymorphic.models import PolymorphicModel
import logging
logger = logging.getLogger(__name__)
class Sample(PolymorphicModel, CaseInsensitiveNamedModel):
STATUSES = utils.self_zip(constants.STANDARD_STATUSES)
sample_type = models.ForeignKey(SampleType)
material = models.ForeignKey(Material)
status = models.CharField(max_length=255,choices=STATUSES,default=constants.STATUS_ACTIVE)
owner = models.ForeignKey(User,null=True,blank=True)
source = models.ForeignKey(Source,null=True,blank=True)
lot = models.CharField(max_length=255, null=True, blank=True)
volume = models.CharField(max_length=255, null=True, blank=True)
concentration = models.CharField(max_length=255, null=True, blank=True)
concentration_units = models.CharField(max_length=255, null=True, blank=True)
project = models.ManyToManyField(Project,blank=True)
storage = models.ForeignKey(Storage,null=True, blank=True)
unit_count = models.CharField(max_length=255, null=True, blank=True)
notes = fields.GenericRelation(Note)
sample_links = models.ManyToManyField(
'self',
through='SampleToSample',
symmetrical=False,
related_name="linked_to",
blank=True
)
def _has_alert_note(self):
logger.debug('looking for alert note')
return self.notes.filter(note_type=constants.TYPE_ALERT).exists()
has_alert_note = property(_has_alert_note)
class Meta:
app_label = "core"
db_table = 'sample'
verbose_name_plural = 'samples'
unique_together = ("name",)
ordering = ['-date_created']
def save(self, *args, **kwargs):
if not self.name:
self.name = Sample.name_generator()
super(Sample, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('samples-detail', kwargs={'pk': self.pk})
def __str__(self):
return self.name
def add_sample_link(self, sample, link_type):
link, created = SampleToSample.objects.get_or_create(
source_sample=self,
target_sample=sample,
type=link_type
)
return link
def remove_sample_link(self, sample, link_type):
SampleToSample.objects.filter(
source_sample=self,
target_sample=sample,
type=link_type
).delete()
return
def get_sample_links(self, link_type):
return self.sample_links.filter(
target_samples__type=link_type,
target_samples__source_sample=self
)
def get_related_to(self, link_type):
return self.linked_to.filter(
source_samples__type=link_type,
source_samples__target_sample=self
)
def get_children(self):
logger.debug("in generic get children")
link_type = SampleLink.objects.get(name=constants.LINK_TYPE_CHILD)
return self.get_sample_links(link_type)
def get_parents(self):
logger.debug("in generic get parents")
link_type = SampleLink.objects.get(name=constants.LINK_TYPE_PARENT)
return self.get_related_to(link_type)
@classmethod
def name_generator(cls):
return "S-{0}".format(uuid.uuid4())
# get the next value in the sequence based on the record name
# record_1 would generate 2
# record_10 would generate 11
@staticmethod
def get_operational_index(value):
sql_string = """
select max(
to_number(
substring(name from char_length(%(value)s) + position(%(value)s in name)),
'999'
) + 1
) from sample
where name ~ (%(value)s || '[0-9]+$');
"""
index = 1
try:
cursor = connection.cursor()
cursor.execute(sql_string, {'value': value})
row = cursor.fetchone()
logger.debug(row)
index = row[0]
if index is None:
index = 1
except Exception as e:
logger.debug(e)
logger.debug("exception while looking up values")
index = 1
logger.debug("returning the following index {0}".format(index))
return index
Sample._meta.get_field('name').null = True
Sample._meta.get_field('name').blank = True
class SampleLink(StandardModel):
class Meta:
app_label = "core"
db_table = 'sample_link'
verbose_name_plural = 'sample links'
def __str__(self):
return self.name
class SampleToSample(models.Model):
source_sample = models.ForeignKey(Sample, related_name='source_samples')
target_sample = models.ForeignKey(Sample, related_name='target_samples')
type = models.ForeignKey(SampleLink)
class Meta:
app_label = "core"
db_table = 'sample_to_sample'
verbose_name_plural = 'sample to samples'
|
the-stack_0_2334 | # improvement_meta.py
# author: Ahmed Bin Zaman
# since: 02/2021
"""Module for improving the fitness of a conformation.
This module provides functionalities like local search to improve the
current fitness of a given conformation. The bookkeeping is for
metamorphic proteins with four native structures.
Available Classes:
- Improvement: Encapsulates the operations to improve fitness of a
conformation.
"""
import pyrosetta as pr
import math
class Improvement:
"""Encapsulates the operations to improve fitness of a conformation.
Provides functionalities like local search to improve the current
fitness of a given conformation.
Public Attributes:
    - native_pose1: Contains the first native pose provided in the
      constructor (pyrosetta Pose object).
    - native_pose2: Contains the second native pose provided in the
      constructor (pyrosetta Pose object).
    - native_pose3: Contains the third native pose provided in the
      constructor (pyrosetta Pose object).
    - native_pose4: Contains the fourth native pose provided in the
      constructor (pyrosetta Pose object).
- total_energy_evals: Total number of energy evaluations done in
all the operations performed (integer).
- last_op_energy_evals: Number of energy evaluations done in the
last operation performed (integer).
- min_ca_rmsd: Minimum Ca-RMSD value to the native conformation
among all the conformations generated in all the operations
(float)
- last_op_min_ca_rmsd: Minimum Ca-RMSD value to the native
conformation among all the conformations generated in the last
operation performed (float).
- min_ca_rmsd_pose: Conformation with minimum Ca-RMSD value to the
native conformation among all the conformations generated in all
the operations (pyrosetta Pose object).
- last_op_min_ca_rmsd_pose: Conformation with minimum Ca-RMSD value
to the native conformation among all the conformations generated
in the last operation performed (pyrosetta Pose object).
Available methods:
- local_search: Performs greedy local search to improve fitness of a
conformation.
"""
def __init__(self, native_pose1, native_pose2, native_pose3, native_pose4):
"""Constructor
Args:
            native_pose1, native_pose2, native_pose3, native_pose4: pyrosetta
                Pose objects containing the four native conformations. These
                are used for minimum Ca-RMSD calculation. If you don't need
                this calculation, or don't have a native conformation, just
                provide a random Pose object.
"""
self.native_pose1 = pr.Pose()
self.native_pose1.assign(native_pose1)
self.native_pose2 = pr.Pose()
self.native_pose2.assign(native_pose2)
self.native_pose3 = pr.Pose()
self.native_pose3.assign(native_pose3)
self.native_pose4 = pr.Pose()
self.native_pose4.assign(native_pose4)
self.total_energy_evals = 0
self.last_op_energy_evals = 0
self.min_ca_rmsd1 = math.inf
self.last_op_min_ca_rmsd1 = math.inf
self.min_ca_rmsd_pose1 = pr.Pose()
self.last_op_min_ca_rmsd_pose1 = pr.Pose()
self.min_ca_rmsd2 = math.inf
self.last_op_min_ca_rmsd2 = math.inf
self.min_ca_rmsd_pose2 = pr.Pose()
self.last_op_min_ca_rmsd_pose2 = pr.Pose()
self.min_ca_rmsd3 = math.inf
self.last_op_min_ca_rmsd3 = math.inf
self.min_ca_rmsd_pose3 = pr.Pose()
self.last_op_min_ca_rmsd_pose3 = pr.Pose()
self.min_ca_rmsd4 = math.inf
self.last_op_min_ca_rmsd4 = math.inf
self.min_ca_rmsd_pose4 = pr.Pose()
self.last_op_min_ca_rmsd_pose4 = pr.Pose()
def local_search(self, pose, mover, score_function, successive_failures):
"""Performs greedy local search to improve fitness of a
conformation.
This local search performs specific moves to map a conformation
to a nearby local minimum in the energy surface. The search is
terminated when a specific number of moves fail to improve the
score based on a specific fitness function.
Args:
pose: A pyrosetta Pose object containing initial
conformation.
mover: A pyrosetta Mover object derermining the moves in
local search.
score_function: A pyrosetta ScoreFunction object for scoring
each move.
successive_failures: An int indicating the threshold for
consecutive number of failed moves in each trajectory.
Returns:
A pyrosetta Pose object containing the conformation with
locally minimum fitness.
"""
local_minima = pr.Pose()
local_minima.assign(pose)
new_pose = pr.Pose()
new_pose.assign(pose)
self.last_op_min_ca_rmsd1 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose1, new_pose
)
self.last_op_min_ca_rmsd2 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose2, new_pose
)
self.last_op_min_ca_rmsd3 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose3, new_pose
)
self.last_op_min_ca_rmsd4 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose4, new_pose
)
local_minima_score = score_function(local_minima)
self.last_op_energy_evals = 1
failed = 0
# Perform greedy local search
while failed < successive_failures:
mover.apply(new_pose)
pose_ca_rmsd1 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose1, new_pose
)
pose_ca_rmsd2 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose2, new_pose
)
pose_ca_rmsd3 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose3, new_pose
)
pose_ca_rmsd4 = pr.rosetta.core.scoring.CA_rmsd(
self.native_pose4, new_pose
)
if pose_ca_rmsd1 < self.last_op_min_ca_rmsd1:
self.last_op_min_ca_rmsd1 = pose_ca_rmsd1
self.last_op_min_ca_rmsd_pose1.assign(new_pose)
if pose_ca_rmsd2 < self.last_op_min_ca_rmsd2:
self.last_op_min_ca_rmsd2 = pose_ca_rmsd2
self.last_op_min_ca_rmsd_pose2.assign(new_pose)
if pose_ca_rmsd3 < self.last_op_min_ca_rmsd3:
self.last_op_min_ca_rmsd3 = pose_ca_rmsd3
self.last_op_min_ca_rmsd_pose3.assign(new_pose)
if pose_ca_rmsd4 < self.last_op_min_ca_rmsd4:
self.last_op_min_ca_rmsd4 = pose_ca_rmsd4
self.last_op_min_ca_rmsd_pose4.assign(new_pose)
current_score = score_function(new_pose)
self.last_op_energy_evals += 1
if current_score < local_minima_score:
local_minima.assign(new_pose)
local_minima_score = current_score
failed = 0
else:
failed += 1
# Bookkeeping
self.total_energy_evals += self.last_op_energy_evals
if self.last_op_min_ca_rmsd1 < self.min_ca_rmsd1:
self.min_ca_rmsd1 = self.last_op_min_ca_rmsd1
self.min_ca_rmsd_pose1.assign(self.last_op_min_ca_rmsd_pose1)
if self.last_op_min_ca_rmsd2 < self.min_ca_rmsd2:
self.min_ca_rmsd2 = self.last_op_min_ca_rmsd2
self.min_ca_rmsd_pose2.assign(self.last_op_min_ca_rmsd_pose2)
if self.last_op_min_ca_rmsd3 < self.min_ca_rmsd3:
self.min_ca_rmsd3 = self.last_op_min_ca_rmsd3
self.min_ca_rmsd_pose3.assign(self.last_op_min_ca_rmsd_pose3)
if self.last_op_min_ca_rmsd4 < self.min_ca_rmsd4:
self.min_ca_rmsd4 = self.last_op_min_ca_rmsd4
self.min_ca_rmsd_pose4.assign(self.last_op_min_ca_rmsd_pose4)
return local_minima
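# Minimal usage sketch (illustrative only; file names, the mover choice and the
# failure threshold below are assumptions, not part of this module):
#   import pyrosetta as pr
#   pr.init()
#   natives = [pr.pose_from_pdb(p) for p in
#              ("nat1.pdb", "nat2.pdb", "nat3.pdb", "nat4.pdb")]
#   improver = Improvement(*natives)
#   start_pose = pr.pose_from_pdb("decoy.pdb")
#   score_fn = pr.get_fa_scorefxn()
#   # `mover` can be any pyrosetta Mover (e.g. a backbone perturbation mover).
#   local_min = improver.local_search(start_pose, mover, score_fn,
#                                     successive_failures=30)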
|
the-stack_0_2335 | import logging
from datetime import datetime
import xml.etree.ElementTree as ET
from indra.statements import *
from indra.statements.statements import Migration
from indra.statements.context import MovementContext
from indra.util import UnicodeXMLTreeBuilder as UTB
logger = logging.getLogger(__name__)
class CWMSError(Exception):
pass
POLARITY_DICT = {'CC': {'ONT::CAUSE': 1,
'ONT::INFLUENCE': 1},
'EVENT': {'ONT::INCREASE': 1,
'ONT::MODULATE': None,
'ONT::DECREASE': -1,
'ONT::INHIBIT': -1,
'ONT::TRANSFORM': None,
'ONT::STIMULATE': 1,
'ONT::ARRIVE': None,
'ONT::DEPART': None,
'ONT::MOVE': None,
'ONT::BE': None},
'EPI': {'ONT::ASSOCIATE': None}}
class CWMSProcessor(object):
"""The CWMSProcessor currently extracts causal relationships between
terms (nouns) in EKB. In the future, this processor can be extended to
extract other types of relations, or to extract relations involving
events.
For more details on the TRIPS EKB XML format, see
http://trips.ihmc.us/parser/cgi/drum
Parameters
----------
xml_string : str
A TRIPS extraction knowledge base (EKB) in XML format as a string.
Attributes
----------
tree : xml.etree.ElementTree.Element
An ElementTree object representation of the TRIPS EKB XML.
doc_id: str
Document ID
statements : list[indra.statements.Statement]
A list of INDRA Statements that were extracted from the EKB.
sentences : dict[str: str]
The list of all sentences in the EKB with their IDs
paragraphs : dict[str: str]
The list of all paragraphs in the EKB with their IDs
par_to_sec : dict[str: str]
A map from paragraph IDs to their associated section types
"""
def __init__(self, xml_string):
self.statements = []
# Parse XML
try:
self.tree = ET.XML(xml_string, parser=UTB())
except ET.ParseError:
logger.error('Could not parse XML string')
self.tree = None
return
# Get the document ID from the EKB tag.
self.doc_id = self.tree.attrib.get('id')
# Store all paragraphs and store all sentences in a data structure
paragraph_tags = self.tree.findall('input/paragraphs/paragraph')
sentence_tags = self.tree.findall('input/sentences/sentence')
self.paragraphs = {p.attrib['id']: p.text for p in paragraph_tags}
self.sentences = {s.attrib['id']: s.text for s in sentence_tags}
self.par_to_sec = {p.attrib['id']: p.attrib.get('sec-type')
for p in paragraph_tags}
# Keep a list of events that are part of relations and events
# subsumed by other events
self.relation_events = set()
self.subsumed_events = set()
# Keep a list of unhandled events for development purposes
self._unhandled_events = set()
self._preprocess_events()
def _preprocess_events(self):
events = self.tree.findall("EVENT/[type]")
for event in events:
affected = event.find("*[@role=':AFFECTED']")
if affected is not None:
affected_id = affected.attrib.get('id')
if affected_id:
self.subsumed_events.add(affected_id)
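    # Typical flow (illustrative; the EKB XML string is assumed to come from
    # the TRIPS/CWMS reader, it is not produced by this class):
    #   cp = CWMSProcessor(ekb_xml_string)
    #   cp.extract_causal_relations()
    #   influences = cp.statements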
def extract_causal_relations(self):
"""Extract Influence Statements from the EKB."""
relations = self.tree.findall("CC/[type]")
for relation in relations:
st = self.influence_from_relation(relation)
if st:
self.statements.append(st)
events = self.tree.findall("EVENT/[type]")
for event in events:
st = self.influence_from_event(event)
if st:
self.statements.append(st)
# In some EKBs we get two redundant relations over the same arguments,
# we eliminate these
self._remove_multi_extraction_artifacts()
# Print unhandled event types
logger.debug('Unhandled event types: %s' %
(', '.join(sorted(self._unhandled_events))))
def extract_events(self):
"""Extract standalone Events from the EKB."""
events = [(1, self.tree.findall("EVENT/[type='ONT::INCREASE']")),
(-1, self.tree.findall("EVENT/[type='ONT::DECREASE']"))]
for polarity, event_list in events:
for event_term in event_list:
event_id = event_term.attrib.get('id')
if event_id in self.subsumed_events or \
event_id in self.relation_events:
continue
event = self.event_from_event(event_term)
if event:
# Here we set the polarity based on the polarity implied by
# the increase/decrease here
event.delta.set_polarity(polarity)
self.statements.append(event)
self._remove_multi_extraction_artifacts()
def extract_migrations(self, include_relation_arg=False):
ev_types = ['ONT::MOVE', 'ONT::DEPART', 'ONT::ARRIVE']
events = []
for et in ev_types:
evs = self.tree.findall("EVENT/[type='%s']" % et)
events += evs
for event_term in events:
event_id = event_term.attrib.get('id')
if event_id in self.subsumed_events or \
(not include_relation_arg and
event_id in self.relation_events):
continue
event = self.migration_from_event(event_term)
if event is not None:
self.statements.append(event)
self._remove_multi_extraction_artifacts()
def extract_correlations(self):
correlations = self.tree.findall("EPI/[type='ONT::ASSOCIATE']")
for cor in correlations:
st = self._association_from_element(cor, 'EPI', 'NEUTRAL1',
'NEUTRAL2', False)
if st:
self.statements.append(st)
# self._remove_multi_extraction_artifacts()
def _influence_from_element(self, element, element_type, subj_arg,
obj_arg, is_arg):
components = self._statement_components_from_element(
element, element_type, subj_arg, obj_arg, is_arg)
if components is None:
return None
subj, obj, evidence, rel_type = components
# If the object polarity is not given explicitly, we set it
# based on the one implied by the relation
if obj.delta.polarity is None:
obj.delta.set_polarity(POLARITY_DICT[element_type][rel_type])
st = Influence(subj, obj, evidence=[evidence])
return st
def influence_from_relation(self, relation):
"""Return an Influence from a CC element in the EKB."""
return self._influence_from_element(relation, 'CC', 'FACTOR',
'OUTCOME', True)
def influence_from_event(self, event):
"""Return an Influence from an EVENT element in the EKB."""
return self._influence_from_element(event, 'EVENT', 'AGENT',
'AFFECTED', False)
def _statement_components_from_element(self, element, element_type,
member1_arg, member2_arg, is_arg):
element_id = element.attrib.get('id')
rel_type = element.find('type').text
if rel_type not in POLARITY_DICT[element_type]:
self._unhandled_events.add(rel_type)
return None
member1_id, member1_term = self._get_term_by_role(
element, member1_arg, is_arg)
member2_id, member2_term = self._get_term_by_role(
element, member2_arg, is_arg)
if member1_term is None or member2_term is None:
return None
member1 = self.get_event_or_migration(member1_term)
member2 = self.get_event_or_migration(member2_term)
if member1 is None or member2 is None:
return None
self.relation_events |= {member1_id, member2_id, element_id}
evidence = self._get_evidence(element)
return member1, member2, evidence, rel_type
def _association_from_element(self, element, element_type, member1_arg,
member2_arg, is_arg):
components = self._statement_components_from_element(
element, element_type, member1_arg, member2_arg, is_arg)
if components is None:
return None
member1, member2, evidence, _ = components
st = Association([member1, member2], evidence=[evidence])
return st
def event_from_event(self, event_term):
"""Return an Event from an EVENT element in the EKB."""
arg_id, arg_term = self._get_term_by_role(event_term, 'AFFECTED',
False)
if arg_term is None:
return None
# Make an Event statement if it is a standalone event
evidence = self._get_evidence(event_term)
event = self._get_event(arg_term, evidence=[evidence])
if event is None:
return None
event.context = self.get_context(event_term)
return event
def migration_from_event(self, event_term):
"""Return a Migration event from an EVENT element in the EKB."""
# First process at event level
migration_grounding = ('wm/concept/causal_factor/'
'social_and_political/migration')
concept_name = 'migration'
concept_db_refs = {'WM': migration_grounding}
# Get the element's text and use it to construct a Concept
element_text_element = event_term.find('text')
if element_text_element is not None:
element_text = element_text_element.text
concept_db_refs['TEXT'] = element_text
concept_name = sanitize_name(element_text)
concept = Concept(concept_name, db_refs=concept_db_refs)
evidence = self._get_evidence(event_term)
time = self._extract_time(event_term)
# Locations can be at different levels, keep expanding the list
locs = self._get_migration_locations(event_term)
neutral_id, neutral_term = self._get_term_by_role(event_term,
'NEUTRAL',
is_arg=False)
if neutral_term is not None:
locs = self._get_migration_locations(neutral_term, locs, 'origin')
# Arguments can be under AGENT or AFFECTED
agent_arg_id, agent_arg_term = self._get_term_by_role(
event_term, 'AGENT', False)
affected_arg_id, affected_arg_term = self._get_term_by_role(
event_term, 'AFFECTED', False)
if agent_arg_term is None and affected_arg_term is None:
context = MovementContext(locations=locs, time=time)
event = Migration(concept, context=context, evidence=[evidence])
return event
# If there are argument terms, extract more data from them
# Try to get the quantitative state associated with the event
size = None
for arg_term in [agent_arg_term, affected_arg_term]:
if arg_term is not None:
size_arg = arg_term.find('size')
if size_arg is not None and size_arg.attrib.get('id'):
size = self._get_size(size_arg.attrib['id'])
break
# Get more locations from arguments and inevents
if agent_arg_term is not None:
locs = self._get_migration_locations(
agent_arg_term, locs, 'destination')
inevent_term = self._get_inevent_term(agent_arg_term)
if inevent_term is not None:
locs = self._get_migration_locations(inevent_term, locs)
if time is None:
time = self._extract_time(inevent_term)
if size is None:
size = self._get_size_and_entity(inevent_term)
other_event_term = self._get_other_event_term(agent_arg_term)
if other_event_term is not None:
locs = self._get_migration_locations(other_event_term, locs)
if time is None:
time = self._extract_time(other_event_term)
if size is None:
size = self._get_size_and_entity(other_event_term)
if affected_arg_term is not None:
locs = self._get_migration_locations(
affected_arg_term, locs, 'destination')
context = MovementContext(locations=locs, time=time)
event = Migration(
concept, delta=size, context=context, evidence=[evidence])
return event
def _get_inevent_term(self, arg_term):
refset_arg = arg_term.find('refset')
if refset_arg is None:
return None
refset_id = refset_arg.attrib['id']
refset_term = self.tree.find("*[@id='%s']" % refset_id)
if refset_term is None:
return None
features = refset_term.find('features')
if features is None:
return None
inevent = features.find('inevent')
if inevent is None:
return None
inevent_id = inevent.attrib['id']
self.subsumed_events.add(inevent_id)
inevent_term = self.tree.find("*[@id='%s']" % inevent_id)
return inevent_term
def _get_other_event_term(self, arg_term):
refset_arg = arg_term.find('refset')
potential_events = self.tree.findall("EVENT/[type].//arg1/..") + \
self.tree.findall("EVENT/[type].//arg2/..")
for ev in potential_events:
arg1 = ev.find('arg1')
arg2 = ev.find('arg2')
for arg in [arg1, arg2]:
if arg is not None:
if refset_arg is not None:
if arg.attrib.get('id') == refset_arg.attrib.get('id'):
event_id = ev.attrib['id']
self.subsumed_events.add(event_id)
event_term = self.tree.find("*[@id='%s']"
% event_id)
return event_term
else:
# Refset might be on a different level
if arg.attrib.get('id'):
term = self.tree.find("*[@id='%s']" % arg.attrib['id'])
arg_refset_arg = term.find('refset')
if arg_refset_arg is not None:
if arg_refset_arg.attrib.get('id') == \
arg_term.attrib.get('id'):
event_id = ev.attrib['id']
self.subsumed_events.add(event_id)
event_term = self.tree.find("*[@id='%s']"
% event_id)
return event_term
return None
def _get_arg_event_term(self, term):
potential_args = term.findall('arg1') + term.findall('arg2')
for arg in potential_args:
if arg.attrib.get('id'):
new_term = self.tree.find("*[@id='%s']" % arg.attrib['id'])
if new_term is not None:
self.subsumed_events.add(new_term.attrib['id'])
return new_term
def _get_migration_locations(self, event_term, existing_locs=None,
default_role='unknown'):
if existing_locs is None:
existing_locs = []
new_locs = []
loc = self._extract_geoloc(event_term, arg_link='location')
if loc is not None:
new_locs.append({'location': loc,
'role': default_role})
loc = self._extract_geoloc(event_term, arg_link='to-location')
if loc is not None:
new_locs.append({'location': loc,
'role': 'destination'})
loc = self._extract_geoloc(event_term, arg_link='from-location')
if loc is not None:
new_locs.append({'location': loc,
'role': 'origin'})
for loc in new_locs:
if loc not in existing_locs:
existing_locs.append(loc)
return existing_locs
def _get_size(self, size_term_id):
size_term = self.tree.find("*[@id='%s']" % size_term_id)
value = size_term.find('value')
if value is None:
value = size_term.find('amount')
if value is not None:
mod = value.attrib.get('mod')
if mod and mod.lower() == 'almost':
mod = 'less_than'
value_txt = value.text
if value_txt is not None:
value_str = value.text.strip()
if value_str and not value_str.startswith('ONT') and \
not value_str.startswith('W'):
value = int(float(value_str))
else:
value = None
else:
value = None
unit = size_term.find('unit')
if unit is not None:
unit = unit.text.strip().lower()
else:
unit = 'absolute'
text = size_term.find('text').text
size = QuantitativeState(entity='person', value=value, unit=unit,
modifier=mod, text=text)
else:
size = None
return size
def _get_size_and_entity(self, event_term):
# For cases when entity (group) information and quantity are stored in
# different arguments and we can overwrite default 'person' entity
_, term1 = self._get_term_by_role(event_term, 'NEUTRAL', False)
_, term2 = self._get_term_by_role(event_term, 'NEUTRAL1', False)
size = None
if term1 is not None:
size_arg = term1.find('size')
if size_arg is not None and size_arg.attrib.get('id'):
size = self._get_size(size_arg.attrib['id'])
if size is not None and term2 is not None:
size.entity = term2.find('text').text
return size
def _get_term_by_role(self, term, role, is_arg):
"""Return the ID and the element corresponding to a role in a term."""
element = term.find("%s[@role=':%s']" % ('arg/' if is_arg else '*',
role))
if element is None:
return None, None
element_id = element.attrib.get('id')
if element_id is None:
return None, None
element_term = self.tree.find("*[@id='%s']" % element_id)
if element_term is None:
return None, None
return element_id, element_term
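    # For example (illustrative), _get_term_by_role(term, 'AGENT', is_arg=True)
    # searches for "arg/[@role=':AGENT']" under the term, reads its 'id' attribute,
    # and returns that id together with the element in the tree carrying the same id.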
def _get_event(self, event_term, evidence=None):
"""Extract and Event from the given EKB element."""
# Now see if there is a modifier like assoc-with connected
# to the main concept
assoc_with = self._get_assoc_with_text(event_term)
# Get the element's text and use it to construct a Concept
element_text_element = event_term.find('text')
if element_text_element is None:
return None
element_text = element_text_element.text
if element_text is None:
return None
element_db_refs = {'TEXT': element_text.rstrip()}
element_name = sanitize_name(element_text.rstrip())
element_type_element = event_term.find('type')
if element_type_element is not None:
element_db_refs['CWMS'] = element_type_element.text
# If there's an assoc-with, we tack it on as extra grounding
if assoc_with is not None:
element_db_refs['CWMS'] += ('|%s' % assoc_with)
concept = Concept(element_name, db_refs=element_db_refs)
ev_type = event_term.find('type').text
polarity = POLARITY_DICT['EVENT'].get(ev_type)
delta = QualitativeDelta(polarity=polarity)
context = self.get_context(event_term)
event_obj = Event(concept, delta=delta, context=context,
evidence=evidence)
return event_obj
def _get_wm_grounding(self, element):
wm_gr = None
wm_type_element = element.find('wm-type')
if wm_type_element is not None:
grounding_element = wm_type_element.find('grounding')
if grounding_element is not None:
wm_gr = (grounding_element.text, 0.7)
return wm_gr
def _add_start_end(self, term, starts, ends):
start = term.attrib.get('start')
end = term.attrib.get('end')
if start:
starts.append(int(start))
if end:
ends.append(int(end))
return starts, ends
def get_event_or_migration(self, event_term):
#if event_term.find('type').text in [
# 'ONT::MOVE', 'ONT::DEPART', 'ONT::ARRIVE']:
# return self.migration_from_event(event_term)
#else:
return self._get_event(event_term)
def get_context(self, element):
time = self._extract_time(element)
geoloc = self._extract_geoloc(element)
if time or geoloc:
context = WorldContext(time=time, geo_location=geoloc)
else:
context = None
return context
def _extract_time(self, term):
time = term.find('time')
if time is None:
time = term.find('features/time')
if time is None:
return None
time_id = time.attrib.get('id')
time_term = self.tree.find("*[@id='%s']" % time_id)
if time_term is None:
return None
text = sanitize_name(time_term.findtext('text'))
timex = time_term.find('timex')
if timex is not None:
start = self._process_timex(timex)
if start is not None:
time_context = TimeContext(text=text, start=start)
else:
time_context = TimeContext(text=text)
else:
start = None
end = None
from_time_el = time_term.find('from-time')
to_time_el = time_term.find('to-time')
if from_time_el is not None:
from_time_id = from_time_el.attrib.get('id')
from_time_term = self.tree.find("*[@id='%s']" % from_time_id)
                if from_time_term is not None:
timex = from_time_term.find('timex')
if timex is not None:
start = self._process_timex(timex)
if to_time_el is not None:
to_time_id = to_time_el.attrib.get('id')
to_time_term = self.tree.find("*[@id='%s']" % to_time_id)
if to_time_term is not None:
timex = to_time_term.find('timex')
if timex is not None:
end = self._process_timex(timex)
if start and end:
duration = int((end - start).total_seconds())
else:
duration = None
time_context = TimeContext(
text=text, start=start, end=end, duration=duration)
return time_context
@staticmethod
def _process_timex(timex):
year = timex.findtext('year')
month = timex.findtext('month')
day = timex.findtext('day')
if year or month or day:
try:
year = int(year)
except Exception:
year = None
try:
# Month can be represented either by name, short name or
# number (October, Oct or 10)
month = int(month)
except Exception:
try:
month = datetime.strptime(month, '%B').month
except Exception:
try:
month = datetime.strptime(month, '%b').month
except Exception:
month = 1
try:
day = int(day)
except Exception:
day = 1
if year and month and day:
time = datetime(year, month, day)
return time
return None
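    # Illustrative behaviour (hypothetical <timex> children): year=2019 with month
    # given as 'October', 'Oct' or '10' and no day all resolve to datetime(2019, 10, 1);
    # an unparseable month or day falls back to 1.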
def _extract_geoloc(self, term, arg_link='location'):
"""Get the location from a term (CC or TERM)"""
loc = term.find(arg_link)
if loc is None:
return None
loc_id = loc.attrib.get('id')
loc_term = self.tree.find("*[@id='%s']" % loc_id)
if loc_term is None:
return None
text = loc_term.findtext('text')
grounding = loc_term.find('grounding')
db_refs = {}
if grounding is not None:
places = grounding.findall('place')
for place in places:
nsid = place.attrib.get('id')
db_ns, db_id = nsid.split(':')
if db_ns == 'GNO':
db_ns = 'GEOID'
# TODO: name spaces are sometimes repeated in the EKB, here we
# silently overwrite a key if it already exists
db_refs[db_ns] = db_id
# name = loc_term.findtext('name')
geoloc_context = RefContext(name=text, db_refs=db_refs)
return geoloc_context
def _get_assoc_with_text(self, element_term):
# NOTE: there could be multiple assoc-withs here that we may
# want to handle
assoc_with = element_term.find('assoc-with')
if assoc_with is not None:
# We first identify the ID of the assoc-with argument
assoc_with_id = assoc_with.attrib.get('id')
# In some cases the assoc-with has no ID but has a type
# defined in place that we can get
if assoc_with_id is None:
assoc_with_grounding = assoc_with.find('type').text
return assoc_with_grounding
# If the assoc-with has an ID then find the TERM
# corresponding to it
assoc_with_term = self.tree.find("*[@id='%s']" % assoc_with_id)
if assoc_with_term is not None:
# We then get the grounding for the term
assoc_with_grounding = assoc_with_term.find('type').text
return assoc_with_grounding
return None
def _get_assoc_with_term(self, element_term):
assoc_with = element_term.find('assoc-with')
if assoc_with is not None:
assoc_with_id = assoc_with.attrib.get('id')
if assoc_with_id is not None:
assoc_with_term = self.tree.find("*[@id='%s']" % assoc_with_id)
return assoc_with_term
def _get_evidence(self, event_tag):
text = self._get_evidence_text(event_tag)
sec = self._get_section(event_tag)
epi = {'direct': False}
if sec:
epi['section_type'] = sec
ev = Evidence(source_api='cwms', text=text, pmid=self.doc_id,
epistemics=epi)
return ev
def _get_evidence_text(self, event_tag):
"""Extract the evidence for an event.
Pieces of text linked to an EVENT are fragments of a sentence. The
EVENT refers to the paragraph ID and the "uttnum", which corresponds
to a sentence ID. Here we find and return the full sentence from which
the event was taken.
"""
par_id = event_tag.attrib.get('paragraph')
uttnum = event_tag.attrib.get('uttnum')
event_text = event_tag.find('text')
if self.sentences is not None and uttnum is not None:
sentence = self.sentences[uttnum]
elif event_text is not None:
sentence = event_text.text
else:
sentence = None
return sentence
def _get_section(self, event_tag):
par_id = event_tag.attrib.get('paragraph')
sec = self.par_to_sec.get(par_id)
return sec
def _remove_multi_extraction_artifacts(self):
# Build up a dict of evidence matches keys with statement UUIDs
evmks = {}
logger.debug('Starting with %d Statements.' % len(self.statements))
for stmt in self.statements:
if isinstance(stmt, Event):
evmk = stmt.evidence[0].matches_key() + \
stmt.concept.matches_key()
elif isinstance(stmt, Influence):
evmk = (stmt.evidence[0].matches_key() +
stmt.subj.matches_key() + stmt.obj.matches_key())
elif isinstance(stmt, Association):
evmk = (stmt.evidence[0].matches_key() +
stmt.members[0].matches_key() +
stmt.members[1].matches_key())
if evmk not in evmks:
evmks[evmk] = [stmt.uuid]
else:
evmks[evmk].append(stmt.uuid)
# This is a list of groups of statement UUIDs that are redundant
multi_evmks = [v for k, v in evmks.items() if len(v) > 1]
# We now figure out if anything needs to be removed
to_remove = []
# Remove redundant statements
for uuids in multi_evmks:
# Influence statements to be removed
infl_stmts = [s for s in self.statements if (
s.uuid in uuids and isinstance(s, Influence))]
infl_stmts = sorted(infl_stmts, key=lambda x: x.polarity_count(),
reverse=True)
to_remove += [s.uuid for s in infl_stmts[1:]]
            # Association statements to be removed
            assn_stmts = [s for s in self.statements if (
                s.uuid in uuids and isinstance(s, Association))]
            assn_stmts = sorted(assn_stmts, key=lambda x: x.polarity_count(),
                                reverse=True)
            to_remove += [s.uuid for s in assn_stmts[1:]]
# Standalone events to be removed
events = [s for s in self.statements if (
s.uuid in uuids and isinstance(s, Event))]
events = sorted(events, key=lambda x: event_delta_score(x),
reverse=True)
to_remove += [e.uuid for e in events[1:]]
# Remove all redundant statements
if to_remove:
logger.debug('Found %d Statements to remove' % len(to_remove))
self.statements = [s for s in self.statements
if s.uuid not in to_remove]
class CWMSProcessorCompositional(CWMSProcessor):
def _get_event(self, event_term, evidence=None):
"""Extract and Event from the given EKB element."""
# Now see if there is a modifier like assoc-with connected
# to the main concept
assoc_with = self._get_assoc_with_text(event_term)
# We're using a union of texts from multiple terms instead
# Get the element's text and use it to construct a Concept
# element_text_element = event_term.find('text')
# if element_text_element is None:
# return None
# element_text = element_text_element.text
# element_db_refs = {'TEXT': element_text}
# element_name = sanitize_name(element_text)
element_db_refs = {}
par = event_term.attrib['paragraph']
starts, ends = self._add_start_end(event_term, [], [])
element_type_element = event_term.find('type')
if element_type_element is not None:
element_db_refs['CWMS'] = element_type_element.text
# If there's an assoc-with, we tack it on as extra grounding
if assoc_with is not None:
element_db_refs['CWMS'] += ('|%s' % assoc_with)
theme_gr, theme_prop_gr, theme_proc_gr, theme_proc_prop_gr = \
None, None, None, None
# Grounding can be provided on multiple levels
theme_gr = self._get_wm_grounding(event_term)
if not theme_gr:
arg_term = self._get_arg_event_term(event_term)
if arg_term is not None:
starts, ends = self._add_start_end(arg_term, starts, ends)
assoc_term = self._get_assoc_with_term(arg_term)
if assoc_term is not None:
starts, ends = self._add_start_end(
assoc_term, starts, ends)
new_arg_term = self._get_arg_event_term(assoc_term)
# Theme grounding is usually at the "deepest" level
if new_arg_term is not None:
starts, ends = self._add_start_end(
new_arg_term, starts, ends)
theme_gr = self._get_wm_grounding(new_arg_term)
theme_proc_gr = self._get_wm_grounding(assoc_term)
theme_proc_prop_gr = self._get_wm_grounding(arg_term)
else:
theme_gr = self._get_wm_grounding(assoc_term)
extra_gr = self._get_wm_grounding(arg_term)
# This can be process or property, look at ontology
if extra_gr:
if 'process' in extra_gr[0]:
theme_proc_gr = extra_gr
else:
theme_prop_gr = extra_gr
# Get a union of all texts
element_text = self.paragraphs[par][min(starts): max(ends)].rstrip()
element_db_refs['TEXT'] = element_text
element_name = sanitize_name(element_text)
# Promote process grounding to theme if theme is missing
if not theme_gr and theme_proc_gr:
theme_gr = theme_proc_gr
theme_proc_gr = None
        # Drop process property grounding if the process grounding is missing
if not theme_proc_gr:
theme_proc_prop_gr = None
# Only add WM grounding if there's a theme grounding
if theme_gr:
element_db_refs['WM'] = [(theme_gr, theme_prop_gr, theme_proc_gr,
theme_proc_prop_gr)]
concept = Concept(element_name, db_refs=element_db_refs)
ev_type = event_term.find('type').text
polarity = POLARITY_DICT['EVENT'].get(ev_type)
delta = QualitativeDelta(polarity=polarity)
context = self.get_context(event_term)
event_obj = Event(concept, delta=delta, context=context,
evidence=evidence)
return event_obj
def sanitize_name(txt):
name = txt.replace('\n', '')
return name
def event_delta_score(stmt):
if stmt.delta is None:
return 0
pol_score = 1 if stmt.delta.polarity is not None else 0
if isinstance(stmt.delta, QualitativeDelta):
adj_score = len(stmt.delta.adjectives)
return (pol_score + adj_score)
if isinstance(stmt.delta, QuantitativeState):
value_score = 1 if stmt.delta.value is not None else 0
return (pol_score + value_score)
|
the-stack_0_2337 | import logging
import re
import typing as t
import requests
from . import const
HOST = "https://{endpoint}.api.pvp.net"
# Commonly used types (for type hints)
Params = t.Dict[str, str]
JSON = t.Dict[str, t.Any]
l = logging.getLogger(__name__)
api_key = None # type: str
###################################################################################################
def _build_url(url_base: str, region: str, **kwargs: t.Any):
if url_base.startswith("/"):
url_base = HOST + url_base
kwargs.setdefault('endpoint', region)
kwargs.setdefault('region', region)
kwargs.setdefault('platform', const.Platform[region])
return url_base.format(**kwargs)
def _get_data(url: str, params: Params = None) -> JSON:
if not params:
params = {}
params.setdefault('api_key', api_key)
l.debug("Requesting '%s' with params: %s", url, params)
r = requests.get(url, params=params)
return r.json()
def _staticdata(variant: str, params: Params = None, region="euw") -> JSON:
url = _build_url("/api/lol/static-data/{region}/v1.2/{variant}",
region=region, endpoint='global', variant=variant)
return _get_data(url, params)
def _standardize_summoner_name(summoner_name: str) -> str:
# The standardized summoner name
# is the summoner name in all lower case
# and with spaces removed.
return re.sub(r"\s", "", summoner_name.lower())
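# For example (illustrative): _standardize_summoner_name("Foo Bar") == "foobar".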
###################################################################################################
def set_key(key: str):
global api_key
api_key = key
def format_status(data: JSON) -> str:
return "Status code: {status_code}, message: {message}".format(**data['status'])
def get_champions(params: Params = None) -> JSON:
return _staticdata("champion", params)
def get_versions() -> JSON:
return _staticdata("versions")
def get_summoner_id(region: str, summoner_name: str) -> t.Optional[int]:
"""Determine ID of a summoner by name.
Returns None if summoner name is not found.
"""
standardized_name = _standardize_summoner_name(summoner_name)
url = _build_url("/api/lol/{region}/v1.4/summoner/by-name/{summoner_name}",
region=region, summoner_name=standardized_name)
result = _get_data(url)
if standardized_name not in result:
return None
else:
return result[standardized_name]['id']
def get_current_game_info(region: str, summoner_id: int) -> t.Optional[JSON]:
url = _build_url("/observer-mode/rest/consumer/getSpectatorGameInfo/{platform}/{summoner_id}",
region=region, summoner_id=summoner_id)
result = _get_data(url)
if 'status' in result:
if result['status']['status_code'] == 404: # not in-game
return None
else:
l.error("Non-standard result! %s", format_status(result))
return None
else:
return result
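# Minimal usage sketch (the key and summoner name below are hypothetical):
#
#     set_key("RGAPI-...")                               # register the API key first
#     summoner_id = get_summoner_id("euw", "Some Name")  # None if the name is unknown
#     if summoner_id is not None:
#         game = get_current_game_info("euw", summoner_id)  # None when not in-game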
|
the-stack_0_2338 | #!/usr/bin/env python3
import random
import os
import asyncpg
from quart import Quart, jsonify, make_response, request, render_template
app = Quart(__name__)
GET_WORLD = "select id,randomnumber from world where id = $1"
UPDATE_WORLD = "update world set randomNumber = $2 where id = $1"
@app.before_serving
async def connect_to_db():
app.db = await asyncpg.create_pool(
user=os.getenv("PGUSER", "benchmarkdbuser"),
password=os.getenv("PGPASS", "benchmarkdbpass"),
database="hello_world",
host="tfb-database",
port=5432,
)
@app.after_serving
async def disconnect_from_db():
await app.db.close()
@app.route("/json")
async def json():
return {"message": "Hello, World!"}
@app.route("/plaintext")
async def plaintext():
response = await make_response(b"Hello, World!")
# Quart assumes string responses are 'text/html', so make a custom one
response.mimetype = "text/plain"
return response
@app.route("/db")
async def db():
async with app.db.acquire() as conn:
key = random.randint(1, 10000)
number = await conn.fetchval(GET_WORLD, key)
return jsonify({"id": key, "randomNumber": number})
def get_query_count(args):
qc = args.get("queries")
if qc is None:
return 1
try:
qc = int(qc)
except ValueError:
return 1
qc = max(qc, 1)
qc = min(qc, 500)
return qc
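# Clamping behaviour (illustrative): a missing or non-numeric "queries" argument
# yields 1, "0" yields 1, "50" yields 50 and "9999" is capped to 500.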
@app.route("/queries")
async def queries():
queries = get_query_count(request.args)
worlds = []
async with app.db.acquire() as conn:
pst = await conn.prepare(GET_WORLD)
for _ in range(queries):
key = random.randint(1, 10000)
number = await pst.fetchval(key)
worlds.append({"id": key, "randomNumber": number})
return jsonify(worlds)
@app.route("/updates")
async def updates():
queries = get_query_count(request.args)
new_worlds = []
async with app.db.acquire() as conn, conn.transaction():
pst = await conn.prepare(GET_WORLD)
for _ in range(queries):
key = random.randint(1, 10000)
old_number = await pst.fetchval(key)
new_number = random.randint(1, 10000)
new_worlds.append((key, new_number))
await conn.executemany(UPDATE_WORLD, new_worlds)
return jsonify(
[{"id": key, "randomNumber": new_number} for key, new_number in new_worlds]
)
@app.route("/fortunes")
async def fortunes():
async with app.db.acquire() as conn:
rows = await conn.fetch("select * from fortune")
rows.append((0, "Additional fortune added at request time."))
rows.sort(key=lambda row: row[1])
return await render_template("fortunes.html", fortunes=rows)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
|
the-stack_0_2339 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib as ml
import matplotlib.pyplot as plt
import _settings
import os.path
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import colors, ticker, cm
from math import log10
def logspace_for(data, n=20):
"""log10-spaced bins for some data"""
data = np.asarray(data)
top = data.max()
bot = data.min()
logtop = log10(top)
logbot = log10(bot)
logspace = np.linspace(logbot, logtop, n, endpoint=True)
space = 10.0 ** logspace
space[0]=bot
space[-1]=top
return space
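# For example (illustrative): logspace_for([1, 10, 1000], n=4) returns
# array([1., 10., 100., 1000.]), with the end points pinned to the data extremes.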
def contour_cum_histo(x, y,
title="",
xlabel="",
ylabel="",
n_bins=50,
n_levels=10,
x_cap_pc=100.0,
y_cap_pc=100.0,
cmap=None):
"histogram with approx equal-occupancy contour lines"
if cmap is None: cmap = plt.cm.bone_r
x_cap = np.percentile(x, x_cap_pc)
y_cap = np.percentile(y, y_cap_pc)
mask = (x<=x_cap) & (y<=y_cap)
x_capped = x[mask]
y_capped = y[mask]
H, xedges, yedges = np.histogram2d(
x_capped, y_capped,
bins=(n_bins, n_bins),
normed=True)
H_sorted = np.sort(H.flatten())
H_cum = H_sorted.cumsum()
# more precise version at https://gist.github.com/adrn/3993992
levels = H_sorted[H_cum.searchsorted(np.linspace(1.0/n_levels*H_cum[-1], H_cum[-1], n_levels, endpoint=True))]
level_labels = np.linspace(0, 100.0*(1-1.0/n_levels), n_levels, endpoint=True)
#lowest_bin = np.percentile(H[H>0].flatten(), 5.0) #Ignore bottom 5%
#levels = np.power(10,np.arange(np.ceil(np.log(lowest_bin)),np.ceil(np.log(H.max())), 0.5))
#levels = np.concatenate([[0.0], levels])
#levels = np.percentile(H.flatten(), np.linspace(0.0, 100.0, n_levels, endpoint=True))
#extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]] #axes transposed for histograms
fig = plt.figure()
ax = plt.gca()
#points = plt.scatter(
# y_capped, x_capped,
# marker="x"
#)
cset = plt.contourf(H,
levels=levels,
cmap=cmap,
#origin='lower',
#colors=['black','green','blue','red'],
#locator=ticker.LogLocator(),
#linewidths=(1.9, 1.6, 1.5, 1.4),
extent=extent
)
fset = plt.contour(H,
levels=levels,
#origin='lower',
colors=['red'],
#locator=ticker.LogLocator(),
#linewidths=(1.9, 1.6, 1.5, 1.4),
extent=extent,
hold='on'
)
# Make a colorbar for the ContourSet returned by the contourf call.
#cbar = plt.colorbar(cset)
#cbar.ax.set_ylabel('verbosity coefficient')
# Add the contour line levels to the colorbar
#cbar.add_lines(fset)
#plt.clabel(cset, inline=1, fontsize=10, fmt='%1.0i')
#for c in cset.collections:
    # c.set_linestyle('solid')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return fig, ax, cset, fset #, cbar
def counts_for(timestamps):
"small helper to create an index vector"
return np.arange(1, timestamps.size+1)
def plot_timestamps(timestamps, ax=None, **kwargs):
ax = ax if ax else plt.gca()
return plt.plot(timestamps, counts_for(timestamps), **kwargs)
def plot_point_ts_series(tseries, ax=None, **kwargs):
ax = ax if ax else plt.gca()
return plt.plot(tseries.index, tseries, **kwargs)
def plot_ts(ts_frame, ax=None, **kwargs):
ax = ax if ax else plt.gca()
return ax.plot(ts_frame.run_time, ts_frame.view_count, **kwargs)
def multisave(basename, fig=None, dpi=300, **kwargs):
basedir = getattr(_settings, 'FIGURES', None)
fig = fig if fig else plt.gcf()
if basedir:
basename = os.path.join(basedir, basename)
#Aggressively prevent file handle leakage
    # savefig writes binary data for these formats, so open the files in binary mode
    with open(basename + ".png", "wb") as h:
        fig.savefig(h, format="png", dpi=dpi)
    with open(basename + ".pdf", "wb") as h:
        fig.savefig(h, format="pdf")
    with open(basename + ".svg", "wb") as h:
        fig.savefig(h, format="svg")
#return fig
def plot_ts_rates(ts_frame, ax=None,
title=None,
scale=3600*24, **kwargs):
ax = ax if ax else plt.gca()
vid = ts_frame.iloc[0,0]
if title is None:
title = "Estimated rate for {!r}".format(vid)
ax.step(
pd.to_datetime(ts_frame.run_time[1:] * scale, unit='s'),
ts_frame.rate[1:],
**kwargs)
#ax.set_xlabel('time')
ax.set_ylabel('approx. intensity (views/day)')
ax.set_title(title)
ax.figure.autofmt_xdate()
return ax
def diagnose_ts(ts_frame, **kwargs):
    fig, axes = plt.subplots(nrows=1, ncols=2)
    # Left panel: cumulative view counts; right panel: estimated view rate.
    ax = axes[0]
    plot_ts(ts_frame, ax=ax, **kwargs)
    ax.set_xlabel('run time')
    ax.set_ylabel('view count')
    ax.set_title('cumulative views')
    plot_ts_rates(ts_frame, ax=axes[1])
    fig.tight_layout()
    return fig, axes
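# Minimal usage sketch for this module (x, y are hypothetical 1-D numpy arrays):
#
#     fig, ax, cset, fset = contour_cum_histo(x, y, xlabel="size", ylabel="rate")
#     multisave("size_vs_rate", fig)  # writes .png/.pdf/.svg (under _settings.FIGURES when configured)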
|
the-stack_0_2341 | import pandas as pd
import numpy as np
import random
import logging
import cv2
import sys
sys.path.append("../DataProcessing/")
from ImageTransformer import ImageTransformer
class DataProcessor:
@staticmethod
def ProcessTrainData(trainPath, image_height, image_width, isGray = False, isExtended=False):
"""Reads the .pickle file and converts it into a format suitable fot training
Parameters
----------
trainPath : str
The file location of the .pickle
image_height : int
Please...
image_width : int
Please...
isGray : bool, optional
True is the dataset is of 1-channel (gray) images, False if RGB
isExtended : bool, optional
True if the dataset contains both head and hand pose and you wish to retrieve both
Returns
-------
list
list of video frames and list of labels (poses)
"""
train_set = pd.read_pickle(trainPath).values
logging.info('[DataProcessor] train shape: ' + str(train_set.shape))
size = len(train_set[:, 0])
n_val = int(float(size) * 0.2)
#n_val = 13000
np.random.seed(100)
# split between train and test sets:
x_train = train_set[:, 0]
x_train = np.vstack(x_train[:]).astype(np.float32)
if isGray == True:
x_train = np.reshape(x_train, (-1, image_height, image_width, 1))
else:
x_train = np.reshape(x_train, (-1, image_height, image_width, 3))
x_train= np.swapaxes(x_train, 1, 3)
x_train = np.swapaxes(x_train, 2, 3)
y_train = train_set[:, 1]
y_train = np.vstack(y_train[:]).astype(np.float32)
ix_val, ix_tr = np.split(np.random.permutation(train_set.shape[0]), [n_val])
x_validation = x_train[ix_val, :]
x_train = x_train[ix_tr, :]
y_validation = y_train[ix_val, :]
y_train = y_train[ix_tr, :]
shape_ = len(x_train)
sel_idx = random.sample(range(0, shape_), k=(size-n_val))
#sel_idx = random.sample(range(0, shape_), k=50000)
x_train = x_train[sel_idx, :]
y_train = y_train[sel_idx, :]
if isExtended == True:
z_train = train_set[:, 2]
z_train = np.vstack(z_train[:]).astype(np.float32)
z_validation = z_train[ix_val, :]
z_train = z_train[ix_tr, :]
z_train = z_train[sel_idx, :]
return [x_train, x_validation, y_train, y_validation, z_train, z_validation]
return [x_train, x_validation, y_train, y_validation]
@staticmethod
def ProcessTestData(testPath, image_height, image_width, isGray = False, isExtended=False):
"""Reads the .pickle file and converts it into a format suitable fot testing
Parameters
----------
testPath : str
The file location of the .pickle
image_height : int
Please...
image_width : int
Please...
isGray : bool, optional
True is the dataset is of 1-channel (gray) images, False if RGB
isExtended : bool, optional
True if the dataset contains both head and hand pose and you wish to retrieve both
Returns
-------
list
list of video frames and list of labels (poses)
"""
test_set = pd.read_pickle(testPath).values
logging.info('[DataProcessor] test shape: ' + str(test_set.shape))
x_test = test_set[:, 0]
x_test = np.vstack(x_test[:]).astype(np.float32)
if isGray == True:
x_test = np.reshape(x_test, (-1, image_height, image_width, 1))
else:
x_test = np.reshape(x_test, (-1, image_height, image_width, 3))
x_test = np.swapaxes(x_test, 1, 3)
x_test = np.swapaxes(x_test, 2, 3)
y_test = test_set[:, 1]
y_test = np.vstack(y_test[:]).astype(np.float32)
if isExtended ==True:
z_test = test_set[:, 2]
z_test = np.vstack(z_test[:]).astype(np.float32)
return [x_test, y_test, z_test]
return [x_test, y_test]
@staticmethod
def ExtractValidationLabels(testPath, image_height, image_width, isGray = False):
"""Reads the .pickle file and converts it into a format suitable for testing on pulp
You need to create a folder called test though
Parameters
----------
testPath : str
The file location of the .pickle
image_height : int
Please...
image_width : int
Please...
isGray : bool, optional
True is the dataset is of 1-channel (gray) images, False if RGB
"""
test_set = pd.read_pickle(testPath).values
logging.info('[DataProcessor] test shape: ' + str(test_set.shape))
x_test = test_set[:, 0]
x_test = np.vstack(x_test[:]).astype(np.float32)
if isGray == True:
x_test = np.reshape(x_test, (-1, image_height, image_width, 1))
else:
x_test = np.reshape(x_test, (-1, image_height, image_width, 3))
x_test = np.swapaxes(x_test, 1, 3)
x_test = np.swapaxes(x_test, 2, 3)
y_test = test_set[:, 1]
y_test = np.vstack(y_test[:]).astype(np.float32)
f = open("test/labels.txt", "w")
for i in range(0, len(x_test)):
data = x_test[i]
data = np.swapaxes(data, 0, 2)
data = np.swapaxes(data, 0, 1)
data = np.reshape(data, (60, 108))
img = np.zeros((244, 324), np.uint8)
img[92:152, 108:216] = data
cv2.imwrite("test/{}.pgm".format(i), img)
label = y_test[i]
f.write("{},{},{},{}\n".format(label[0], label[1],label[2],label[3]))
f.close()
@staticmethod
def ProcessInferenceData(images, image_height, image_width, isGray=False):
"""Converts a list of images into a format suitable fot inference
Parameters
----------
images : list
list of images
image_height : int
Please...
image_width : int
Please...
isGray : bool, optional
True is the dataset is of 1-channel (gray) images, False if RGB
Returns
-------
list
list of video frames and list of labels (poses, which are garbage)
"""
x_test = np.stack(images, axis=0).astype(np.float32)
if isGray == True:
x_test = np.reshape(x_test, (-1, image_height, image_width, 1))
else:
x_test = np.reshape(x_test, (-1, image_height, image_width, 3))
x_test = np.swapaxes(x_test, 1, 3)
x_test = np.swapaxes(x_test, 2, 3)
y_test = [0, 0, 0, 0] * len(x_test)
y_test = np.vstack(y_test[:]).astype(np.float32)
y_test = np.reshape(y_test, (-1, 4))
return [x_test, y_test]
@staticmethod
def CreateGreyPickle(trainPath, image_height, image_width, file_name):
"""Converts Dario's RGB dataset to a gray + vignette dataset
Parameters
----------
images : list
list of images
image_height : int
Please...
image_width : int
Please...
file_name : str
name of the new .pickle
"""
train_set = pd.read_pickle(trainPath).values
logging.info('[DataProcessor] train shape: ' + str(train_set.shape))
# split between train and test sets:
x_train = train_set[:, 0]
x_train = np.vstack(x_train[:])
x_train = np.reshape(x_train, (-1, image_height, image_width, 3))
it = ImageTransformer()
x_train_grey = []
sigma = 50
mask = it.GetVignette(image_width, image_width, sigma)
for i in range(len(x_train)):
gray_image = cv2.cvtColor(x_train[i], cv2.COLOR_RGB2GRAY)
gray_image = gray_image * mask[24:84, 0:108]
gray_image = gray_image.astype(np.uint8)
x_train_grey.append(gray_image)
y_train = train_set[:, 1]
df = pd.DataFrame(data={'x': x_train_grey, 'y': y_train})
df.to_pickle(file_name)
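# Minimal usage sketch (hypothetical pickle paths; 60x108 frames as used above):
#
#     x_tr, x_val, y_tr, y_val = DataProcessor.ProcessTrainData(
#         "train.pickle", 60, 108, isGray=True)
#     x_te, y_te = DataProcessor.ProcessTestData("test.pickle", 60, 108, isGray=True)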
|
the-stack_0_2342 | # engine/reflection.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides an abstraction for obtaining database schema information.
Usage Notes:
Here are some general conventions when accessing the low level inspector
methods such as get_table_names, get_columns, etc.
1. Inspector methods return lists of dicts in most cases for the following
reasons:
* They're both standard types that can be serialized.
* Using a dict instead of a tuple allows easy expansion of attributes.
* Using a list for the outer structure maintains order and is easy to work
with (e.g. list comprehension [d['name'] for d in cols]).
2. Records that contain a name, such as the column name in a column record
use the key 'name'. So for most return values, each record will have a
   'name' attribute.
"""
import contextlib
from .base import Connectable
from .base import Connection
from .base import Engine
from .. import exc
from .. import inspection
from .. import sql
from .. import util
from ..sql import operators
from ..sql import schema as sa_schema
from ..sql.type_api import TypeEngine
from ..util import topological
@util.decorator
def cache(fn, self, con, *args, **kw):
info_cache = kw.get("info_cache", None)
if info_cache is None:
return fn(self, con, *args, **kw)
key = (
fn.__name__,
tuple(a for a in args if isinstance(a, util.string_types)),
tuple((k, v) for k, v in kw.items() if k != "info_cache"),
)
ret = info_cache.get(key)
if ret is None:
ret = fn(self, con, *args, **kw)
info_cache[key] = ret
return ret
@inspection._self_inspects
class Inspector(object):
"""Performs database schema inspection.
The Inspector acts as a proxy to the reflection methods of the
:class:`~sqlalchemy.engine.interfaces.Dialect`, providing a
consistent interface as well as caching support for previously
fetched metadata.
A :class:`.Inspector` object is usually created via the
:func:`.inspect` function, which may be passed an :class:`.Engine`
or a :class:`.Connection`::
from sqlalchemy import inspect, create_engine
engine = create_engine('...')
insp = inspect(engine)
Where above, the :class:`~sqlalchemy.engine.interfaces.Dialect` associated
with the engine may opt to return an :class:`.Inspector` subclass that
provides additional methods specific to the dialect's target database.
"""
@util.deprecated(
"1.4",
"The __init__() method on :class:`.Inspector` is deprecated and "
"will be removed in a future release. Please use the "
":func:`.sqlalchemy.inspect` "
"function on an :class:`.Engine` or :class:`.Connection` in order to "
"acquire an :class:`.Inspector`.",
)
def __init__(self, bind):
"""Initialize a new :class:`.Inspector`.
:param bind: a :class:`~sqlalchemy.engine.Connectable`,
which is typically an instance of
:class:`~sqlalchemy.engine.Engine` or
:class:`~sqlalchemy.engine.Connection`.
For a dialect-specific instance of :class:`.Inspector`, see
:meth:`.Inspector.from_engine`
"""
return self._init_legacy(bind)
@classmethod
def _construct(cls, init, bind):
if hasattr(bind.dialect, "inspector"):
cls = bind.dialect.inspector
self = cls.__new__(cls)
init(self, bind)
return self
def _init_legacy(self, bind):
if hasattr(bind, "exec_driver_sql"):
self._init_connection(bind)
else:
self._init_engine(bind)
def _init_engine(self, engine):
self.bind = self.engine = engine
engine.connect().close()
self._op_context_requires_connect = True
self.dialect = self.engine.dialect
self.info_cache = {}
def _init_connection(self, connection):
self.bind = connection
self.engine = connection.engine
self._op_context_requires_connect = False
self.dialect = self.engine.dialect
self.info_cache = {}
@classmethod
@util.deprecated(
"1.4",
"The from_engine() method on :class:`.Inspector` is deprecated and "
"will be removed in a future release. Please use the "
":func:`.sqlalchemy.inspect` "
"function on an :class:`.Engine` or :class:`.Connection` in order to "
"acquire an :class:`.Inspector`.",
)
def from_engine(cls, bind):
"""Construct a new dialect-specific Inspector object from the given
engine or connection.
:param bind: a :class:`~sqlalchemy.engine.Connectable`,
which is typically an instance of
:class:`~sqlalchemy.engine.Engine` or
:class:`~sqlalchemy.engine.Connection`.
This method differs from direct a direct constructor call of
:class:`.Inspector` in that the
:class:`~sqlalchemy.engine.interfaces.Dialect` is given a chance to
provide a dialect-specific :class:`.Inspector` instance, which may
provide additional methods.
See the example at :class:`.Inspector`.
"""
return cls._construct(cls._init_legacy, bind)
@inspection._inspects(Connectable)
def _connectable_insp(bind):
# this method should not be used unless some unusual case
# has subclassed "Connectable"
return Inspector._construct(Inspector._init_legacy, bind)
@inspection._inspects(Engine)
def _engine_insp(bind):
return Inspector._construct(Inspector._init_engine, bind)
@inspection._inspects(Connection)
def _connection_insp(bind):
return Inspector._construct(Inspector._init_connection, bind)
@contextlib.contextmanager
def _operation_context(self):
"""Return a context that optimizes for multiple operations on a single
transaction.
This essentially allows connect()/close() to be called if we detected
that we're against an :class:`.Engine` and not a :class:`.Connection`.
"""
if self._op_context_requires_connect:
conn = self.bind.connect()
else:
conn = self.bind
try:
yield conn
finally:
if self._op_context_requires_connect:
conn.close()
@contextlib.contextmanager
def _inspection_context(self):
"""Return an :class:`.Inspector` from this one that will run all
operations on a single connection.
"""
with self._operation_context() as conn:
sub_insp = self._construct(self.__class__._init_connection, conn)
sub_insp.info_cache = self.info_cache
yield sub_insp
@property
def default_schema_name(self):
"""Return the default schema name presented by the dialect
for the current engine's database user.
E.g. this is typically ``public`` for PostgreSQL and ``dbo``
for SQL Server.
"""
return self.dialect.default_schema_name
def get_schema_names(self):
"""Return all schema names.
"""
if hasattr(self.dialect, "get_schema_names"):
with self._operation_context() as conn:
return self.dialect.get_schema_names(
conn, info_cache=self.info_cache
)
return []
def get_table_names(self, schema=None):
"""Return all table names in referred to within a particular schema.
The names are expected to be real tables only, not views.
Views are instead returned using the :meth:`.Inspector.get_view_names`
method.
:param schema: Schema name. If ``schema`` is left at ``None``, the
database's default schema is
used, else the named schema is searched. If the database does not
support named schemas, behavior is undefined if ``schema`` is not
passed as ``None``. For special quoting, use :class:`.quoted_name`.
:param order_by: Optional, may be the string "foreign_key" to sort
the result on foreign key dependencies. Does not automatically
resolve cycles, and will raise :class:`.CircularDependencyError`
if cycles exist.
.. seealso::
:meth:`.Inspector.get_sorted_table_and_fkc_names`
:attr:`.MetaData.sorted_tables`
"""
with self._operation_context() as conn:
return self.dialect.get_table_names(
conn, schema, info_cache=self.info_cache
)
def has_table(self, table_name, schema=None):
"""Return True if the backend has a table of the given name.
.. versionadded:: 1.4
"""
# TODO: info_cache?
with self._operation_context() as conn:
return self.dialect.has_table(conn, table_name, schema)
def get_sorted_table_and_fkc_names(self, schema=None):
"""Return dependency-sorted table and foreign key constraint names in
referred to within a particular schema.
This will yield 2-tuples of
``(tablename, [(tname, fkname), (tname, fkname), ...])``
consisting of table names in CREATE order grouped with the foreign key
constraint names that are not detected as belonging to a cycle.
The final element
will be ``(None, [(tname, fkname), (tname, fkname), ..])``
which will consist of remaining
foreign key constraint names that would require a separate CREATE
step after-the-fact, based on dependencies between tables.
        .. versionadded:: 1.0.0
.. seealso::
:meth:`.Inspector.get_table_names`
:func:`.sort_tables_and_constraints` - similar method which works
with an already-given :class:`.MetaData`.
"""
with self._operation_context() as conn:
tnames = self.dialect.get_table_names(
conn, schema, info_cache=self.info_cache
)
tuples = set()
remaining_fkcs = set()
fknames_for_table = {}
for tname in tnames:
fkeys = self.get_foreign_keys(tname, schema)
fknames_for_table[tname] = set([fk["name"] for fk in fkeys])
for fkey in fkeys:
if tname != fkey["referred_table"]:
tuples.add((fkey["referred_table"], tname))
try:
candidate_sort = list(topological.sort(tuples, tnames))
except exc.CircularDependencyError as err:
for edge in err.edges:
tuples.remove(edge)
remaining_fkcs.update(
(edge[1], fkc) for fkc in fknames_for_table[edge[1]]
)
candidate_sort = list(topological.sort(tuples, tnames))
return [
(tname, fknames_for_table[tname].difference(remaining_fkcs))
for tname in candidate_sort
] + [(None, list(remaining_fkcs))]
def get_temp_table_names(self):
"""return a list of temporary table names for the current bind.
This method is unsupported by most dialects; currently
only SQLite implements it.
.. versionadded:: 1.0.0
"""
with self._operation_context() as conn:
return self.dialect.get_temp_table_names(
conn, info_cache=self.info_cache
)
def get_temp_view_names(self):
"""return a list of temporary view names for the current bind.
This method is unsupported by most dialects; currently
only SQLite implements it.
.. versionadded:: 1.0.0
"""
with self._operation_context() as conn:
return self.dialect.get_temp_view_names(
conn, info_cache=self.info_cache
)
def get_table_options(self, table_name, schema=None, **kw):
"""Return a dictionary of options specified when the table of the
given name was created.
This currently includes some options that apply to MySQL tables.
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
if hasattr(self.dialect, "get_table_options"):
with self._operation_context() as conn:
return self.dialect.get_table_options(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
return {}
def get_view_names(self, schema=None):
"""Return all view names in `schema`.
:param schema: Optional, retrieve names from a non-default schema.
For special quoting, use :class:`.quoted_name`.
"""
with self._operation_context() as conn:
return self.dialect.get_view_names(
conn, schema, info_cache=self.info_cache
)
def get_view_definition(self, view_name, schema=None):
"""Return definition for `view_name`.
:param schema: Optional, retrieve names from a non-default schema.
For special quoting, use :class:`.quoted_name`.
"""
with self._operation_context() as conn:
return self.dialect.get_view_definition(
conn, view_name, schema, info_cache=self.info_cache
)
def get_columns(self, table_name, schema=None, **kw):
"""Return information about columns in `table_name`.
Given a string `table_name` and an optional string `schema`, return
column information as a list of dicts with these keys:
* ``name`` - the column's name
* ``type`` - the type of this column; an instance of
:class:`~sqlalchemy.types.TypeEngine`
* ``nullable`` - boolean flag if the column is NULL or NOT NULL
* ``default`` - the column's server default value - this is returned
as a string SQL expression.
* ``autoincrement`` - indicates that the column is auto incremented -
this is returned as a boolean or 'auto'
        * ``comment`` - (optional) the comment on the column. Only some
dialects return this key
* ``computed`` - (optional) when present it indicates that this column
is computed by the database. Only some dialects return this key.
Returned as a dict with the keys:
* ``sqltext`` - the expression used to generate this column returned
as a string SQL expression
* ``persisted`` - (optional) boolean that indicates if the column is
stored in the table
.. versionadded:: 1.3.16 - added support for computed reflection.
* ``dialect_options`` - (optional) a dict with dialect specific options
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
:return: list of dictionaries, each representing the definition of
a database column.
"""
with self._operation_context() as conn:
col_defs = self.dialect.get_columns(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
for col_def in col_defs:
# make this easy and only return instances for coltype
coltype = col_def["type"]
if not isinstance(coltype, TypeEngine):
col_def["type"] = coltype()
return col_defs
def get_pk_constraint(self, table_name, schema=None, **kw):
"""Return information about primary key constraint on `table_name`.
Given a string `table_name`, and an optional string `schema`, return
primary key information as a dictionary with these keys:
constrained_columns
a list of column names that make up the primary key
name
optional name of the primary key constraint.
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
with self._operation_context() as conn:
return self.dialect.get_pk_constraint(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
def get_foreign_keys(self, table_name, schema=None, **kw):
"""Return information about foreign_keys in `table_name`.
Given a string `table_name`, and an optional string `schema`, return
foreign key information as a list of dicts with these keys:
constrained_columns
a list of column names that make up the foreign key
referred_schema
the name of the referred schema
referred_table
the name of the referred table
referred_columns
a list of column names in the referred table that correspond to
constrained_columns
name
optional name of the foreign key constraint.
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
with self._operation_context() as conn:
return self.dialect.get_foreign_keys(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
def get_indexes(self, table_name, schema=None, **kw):
"""Return information about indexes in `table_name`.
Given a string `table_name` and an optional string `schema`, return
index information as a list of dicts with these keys:
name
the index's name
column_names
list of column names in order
unique
boolean
column_sorting
optional dict mapping column names to tuple of sort keywords,
which may include ``asc``, ``desc``, ``nullsfirst``, ``nullslast``.
.. versionadded:: 1.3.5
dialect_options
dict of dialect-specific index options. May not be present
for all dialects.
.. versionadded:: 1.0.0
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
with self._operation_context() as conn:
return self.dialect.get_indexes(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
def get_unique_constraints(self, table_name, schema=None, **kw):
"""Return information about unique constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
unique constraint information as a list of dicts with these keys:
name
the unique constraint's name
column_names
list of column names in order
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
"""
with self._operation_context() as conn:
return self.dialect.get_unique_constraints(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
def get_table_comment(self, table_name, schema=None, **kw):
"""Return information about the table comment for ``table_name``.
Given a string ``table_name`` and an optional string ``schema``,
return table comment information as a dictionary with these keys:
text
text of the comment.
Raises ``NotImplementedError`` for a dialect that does not support
comments.
.. versionadded:: 1.2
"""
with self._operation_context() as conn:
return self.dialect.get_table_comment(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
def get_check_constraints(self, table_name, schema=None, **kw):
"""Return information about check constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
check constraint information as a list of dicts with these keys:
name
the check constraint's name
sqltext
the check constraint's SQL expression
dialect_options
may or may not be present; a dictionary with additional
dialect-specific options for this CHECK constraint
.. versionadded:: 1.3.8
:param table_name: string name of the table. For special quoting,
use :class:`.quoted_name`.
:param schema: string schema name; if omitted, uses the default schema
of the database connection. For special quoting,
use :class:`.quoted_name`.
.. versionadded:: 1.1.0
"""
with self._operation_context() as conn:
return self.dialect.get_check_constraints(
conn, table_name, schema, info_cache=self.info_cache, **kw
)
@util.deprecated_20(
":meth:`.Inspector.reflecttable`",
"The :meth:`.Inspector.reflecttable` method was renamed to "
":meth:`.Inspector.reflect_table`. This deprecated alias "
"will be removed in a future release.",
)
def reflecttable(self, *args, **kwargs):
"See reflect_table. This method name is deprecated"
return self.reflect_table(*args, **kwargs)
def reflect_table(
self,
table,
include_columns,
exclude_columns=(),
resolve_fks=True,
_extend_on=None,
):
"""Given a Table object, load its internal constructs based on
introspection.
This is the underlying method used by most dialects to produce
table reflection. Direct usage is like::
from sqlalchemy import create_engine, MetaData, Table
from sqlalchemy.engine.reflection import Inspector
engine = create_engine('...')
meta = MetaData()
user_table = Table('user', meta)
insp = Inspector.from_engine(engine)
insp.reflect_table(user_table, None)
.. versionchanged:: 1.4 Renamed from ``reflecttable`` to
``reflect_table``
:param table: a :class:`~sqlalchemy.schema.Table` instance.
:param include_columns: a list of string column names to include
in the reflection process. If ``None``, all columns are reflected.
"""
if _extend_on is not None:
if table in _extend_on:
return
else:
_extend_on.add(table)
dialect = self.bind.dialect
with self._operation_context() as conn:
schema = conn.schema_for_object(table)
table_name = table.name
# get table-level arguments that are specifically
# intended for reflection, e.g. oracle_resolve_synonyms.
# these are unconditionally passed to related Table
# objects
reflection_options = dict(
(k, table.dialect_kwargs.get(k))
for k in dialect.reflection_options
if k in table.dialect_kwargs
)
# reflect table options, like mysql_engine
tbl_opts = self.get_table_options(
table_name, schema, **table.dialect_kwargs
)
if tbl_opts:
# add additional kwargs to the Table if the dialect
# returned them
table._validate_dialect_kwargs(tbl_opts)
if util.py2k:
if isinstance(schema, str):
schema = schema.decode(dialect.encoding)
if isinstance(table_name, str):
table_name = table_name.decode(dialect.encoding)
found_table = False
cols_by_orig_name = {}
for col_d in self.get_columns(
table_name, schema, **table.dialect_kwargs
):
found_table = True
self._reflect_column(
table,
col_d,
include_columns,
exclude_columns,
cols_by_orig_name,
)
if not found_table:
raise exc.NoSuchTableError(table.name)
self._reflect_pk(
table_name, schema, table, cols_by_orig_name, exclude_columns
)
self._reflect_fk(
table_name,
schema,
table,
cols_by_orig_name,
exclude_columns,
resolve_fks,
_extend_on,
reflection_options,
)
self._reflect_indexes(
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
)
self._reflect_unique_constraints(
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
)
self._reflect_check_constraints(
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
)
self._reflect_table_comment(
table_name, schema, table, reflection_options
)
def _reflect_column(
self, table, col_d, include_columns, exclude_columns, cols_by_orig_name
):
orig_name = col_d["name"]
table.dispatch.column_reflect(self, table, col_d)
# fetch name again as column_reflect is allowed to
# change it
name = col_d["name"]
if (include_columns and name not in include_columns) or (
exclude_columns and name in exclude_columns
):
return
coltype = col_d["type"]
col_kw = dict(
(k, col_d[k])
for k in [
"nullable",
"autoincrement",
"quote",
"info",
"key",
"comment",
]
if k in col_d
)
if "dialect_options" in col_d:
col_kw.update(col_d["dialect_options"])
colargs = []
if col_d.get("default") is not None:
default = col_d["default"]
if isinstance(default, sql.elements.TextClause):
default = sa_schema.DefaultClause(default, _reflected=True)
elif not isinstance(default, sa_schema.FetchedValue):
default = sa_schema.DefaultClause(
sql.text(col_d["default"]), _reflected=True
)
colargs.append(default)
if "computed" in col_d:
computed = sa_schema.Computed(**col_d["computed"])
colargs.append(computed)
if "sequence" in col_d:
self._reflect_col_sequence(col_d, colargs)
cols_by_orig_name[orig_name] = col = sa_schema.Column(
name, coltype, *colargs, **col_kw
)
if col.key in table.primary_key:
col.primary_key = True
table.append_column(col)
def _reflect_col_sequence(self, col_d, colargs):
if "sequence" in col_d:
# TODO: mssql and sybase are using this.
seq = col_d["sequence"]
sequence = sa_schema.Sequence(seq["name"], 1, 1)
if "start" in seq:
sequence.start = seq["start"]
if "increment" in seq:
sequence.increment = seq["increment"]
colargs.append(sequence)
def _reflect_pk(
self, table_name, schema, table, cols_by_orig_name, exclude_columns
):
pk_cons = self.get_pk_constraint(
table_name, schema, **table.dialect_kwargs
)
if pk_cons:
pk_cols = [
cols_by_orig_name[pk]
for pk in pk_cons["constrained_columns"]
if pk in cols_by_orig_name and pk not in exclude_columns
]
# update pk constraint name
table.primary_key.name = pk_cons.get("name")
# tell the PKConstraint to re-initialize
# its column collection
table.primary_key._reload(pk_cols)
def _reflect_fk(
self,
table_name,
schema,
table,
cols_by_orig_name,
exclude_columns,
resolve_fks,
_extend_on,
reflection_options,
):
fkeys = self.get_foreign_keys(
table_name, schema, **table.dialect_kwargs
)
for fkey_d in fkeys:
conname = fkey_d["name"]
# look for columns by orig name in cols_by_orig_name,
# but support columns that are in-Python only as fallback
constrained_columns = [
cols_by_orig_name[c].key if c in cols_by_orig_name else c
for c in fkey_d["constrained_columns"]
]
if exclude_columns and set(constrained_columns).intersection(
exclude_columns
):
continue
referred_schema = fkey_d["referred_schema"]
referred_table = fkey_d["referred_table"]
referred_columns = fkey_d["referred_columns"]
refspec = []
if referred_schema is not None:
if resolve_fks:
sa_schema.Table(
referred_table,
table.metadata,
autoload=True,
schema=referred_schema,
autoload_with=self.bind,
_extend_on=_extend_on,
**reflection_options
)
for column in referred_columns:
refspec.append(
".".join([referred_schema, referred_table, column])
)
else:
if resolve_fks:
sa_schema.Table(
referred_table,
table.metadata,
autoload=True,
autoload_with=self.bind,
schema=sa_schema.BLANK_SCHEMA,
_extend_on=_extend_on,
**reflection_options
)
for column in referred_columns:
refspec.append(".".join([referred_table, column]))
if "options" in fkey_d:
options = fkey_d["options"]
else:
options = {}
table.append_constraint(
sa_schema.ForeignKeyConstraint(
constrained_columns,
refspec,
conname,
link_to_name=True,
**options
)
)
_index_sort_exprs = [
("asc", operators.asc_op),
("desc", operators.desc_op),
("nullsfirst", operators.nullsfirst_op),
("nullslast", operators.nullslast_op),
]
def _reflect_indexes(
self,
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
):
# Indexes
indexes = self.get_indexes(table_name, schema)
for index_d in indexes:
name = index_d["name"]
columns = index_d["column_names"]
column_sorting = index_d.get("column_sorting", {})
unique = index_d["unique"]
flavor = index_d.get("type", "index")
dialect_options = index_d.get("dialect_options", {})
duplicates = index_d.get("duplicates_constraint")
if include_columns and not set(columns).issubset(include_columns):
util.warn(
"Omitting %s key for (%s), key covers omitted columns."
% (flavor, ", ".join(columns))
)
continue
if duplicates:
continue
# look for columns by orig name in cols_by_orig_name,
# but support columns that are in-Python only as fallback
idx_cols = []
for c in columns:
try:
idx_col = (
cols_by_orig_name[c]
if c in cols_by_orig_name
else table.c[c]
)
except KeyError:
util.warn(
"%s key '%s' was not located in "
"columns for table '%s'" % (flavor, c, table_name)
)
continue
c_sorting = column_sorting.get(c, ())
for k, op in self._index_sort_exprs:
if k in c_sorting:
idx_col = op(idx_col)
idx_cols.append(idx_col)
sa_schema.Index(
name,
*idx_cols,
_table=table,
**dict(list(dialect_options.items()) + [("unique", unique)])
)
def _reflect_unique_constraints(
self,
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
):
# Unique Constraints
try:
constraints = self.get_unique_constraints(table_name, schema)
except NotImplementedError:
# optional dialect feature
return
for const_d in constraints:
conname = const_d["name"]
columns = const_d["column_names"]
duplicates = const_d.get("duplicates_index")
if include_columns and not set(columns).issubset(include_columns):
util.warn(
"Omitting unique constraint key for (%s), "
"key covers omitted columns." % ", ".join(columns)
)
continue
if duplicates:
continue
# look for columns by orig name in cols_by_orig_name,
# but support columns that are in-Python only as fallback
constrained_cols = []
for c in columns:
try:
constrained_col = (
cols_by_orig_name[c]
if c in cols_by_orig_name
else table.c[c]
)
except KeyError:
util.warn(
"unique constraint key '%s' was not located in "
"columns for table '%s'" % (c, table_name)
)
else:
constrained_cols.append(constrained_col)
table.append_constraint(
sa_schema.UniqueConstraint(*constrained_cols, name=conname)
)
def _reflect_check_constraints(
self,
table_name,
schema,
table,
cols_by_orig_name,
include_columns,
exclude_columns,
reflection_options,
):
try:
constraints = self.get_check_constraints(table_name, schema)
except NotImplementedError:
# optional dialect feature
return
for const_d in constraints:
table.append_constraint(sa_schema.CheckConstraint(**const_d))
def _reflect_table_comment(
self, table_name, schema, table, reflection_options
):
try:
comment_dict = self.get_table_comment(table_name, schema)
except NotImplementedError:
return
else:
table.comment = comment_dict.get("text", None)
|
the-stack_0_2345 | # Copyright 2016 Raytheon BBN Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
__all__ = ['Averager']
import time
import itertools
import numpy as np
from .filter import Filter
from auspex.log import logger
from auspex.parameter import Parameter, FloatParameter
from auspex.stream import InputConnector, OutputConnector, DataStreamDescriptor, DataAxis
def view_fields(a, names):
"""
`a` must be a numpy structured array.
`names` is the collection of field names to keep.
Returns a view of the array `a` (not a copy).
http://stackoverflow.com/questions/37079175/how-to-remove-a-column-from-a-structured-numpy-array-without-copying-it
"""
dt = a.dtype
formats = [dt.fields[name][0] for name in names]
offsets = [dt.fields[name][1] for name in names]
itemsize = a.dtype.itemsize
newdt = np.dtype(dict(names=names,
formats=formats,
offsets=offsets,
itemsize=itemsize))
b = a.view(newdt)
return b
def remove_fields(a, names):
"""
`a` must be a numpy structured array.
`names` is the collection of field names to remove.
Returns a view of the array `a` (not a copy).
http://stackoverflow.com/questions/37079175/how-to-remove-a-column-from-a-structured-numpy-array-without-copying-it
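    Illustrative example (same made-up array as in view_fields):
        >>> import numpy as np
        >>> a = np.array([(1, 2.0, 3)], dtype=[('x', 'i4'), ('y', 'f8'), ('z', 'i4')])
        >>> remove_fields(a, ['y']).dtype.names
        ('x', 'z')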
"""
dt = a.dtype
keep_names = [name for name in dt.names if name not in names]
return view_fields(a, keep_names)
class Averager(Filter):
"""Takes data and collapses along the specified axis."""
sink = InputConnector()
partial_average = OutputConnector()
source = OutputConnector()
final_variance = OutputConnector()
final_counts = OutputConnector()
axis = Parameter()
threshold = FloatParameter()
def __init__(self, axis=None, threshold=0.5, **kwargs):
super(Averager, self).__init__(**kwargs)
self.axis.value = axis
self.threshold.value = threshold
self.points_before_final_average = None
self.points_before_partial_average = None
self.sum_so_far = None
self.num_averages = None
self.passthrough = False
# Rate limiting for partial averages
self.last_update = time.time()
self.update_interval = 0.5
def update_descriptors(self):
logger.debug('Updating averager "%s" descriptors based on input descriptor: %s.', self.filter_name, self.sink.descriptor)
descriptor_in = self.sink.descriptor
names = [a.name for a in descriptor_in.axes]
self.axis.allowed_values = names
if self.axis.value is None:
self.axis.value = descriptor_in.axes[0].name
# Convert named axes to an index
if self.axis.value not in names:
raise ValueError("Could not find axis {} within the DataStreamDescriptor {}".format(self.axis.value, descriptor_in))
self.axis_num = descriptor_in.axis_num(self.axis.value)
logger.debug("Averaging over axis #%d: %s", self.axis_num, self.axis.value)
self.data_dims = descriptor_in.data_dims()
# If we only have a single point along this axis, then just pass the data straight through
if self.data_dims[self.axis_num] == 1:
logger.debug("Averaging over a singleton axis")
self.passthrough = True
if self.axis_num == len(descriptor_in.axes) - 1:
logger.debug("Performing scalar average!")
self.points_before_partial_average = 1
self.avg_dims = [1]
else:
self.points_before_partial_average = descriptor_in.num_points_through_axis(self.axis_num+1)
self.avg_dims = self.data_dims[self.axis_num+1:]
        # In case we receive multiple final averages' worth of data simultaneously
self.reshape_dims = self.data_dims[self.axis_num:]
if self.axis_num > 0:
self.reshape_dims = [-1] + self.reshape_dims
self.mean_axis = self.axis_num - len(self.data_dims)
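        # Worked example (made-up shapes): with data_dims == [5, 4, 3] and axis_num == 1,
        # reshape_dims == [-1, 4, 3] and mean_axis == -2, so each incoming chunk is
        # reshaped to (n, 4, 3) and the 4-point axis is the one collapsed by mean().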
self.points_before_final_average = descriptor_in.num_points_through_axis(self.axis_num)
logger.debug("Points before partial average: %s.", self.points_before_partial_average)
logger.debug("Points before final average: %s.", self.points_before_final_average)
logger.debug("Data dimensions are %s", self.data_dims)
logger.debug("Averaging dimensions are %s", self.avg_dims)
# Define final axis descriptor
descriptor = descriptor_in.copy()
self.num_averages = descriptor.pop_axis(self.axis.value).num_points()
logger.debug("Number of partial averages is %d", self.num_averages)
if len(descriptor.axes) == 0:
# We will be left with only a single point here!
descriptor.add_axis(DataAxis("result", [0]))
self.sum_so_far = np.zeros(self.avg_dims, dtype=descriptor.dtype)
self.current_avg_frame = np.zeros(self.points_before_final_average, dtype=descriptor.dtype)
self.partial_average.descriptor = descriptor
self.source.descriptor = descriptor
self.excited_counts = np.zeros(self.data_dims, dtype=np.int64)
# We can update the visited_tuples upfront if none
# of the sweeps are adaptive...
desc_out_dtype = descriptor_in.axis_data_type(with_metadata=True, excluding_axis=self.axis.value)
if not descriptor_in.is_adaptive():
vals = [a.points_with_metadata() for a in descriptor_in.axes if a.name != self.axis.value]
nested_list = list(itertools.product(*vals))
flattened_list = [tuple((val for sublist in line for val in sublist)) for line in nested_list]
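            # e.g. (illustrative) vals == [[(0.1,), (0.2,)], [(1, 'a')]] gives
            # nested_list == [((0.1,), (1, 'a')), ((0.2,), (1, 'a'))] and
            # flattened_list == [(0.1, 1, 'a'), (0.2, 1, 'a')]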
descriptor.visited_tuples = np.core.records.fromrecords(flattened_list, dtype=desc_out_dtype)
else:
descriptor.visited_tuples = np.empty((0), dtype=desc_out_dtype)
for stream in self.partial_average.output_streams:
stream.set_descriptor(descriptor)
stream.descriptor.buffer_mult_factor = 20
stream.end_connector.update_descriptors()
for stream in self.source.output_streams:
stream.set_descriptor(descriptor)
stream.end_connector.update_descriptors()
# Define variance axis descriptor
descriptor_var = descriptor_in.copy()
descriptor_var.data_name = "Variance"
descriptor_var.pop_axis(self.axis.value)
if descriptor_var.unit:
descriptor_var.unit = descriptor_var.unit + "^2"
descriptor_var.metadata["num_averages"] = self.num_averages
        self.final_variance.descriptor = descriptor_var
# Define counts axis descriptor
descriptor_count = descriptor_in.copy()
descriptor_count.data_name = "Counts"
descriptor_count.dtype = np.float64
descriptor_count.pop_axis(self.axis.value)
        descriptor_count.add_axis(DataAxis("state", [0, 1]), position=0)
if descriptor_count.unit:
descriptor_count.unit = "counts"
descriptor_count.metadata["num_counts"] = self.num_averages
self.final_counts.descriptor = descriptor_count
if not descriptor_in.is_adaptive():
descriptor_var.visited_tuples = np.core.records.fromrecords(flattened_list, dtype=desc_out_dtype)
else:
descriptor_var.visited_tuples = np.empty((0), dtype=desc_out_dtype)
for stream in self.final_variance.output_streams:
stream.set_descriptor(descriptor_var)
stream.end_connector.update_descriptors()
for stream in self.final_counts.output_streams:
stream.set_descriptor(descriptor_count)
stream.end_connector.update_descriptors()
def final_init(self):
if self.points_before_final_average is None:
raise Exception("Average has not been initialized. Run 'update_descriptors'")
self.completed_averages = 0
self.idx_frame = 0
self.idx_global = 0
# We only need to accumulate up to the averaging axis
# BUT we may get something longer at any given time!
self.carry = np.zeros(0, dtype=self.source.descriptor.dtype)
def process_data(self, data):
if self.passthrough:
for os in self.source.output_streams:
os.push(data)
for os in self.final_variance.output_streams:
os.push(data*0.0)
for os in self.partial_average.output_streams:
os.push(data)
return
# TODO: handle unflattened data separately
if len(data.shape) > 1:
data = data.flatten()
#handle single points
elif not isinstance(data, np.ndarray) and (data.size == 1):
data = np.array([data])
if self.carry.size > 0:
data = np.concatenate((self.carry, data))
self.carry = np.zeros(0, dtype=self.source.descriptor.dtype)
idx = 0
while idx < data.size:
#check whether we have enough data to fill an averaging frame
if data.size - idx >= self.points_before_final_average:
#logger.debug("Have {} points, enough for final avg.".format(data.size))
# How many chunks can we process at once?
num_chunks = int((data.size - idx)/self.points_before_final_average)
new_points = num_chunks*self.points_before_final_average
reshaped = data[idx:idx+new_points].reshape(self.reshape_dims)
averaged = reshaped.mean(axis=self.mean_axis)
idx += new_points
# do state assignment
excited_states = (np.real(reshaped) > self.threshold.value).sum(axis=self.mean_axis)
ground_states = self.num_averages - excited_states
if self.sink.descriptor.is_adaptive():
new_tuples = self.sink.descriptor.tuples()[self.idx_global:self.idx_global + new_points]
new_tuples_stripped = remove_fields(new_tuples, self.axis.value)
take_axis = -1 if self.axis_num > 0 else 0
reduced_tuples = new_tuples_stripped.reshape(self.reshape_dims).take((0,), axis=take_axis)
self.idx_global += new_points
# Add to Visited tuples
if self.sink.descriptor.is_adaptive():
for os in self.source.output_streams + self.final_variance.output_streams + self.partial_average.output_streams:
os.descriptor.visited_tuples = np.append(os.descriptor.visited_tuples, reduced_tuples)
for os in self.source.output_streams:
os.push(averaged)
for os in self.final_variance.output_streams:
os.push(reshaped.var(axis=self.mean_axis, ddof=1)) # N-1 in the denominator
for os in self.partial_average.output_streams:
os.push(averaged)
for os in self.final_counts.output_streams:
os.push(ground_states)
os.push(excited_states)
# Maybe we can fill a partial frame
elif data.size - idx >= self.points_before_partial_average:
# logger.info("Have {} points, enough for partial avg.".format(data.size))
# How many chunks can we process at once?
num_chunks = int((data.size - idx)/self.points_before_partial_average)
new_points = num_chunks*self.points_before_partial_average
                # Find the appropriate dimensions for the partial average
partial_reshape_dims = self.reshape_dims[:]
partial_reshape_dims[self.mean_axis] = -1
partial_reshape_dims = partial_reshape_dims[self.mean_axis:]
reshaped = data[idx:idx+new_points].reshape(partial_reshape_dims)
summed = reshaped.sum(axis=self.mean_axis)
self.sum_so_far += summed
self.current_avg_frame[self.idx_frame:self.idx_frame+new_points] = data[idx:idx+new_points]
idx += new_points
self.idx_frame += new_points
self.completed_averages += num_chunks
                # If we now have enough for the final average, push to both partial and final...
if self.completed_averages == self.num_averages:
reshaped = self.current_avg_frame.reshape(partial_reshape_dims)
for os in self.source.output_streams + self.partial_average.output_streams:
os.push(reshaped.mean(axis=self.mean_axis))
for os in self.final_variance.output_streams:
os.push(np.real(reshaped).var(axis=self.mean_axis, ddof=1)+1j*np.imag(reshaped).var(axis=self.mean_axis, ddof=1)) # N-1 in the denominator
# do state assignment
excited_states = (np.real(reshaped) < self.threshold.value).sum(axis=self.mean_axis)
ground_states = self.num_averages - excited_states
for os in self.final_counts.output_streams:
os.push(ground_states)
os.push(excited_states)
self.sum_so_far[:] = 0.0
self.current_avg_frame[:] = 0.0
self.completed_averages = 0
self.idx_frame = 0
else:
# Emit a partial average since we've accumulated enough data
if (time.time() - self.last_update >= self.update_interval):
for os in self.partial_average.output_streams:
os.push(self.sum_so_far/self.completed_averages)
self.last_update = time.time()
# otherwise just add it to the carry
else:
self.carry = data[idx:]
break
|
the-stack_0_2346 | import os
import argparse
import pandas as pd
def combine_ftype(on_private):
# Content_2_index = {
# 0: "Empty",
# 1: "Pasta",
# 2: "Rice",
# 3: "Water"
# }
if on_private:
vggish_path = './filling_type/vggish/predictions/200903163404/ftype_private_test_agg_vggish.csv'
rf_path = './filling_type/CORSMAL-pyAudioAnalysis/ftype-randomforest-final_result_private_test.csv'
else:
vggish_path = './filling_type/vggish/predictions/200903163404/ftype_public_test_agg_vggish.csv'
rf_path = './filling_type/CORSMAL-pyAudioAnalysis/ftype-randomforest-final_result_public_test.csv'
vggish = pd.read_csv(vggish_path)
ftype_randomforest = pd.read_csv(rf_path)
ftype_randomforest = ftype_randomforest.sort_values(['Object', 'Sequence']).reset_index(drop=True)
random_forest_preds = ftype_randomforest[[
'Filling type prob0', 'Filling type prob1', 'Filling type prob2', 'Filling type prob3'
]]
vggish_preds = vggish[['ftype_prob_0', 'ftype_prob_1', 'ftype_prob_2', 'ftype_prob_3']]
ftype_combined = (random_forest_preds.values + vggish_preds.values) / 2
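    # Simple late fusion: average the two models' per-class probabilities row-wise,
    # then take the argmax. e.g. (made-up numbers) [0.6, 0.1, 0.2, 0.1] and
    # [0.0, 0.3, 0.6, 0.1] average to [0.3, 0.2, 0.4, 0.1], so class 2 is predicted.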
# return pd.Series([Content_2_index[cls] for cls in ftype_combined.argmax(axis=1)])
return pd.Series([cls for cls in ftype_combined.argmax(axis=1)])
def combine_flvl(on_private):
# filling_2_value = {0: 0, 1: 50, 2: 90}
cols_with_probs_1 = ['flvl_prob_0', 'flvl_prob_1', 'flvl_prob_2']
if on_private:
vggish_path = './filling_level/vggish/predictions/200903162117/flvl_private_test_agg_vggish.csv'
r21d_path = './filling_level/r21d_rgb/predictions/200903214601/flvl_private_test_agg_r21d_rgb.csv'
rf_path = './filling_level/CORSMAL-pyAudioAnalysis/flevel-randomforest-final_result_private_test.csv'
else:
vggish_path = './filling_level/vggish/predictions/200903162117/flvl_public_test_agg_vggish.csv'
r21d_path = './filling_level/r21d_rgb/predictions/200903214601/flvl_public_test_agg_r21d_rgb.csv'
rf_path = './filling_level/CORSMAL-pyAudioAnalysis/flevel-randomforest-final_result_public_test.csv'
flvl_vggish = pd.read_csv(vggish_path)
flvl_r21d = pd.read_csv(r21d_path)
flvl_vggish = flvl_vggish[cols_with_probs_1]
flvl_r21d = flvl_r21d[cols_with_probs_1]
# flvl_combined = (flvl_vggish.values + flvl_r21d.values) / 2
# flvl_combined = flvl_vggish.values
    # we also observed that adding pyAudioAnalysis' random forest predictions improves validation performance
cols_with_probs_2 = ['Filling level [%] prob0', 'Filling level [%] prob1', 'Filling level [%] prob2']
flvl_rf = pd.read_csv(rf_path)
flvl_rf = flvl_rf.sort_values(['Object', 'Sequence']).reset_index(drop=True)
flvl_rf = flvl_rf[cols_with_probs_2]
flvl_combined = (flvl_vggish.values + flvl_r21d.values + flvl_rf.values) / 3
# return pd.Series([int(filling_2_value[cls]) for cls in flvl_combined.argmax(axis=1)])
return pd.Series([int(cls) for cls in flvl_combined.argmax(axis=1)])
def capacity(on_private):
if on_private:
cap_path = './capacity/results/estimation_combination_private_test.csv'
# cap_path = './capacity/results/estimation_combination_with_0_private_test.csv'
# cap_path = './capacity/results/estimation_combination_with_1_private_test.csv'
else:
cap_path = './capacity/results/estimation_combination_public_test.csv'
# cap_path = './capacity/results/estimation_combination_with_0_public_test.csv'
# cap_path = './capacity/results/estimation_combination_with_1_public_test.csv'
a = pd.read_csv(cap_path)
return a['capacity[mL]']
# def estimate_fmass(submission):
# Content_2_density = {
# "Empty": 0.0, # "Empty"
# 0: 0.0, # "Empty"
# "Pasta": 0.41, # "Pasta"
# 1: 0.41, # "Pasta"
# "Rice": 0.85, # "Rice"
# 2: 0.85, # "Rice"
# "Water": 1.00, # "Water"
# 3: 1.00, # "Water"
# }
# fmass_col = []
# for cont, seq, capacity, c_mass, ftype, flvl, fmass in submission.values:
# fmass = Content_2_density[ftype] * flvl / 100 * capacity
# fmass_col.append(fmass)
# return pd.Series(fmass_col)
def make_submission_form(data_path, on_private):
columns = ['Container ID', 'Sequence', 'Filling type', 'Filling level', 'Container Capacity']
submission = pd.DataFrame(columns=columns)
if on_private:
container_ids = ['13', '14', '15']
else:
container_ids = ['10', '11', '12']
# creating columns for container id and sequence using filenames from audio folder – 0053_audio.wav -> 53
object_list = []
sequence_list = []
for container_id in container_ids:
path = os.path.join(data_path, container_id, 'audio')
filenames = sorted(os.listdir(path))
seq_ids = [int(fname.replace('_audio.wav', '')) for fname in filenames]
sequence_list.extend(seq_ids)
object_list.extend([container_id] * len(seq_ids))
submission['Container ID'] = object_list
submission['Sequence'] = sequence_list
return submission
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--predict_on_private', dest='predict_on_private', action='store_true', default=False)
parser.add_argument('--data_path', default='./dataset/')
args = parser.parse_args()
    # Gather predictions for the public test set
submission_public = make_submission_form(args.data_path, on_private=False)
submission_public['Filling type'] = combine_ftype(on_private=False)
submission_public['Filling level'] = combine_flvl(on_private=False)
submission_public['Container Capacity'] = capacity(on_private=False)
# submission_public['Filling mass'] = estimate_fmass(submission_public)
submission_public.to_csv('./submission_public_test.csv', index=False)
print('Formed predictions in ./submission_public_test.csv')
    # If specified, gather predictions for the private test set
if args.predict_on_private:
submission_private = make_submission_form(args.data_path, on_private=True)
submission_private['Filling type'] = combine_ftype(on_private=True)
submission_private['Filling level'] = combine_flvl(on_private=True)
submission_private['Container Capacity'] = capacity(on_private=True)
# submission_private['Filling mass'] = estimate_fmass(submission_private)
submission_private.to_csv('./submission_private_test.csv', index=False)
print('Formed predictions in ./submission_private_test.csv')
|
the-stack_0_2349 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.training.python.training import sampling_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
class StratifiedSampleTest(test.TestCase):
def testGraphBuildAssertionFailures(self):
val = [array_ops.zeros([1, 3]), array_ops.ones([1, 5])]
label = constant_op.constant([1], shape=[1]) # must have batch dimension
probs = [.2] * 5
init_probs = [.1, .3, .1, .3, .2]
batch_size = 16
# Label must have only batch dimension if enqueue_many is True.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
array_ops.zeros([]),
probs,
batch_size,
init_probs,
enqueue_many=True)
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
array_ops.zeros([1, 1]),
probs,
batch_size,
init_probs,
enqueue_many=True)
# Label must not be one-hot.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(val,
constant_op.constant([0, 1, 0, 0, 0]),
probs, batch_size, init_probs)
# Data must be list, not singleton tensor.
with self.assertRaises(TypeError):
sampling_ops.stratified_sample(
array_ops.zeros([1, 3]), label, probs, batch_size, init_probs)
# Data must have batch dimension if enqueue_many is True.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
constant_op.constant(1),
probs,
batch_size,
init_probs,
enqueue_many=True)
# Batch dimensions on data and labels should be equal.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
[array_ops.zeros([2, 1])],
label,
probs,
batch_size,
init_probs,
enqueue_many=True)
# Probabilities must be numpy array, python list, or tensor.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(val, label, 1, batch_size, init_probs)
# Probabilities shape must be fully defined.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val,
label,
array_ops.placeholder(
dtypes.float32, shape=[None]),
batch_size,
init_probs)
# In the rejection sampling case, make sure that probability lengths are
# the same.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val, label, [.1] * 10, batch_size, init_probs=[.2] * 5)
# In the rejection sampling case, make sure that zero initial probability
# classes also have zero target probability.
with self.assertRaises(ValueError):
sampling_ops.stratified_sample(
val, label, [.2, .4, .4], batch_size, init_probs=[0, .5, .5])
def testRuntimeAssertionFailures(self):
valid_probs = [.2] * 5
valid_labels = [1, 2, 3]
vals = [array_ops.zeros([3, 1])]
illegal_labels = [
[0, -1, 1], # classes must be nonnegative
[5, 1, 1], # classes must be less than number of classes
[2, 3], # data and label batch size must be the same
]
illegal_probs = [
[.1] * 5, # probabilities must sum to one
[-.5, .5, .5, .4, .1], # probabilities must be non-negative
]
# Set up graph with illegal label vector.
label_ph = array_ops.placeholder(dtypes.int32, shape=[None])
probs_ph = array_ops.placeholder(
dtypes.float32, shape=[5]) # shape must be defined
val_tf, lbl_tf, prob_tf = sampling_ops._verify_input( # pylint: disable=protected-access
vals, label_ph, [probs_ph])
for illegal_label in illegal_labels:
# Run session that should fail.
with self.test_session() as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run([val_tf, lbl_tf],
feed_dict={label_ph: illegal_label,
probs_ph: valid_probs})
for illegal_prob in illegal_probs:
# Run session that should fail.
with self.test_session() as sess:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run([prob_tf],
feed_dict={label_ph: valid_labels,
probs_ph: illegal_prob})
def testCanBeCalledMultipleTimes(self):
batch_size = 20
val_input_batch = [array_ops.zeros([2, 3, 4])]
lbl_input_batch = array_ops.ones([], dtype=dtypes.int32)
probs = np.array([0, 1, 0, 0, 0])
batches = sampling_ops.stratified_sample(
val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
batches += sampling_ops.stratified_sample(
val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
summary_op = logging_ops.merge_summary(
ops.get_collection(ops.GraphKeys.SUMMARIES))
with self.test_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
sess.run(batches + (summary_op,))
coord.request_stop()
coord.join(threads)
def testRejectionBatchingBehavior(self):
batch_size = 20
input_batch_size = 11
val_input_batch = [array_ops.zeros([input_batch_size, 2, 3, 4])]
lbl_input_batch = control_flow_ops.cond(
math_ops.greater(.5, random_ops.random_uniform([])),
lambda: array_ops.ones([input_batch_size], dtype=dtypes.int32) * 1,
lambda: array_ops.ones([input_batch_size], dtype=dtypes.int32) * 3)
probs = np.array([0, .2, 0, .8, 0])
data_batch, labels = sampling_ops.stratified_sample(
val_input_batch,
lbl_input_batch,
probs,
batch_size,
init_probs=[0, .3, 0, .7, 0],
enqueue_many=True)
with self.test_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
sess.run([data_batch, labels])
coord.request_stop()
coord.join(threads)
def testBatchDimensionNotRequired(self):
classes = 5
# Probs must be a tensor, since we pass it directly to _verify_input.
probs = constant_op.constant([1.0 / classes] * classes)
# Make sure that these vals/labels pairs don't throw any runtime exceptions.
legal_input_pairs = [
(np.zeros([2, 3]), [x % classes for x in range(2)]), # batch dim 2
(np.zeros([4, 15]), [x % classes for x in range(4)]), # batch dim 4
(np.zeros([10, 1]), [x % classes for x in range(10)]), # batch dim 10
]
# Set up graph with placeholders.
vals_ph = array_ops.placeholder(
dtypes.float32) # completely undefined shape
labels_ph = array_ops.placeholder(
dtypes.int32) # completely undefined shape
val_tf, labels_tf, _ = sampling_ops._verify_input( # pylint: disable=protected-access
[vals_ph], labels_ph, [probs])
# Run graph to make sure there are no shape-related runtime errors.
for vals, labels in legal_input_pairs:
with self.test_session() as sess:
sess.run([val_tf, labels_tf],
feed_dict={vals_ph: vals,
labels_ph: labels})
def testRejectionDataListInput(self):
batch_size = 20
val_input_batch = [
array_ops.zeros([2, 3, 4]), array_ops.ones([2, 4]), array_ops.ones(2) *
3
]
lbl_input_batch = array_ops.ones([], dtype=dtypes.int32)
probs = np.array([0, 1, 0, 0, 0])
val_list, lbls = sampling_ops.stratified_sample(
val_input_batch,
lbl_input_batch,
probs,
batch_size,
init_probs=[0, 1, 0, 0, 0])
# Check output shapes.
self.assertTrue(isinstance(val_list, list))
self.assertEqual(len(val_list), len(val_input_batch))
self.assertTrue(isinstance(lbls, ops.Tensor))
with self.test_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
out = sess.run(val_list + [lbls])
coord.request_stop()
coord.join(threads)
# Check output shapes.
self.assertEqual(len(out), len(val_input_batch) + 1)
def normalBehaviorHelper(self, sampler):
# Set up graph.
random_seed.set_random_seed(1234)
lbl1 = 0
lbl2 = 3
# This cond allows the necessary class queues to be populated.
label = control_flow_ops.cond(
math_ops.greater(.5, random_ops.random_uniform([])),
lambda: constant_op.constant(lbl1), lambda: constant_op.constant(lbl2))
val = [np.array([1, 4]) * label]
probs = np.array([.8, 0, 0, .2, 0])
batch_size = 16
data_batch, labels = sampler(val, label, probs, batch_size)
# Run session and keep track of how frequently the labels and values appear.
data_l = []
label_l = []
with self.test_session() as sess:
# Need to initialize variables that keep running total of classes seen.
variables.global_variables_initializer().run()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
for _ in range(20):
[data], lbls = sess.run([data_batch, labels])
data_l.append(data)
label_l.append(lbls)
coord.request_stop()
coord.join(threads)
# First check that the data matches the labels.
for lbl, data in zip(label_l, data_l):
for i in range(batch_size):
self.assertListEqual(list(np.array([1, 4]) * lbl[i]), list(data[i, :]))
# Check that the labels are approximately correct.
expected_label = probs[0] * lbl1 + probs[3] * lbl2
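    # e.g. with probs == [.8, 0, 0, .2, 0], lbl1 == 0, lbl2 == 3:
    # expected_label == .8 * 0 + .2 * 3 == 0.6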
lbl_list = range(len(probs))
lbl_std_dev = np.sqrt(np.sum((np.square(lbl_list - expected_label))))
lbl_std_dev_of_mean = lbl_std_dev / np.sqrt(len(label_l)) # CLT
actual_lbl = np.mean(label_l)
# Tolerance is 3 standard deviations of the mean. According to the central
# limit theorem, this should cover 99.7% of cases. Note that since the seed
# is fixed, for a given implementation, this test will pass or fail 100% of
# the time. This use of assertNear is to cover cases where someone changes
# an implementation detail, which would cause the random behavior to differ.
self.assertNear(actual_lbl, expected_label, 3 * lbl_std_dev_of_mean)
def testRejectionNormalBehavior(self):
initial_p = [.7, 0, 0, .3, 0]
def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
return sampling_ops.stratified_sample(
val,
lbls,
probs,
batch,
init_probs=initial_p,
enqueue_many=enqueue_many)
self.normalBehaviorHelper(curried_sampler)
def testRejectionNormalBehaviorWithOnlineInitPEstimate(self):
def curried_sampler(val, lbls, probs, batch, enqueue_many=False):
return sampling_ops.stratified_sample(
val, lbls, probs, batch, init_probs=None, enqueue_many=enqueue_many)
self.normalBehaviorHelper(curried_sampler)
class RejectionSampleTest(test.TestCase):
def testGraphConstructionFailures(self):
accept_prob_fn = lambda _: constant_op.constant(1.0)
batch_size = 32
# Data must have batch dimension if `enqueue_many` is `True`.
with self.assertRaises(ValueError):
sampling_ops.rejection_sample(
[array_ops.zeros([])], accept_prob_fn, batch_size, enqueue_many=True)
# Batch dimensions should be equal if `enqueue_many` is `True`.
with self.assertRaises(ValueError):
sampling_ops.rejection_sample(
[array_ops.zeros([5, 1]), array_ops.zeros([4, 1])],
accept_prob_fn,
batch_size,
enqueue_many=True)
def testRuntimeFailures(self):
prob_ph = array_ops.placeholder(dtypes.float32, [])
accept_prob_fn = lambda _: prob_ph
batch_size = 32
# Set up graph.
random_seed.set_random_seed(1234)
sampling_ops.rejection_sample(
[array_ops.zeros([])],
accept_prob_fn,
batch_size,
runtime_checks=True,
name='rejection_sample')
prob_tensor = ops.get_default_graph().get_tensor_by_name(
'rejection_sample/prob_with_checks:0')
# Run session that should fail.
with self.test_session() as sess:
for illegal_prob in [-0.1, 1.1]:
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(prob_tensor, feed_dict={prob_ph: illegal_prob})
def testNormalBehavior(self):
tensor_list = [
control_flow_ops.cond(
math_ops.greater(.5, random_ops.random_uniform([])),
lambda: constant_op.constant(1.0),
lambda: constant_op.constant(2.0))
]
accept_prob_fn = lambda x: x[0] - 1.0
batch_size = 10
# Set up graph.
sample = sampling_ops.rejection_sample(tensor_list, accept_prob_fn,
batch_size)
with self.test_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
for _ in range(5):
sample_np = sess.run(sample)[0]
self.assertListEqual([2.0] * batch_size, list(sample_np))
coord.request_stop()
coord.join(threads)
class ConditionalBatchTest(test.TestCase):
def testConditionallyEnqueueAndBatch(self):
random_seed.set_random_seed(1234)
tensor = control_flow_ops.cond(
math_ops.greater(.5, random_ops.random_uniform([])),
lambda: constant_op.constant(1.0), lambda: constant_op.constant(2.0))
keep_input = math_ops.equal(tensor, 2.0)
batch_size = 4
# Set up the test graph.
[batch] = sampling_ops._conditional_batch([tensor], keep_input, batch_size) # pylint: disable=protected-access
# Check conditional operation.
with self.test_session():
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(coord=coord)
batch_np = batch.eval()
coord.request_stop()
coord.join(threads)
# Check that all elements in batch come from tensors with acceptance prob
# 1, so that none come from acceptance prob 0.
self.assertListEqual(list(batch_np), [2.0] * batch_size)
def testConditionallyEnqueueAndBatchTypes(self):
tensor = constant_op.constant(1.0)
keep_input = constant_op.constant(True)
batch_size = 4
# Check that output types are the same for 1 and 2-length input lists.
output1 = sampling_ops._conditional_batch([tensor], keep_input, batch_size) # pylint: disable=protected-access
output2 = sampling_ops._conditional_batch( # pylint: disable=protected-access
[tensor, tensor], keep_input, batch_size)
self.assertEqual(type(output1), type(output2))
if __name__ == '__main__':
test.main()
|
the-stack_0_2350 | import falcon
import simplejson as json
import mysql.connector
import config
from datetime import datetime, timedelta, timezone
from core import utilities
from decimal import Decimal
import excelexporters.spacestatistics
class Reporting:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp):
resp.status = falcon.HTTP_200
####################################################################################################################
# PROCEDURES
# Step 1: valid parameters
# Step 2: query the space
# Step 3: query energy categories
# Step 4: query associated sensors
# Step 5: query associated points
# Step 6: query base period energy input
# Step 7: query reporting period energy input
# Step 8: query tariff data
# Step 9: query associated sensors and points data
# Step 10: construct the report
####################################################################################################################
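    # Illustrative request (endpoint path and values are examples only; the query
    # parameter names match those read in on_get below):
    #   GET /reports/spacestatistics?spaceid=1&periodtype=daily
    #       &baseperiodstartdatetime=2020-01-01T00:00:00&baseperiodenddatetime=2020-02-01T00:00:00
    #       &reportingperiodstartdatetime=2020-02-01T00:00:00&reportingperiodenddatetime=2020-03-01T00:00:00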
@staticmethod
def on_get(req, resp):
print(req.params)
space_id = req.params.get('spaceid')
period_type = req.params.get('periodtype')
base_start_datetime_local = req.params.get('baseperiodstartdatetime')
base_end_datetime_local = req.params.get('baseperiodenddatetime')
reporting_start_datetime_local = req.params.get('reportingperiodstartdatetime')
reporting_end_datetime_local = req.params.get('reportingperiodenddatetime')
################################################################################################################
# Step 1: valid parameters
################################################################################################################
if space_id is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_SPACE_ID')
else:
space_id = str.strip(space_id)
if not space_id.isdigit() or int(space_id) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_SPACE_ID')
if period_type is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_PERIOD_TYPE')
else:
period_type = str.strip(period_type)
if period_type not in ['hourly', 'daily', 'monthly', 'yearly']:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_PERIOD_TYPE')
timezone_offset = int(config.utc_offset[1:3]) * 60 + int(config.utc_offset[4:6])
if config.utc_offset[0] == '-':
timezone_offset = -timezone_offset
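        # e.g. config.utc_offset == '+08:00' yields timezone_offset == 480 (minutes),
        # while '-05:30' yields timezone_offset == -330.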
base_start_datetime_utc = None
if base_start_datetime_local is not None and len(str.strip(base_start_datetime_local)) > 0:
base_start_datetime_local = str.strip(base_start_datetime_local)
try:
base_start_datetime_utc = datetime.strptime(base_start_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_BASE_PERIOD_START_DATETIME")
base_end_datetime_utc = None
if base_end_datetime_local is not None and len(str.strip(base_end_datetime_local)) > 0:
base_end_datetime_local = str.strip(base_end_datetime_local)
try:
base_end_datetime_utc = datetime.strptime(base_end_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_BASE_PERIOD_END_DATETIME")
if base_start_datetime_utc is not None and base_end_datetime_utc is not None and \
base_start_datetime_utc >= base_end_datetime_utc:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_BASE_PERIOD_END_DATETIME')
if reporting_start_datetime_local is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_START_DATETIME")
else:
reporting_start_datetime_local = str.strip(reporting_start_datetime_local)
try:
reporting_start_datetime_utc = datetime.strptime(reporting_start_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_START_DATETIME")
if reporting_end_datetime_local is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_END_DATETIME")
else:
reporting_end_datetime_local = str.strip(reporting_end_datetime_local)
try:
reporting_end_datetime_utc = datetime.strptime(reporting_end_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_END_DATETIME")
if reporting_start_datetime_utc >= reporting_end_datetime_utc:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_REPORTING_PERIOD_END_DATETIME')
################################################################################################################
# Step 2: query the space
################################################################################################################
cnx_system = mysql.connector.connect(**config.myems_system_db)
cursor_system = cnx_system.cursor()
cnx_energy = mysql.connector.connect(**config.myems_energy_db)
cursor_energy = cnx_energy.cursor()
cnx_historical = mysql.connector.connect(**config.myems_historical_db)
cursor_historical = cnx_historical.cursor()
cursor_system.execute(" SELECT id, name, area, cost_center_id "
" FROM tbl_spaces "
" WHERE id = %s ", (space_id,))
row_space = cursor_system.fetchone()
if row_space is None:
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
if cursor_energy:
cursor_energy.close()
if cnx_energy:
cnx_energy.disconnect()
            if cursor_historical:
                cursor_historical.close()
            if cnx_historical:
                cnx_historical.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND', description='API.SPACE_NOT_FOUND')
space = dict()
space['id'] = row_space[0]
space['name'] = row_space[1]
space['area'] = row_space[2]
space['cost_center_id'] = row_space[3]
################################################################################################################
# Step 3: query energy categories
################################################################################################################
energy_category_set = set()
# query energy categories in base period
cursor_energy.execute(" SELECT DISTINCT(energy_category_id) "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s ",
(space['id'], base_start_datetime_utc, base_end_datetime_utc))
rows_energy_categories = cursor_energy.fetchall()
        if rows_energy_categories is not None and len(rows_energy_categories) > 0:
for row_energy_category in rows_energy_categories:
energy_category_set.add(row_energy_category[0])
# query energy categories in reporting period
cursor_energy.execute(" SELECT DISTINCT(energy_category_id) "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s ",
(space['id'], reporting_start_datetime_utc, reporting_end_datetime_utc))
rows_energy_categories = cursor_energy.fetchall()
        if rows_energy_categories is not None and len(rows_energy_categories) > 0:
for row_energy_category in rows_energy_categories:
energy_category_set.add(row_energy_category[0])
# query all energy categories in base period and reporting period
cursor_system.execute(" SELECT id, name, unit_of_measure, kgce, kgco2e "
" FROM tbl_energy_categories "
" ORDER BY id ", )
rows_energy_categories = cursor_system.fetchall()
if rows_energy_categories is None or len(rows_energy_categories) == 0:
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
if cursor_energy:
cursor_energy.close()
if cnx_energy:
cnx_energy.disconnect()
            if cursor_historical:
                cursor_historical.close()
            if cnx_historical:
                cnx_historical.disconnect()
raise falcon.HTTPError(falcon.HTTP_404,
title='API.NOT_FOUND',
description='API.ENERGY_CATEGORY_NOT_FOUND')
energy_category_dict = dict()
for row_energy_category in rows_energy_categories:
if row_energy_category[0] in energy_category_set:
energy_category_dict[row_energy_category[0]] = {"name": row_energy_category[1],
"unit_of_measure": row_energy_category[2],
"kgce": row_energy_category[3],
"kgco2e": row_energy_category[4]}
################################################################################################################
# Step 4: query associated sensors
################################################################################################################
point_list = list()
cursor_system.execute(" SELECT po.id, po.name, po.units, po.object_type "
" FROM tbl_spaces sp, tbl_sensors se, tbl_spaces_sensors spse, "
" tbl_points po, tbl_sensors_points sepo "
" WHERE sp.id = %s AND sp.id = spse.space_id AND spse.sensor_id = se.id "
" AND se.id = sepo.sensor_id AND sepo.point_id = po.id "
" ORDER BY po.id ", (space['id'], ))
rows_points = cursor_system.fetchall()
if rows_points is not None and len(rows_points) > 0:
for row in rows_points:
point_list.append({"id": row[0], "name": row[1], "units": row[2], "object_type": row[3]})
################################################################################################################
# Step 5: query associated points
################################################################################################################
cursor_system.execute(" SELECT po.id, po.name, po.units, po.object_type "
" FROM tbl_spaces sp, tbl_spaces_points sppo, tbl_points po "
" WHERE sp.id = %s AND sp.id = sppo.space_id AND sppo.point_id = po.id "
" ORDER BY po.id ", (space['id'], ))
rows_points = cursor_system.fetchall()
if rows_points is not None and len(rows_points) > 0:
for row in rows_points:
point_list.append({"id": row[0], "name": row[1], "units": row[2], "object_type": row[3]})
################################################################################################################
# Step 6: query base period energy input
################################################################################################################
base = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
base[energy_category_id] = dict()
base[energy_category_id]['timestamps'] = list()
base[energy_category_id]['values'] = list()
base[energy_category_id]['subtotal'] = Decimal(0.0)
base[energy_category_id]['mean'] = None
base[energy_category_id]['median'] = None
base[energy_category_id]['minimum'] = None
base[energy_category_id]['maximum'] = None
base[energy_category_id]['stdev'] = None
base[energy_category_id]['variance'] = None
cursor_energy.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(space['id'],
energy_category_id,
base_start_datetime_utc,
base_end_datetime_utc))
rows_space_hourly = cursor_energy.fetchall()
rows_space_periodically, \
base[energy_category_id]['mean'], \
base[energy_category_id]['median'], \
base[energy_category_id]['minimum'], \
base[energy_category_id]['maximum'], \
base[energy_category_id]['stdev'], \
base[energy_category_id]['variance'] = \
utilities.statistics_hourly_data_by_period(rows_space_hourly,
base_start_datetime_utc,
base_end_datetime_utc,
period_type)
for row_space_periodically in rows_space_periodically:
current_datetime_local = row_space_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
actual_value = Decimal(0.0) if row_space_periodically[1] is None else row_space_periodically[1]
base[energy_category_id]['timestamps'].append(current_datetime)
base[energy_category_id]['values'].append(actual_value)
base[energy_category_id]['subtotal'] += actual_value
################################################################################################################
# Step 7: query reporting period energy input
################################################################################################################
reporting = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
reporting[energy_category_id] = dict()
reporting[energy_category_id]['timestamps'] = list()
reporting[energy_category_id]['values'] = list()
reporting[energy_category_id]['subtotal'] = Decimal(0.0)
reporting[energy_category_id]['mean'] = None
reporting[energy_category_id]['median'] = None
reporting[energy_category_id]['minimum'] = None
reporting[energy_category_id]['maximum'] = None
reporting[energy_category_id]['stdev'] = None
reporting[energy_category_id]['variance'] = None
cursor_energy.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(space['id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows_space_hourly = cursor_energy.fetchall()
rows_space_periodically, \
reporting[energy_category_id]['mean'], \
reporting[energy_category_id]['median'], \
reporting[energy_category_id]['minimum'], \
reporting[energy_category_id]['maximum'], \
reporting[energy_category_id]['stdev'], \
reporting[energy_category_id]['variance'] = \
utilities.statistics_hourly_data_by_period(rows_space_hourly,
reporting_start_datetime_utc,
reporting_end_datetime_utc,
period_type)
for row_space_periodically in rows_space_periodically:
current_datetime_local = row_space_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
actual_value = Decimal(0.0) if row_space_periodically[1] is None else row_space_periodically[1]
reporting[energy_category_id]['timestamps'].append(current_datetime)
reporting[energy_category_id]['values'].append(actual_value)
reporting[energy_category_id]['subtotal'] += actual_value
################################################################################################################
# Step 8: query tariff data
################################################################################################################
parameters_data = dict()
parameters_data['names'] = list()
parameters_data['timestamps'] = list()
parameters_data['values'] = list()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
energy_category_tariff_dict = utilities.get_energy_category_tariffs(space['cost_center_id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc)
tariff_timestamp_list = list()
tariff_value_list = list()
for k, v in energy_category_tariff_dict.items():
# convert k from utc to local
k = k + timedelta(minutes=timezone_offset)
                    tariff_timestamp_list.append(k.isoformat()[0:19])
tariff_value_list.append(v)
parameters_data['names'].append('TARIFF-' + energy_category_dict[energy_category_id]['name'])
parameters_data['timestamps'].append(tariff_timestamp_list)
parameters_data['values'].append(tariff_value_list)
################################################################################################################
# Step 9: query associated sensors and points data
################################################################################################################
for point in point_list:
point_values = []
point_timestamps = []
if point['object_type'] == 'ANALOG_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_analog_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s "
" ORDER BY utc_date_time ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
elif point['object_type'] == 'ENERGY_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_energy_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s "
" ORDER BY utc_date_time ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
elif point['object_type'] == 'DIGITAL_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_digital_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
parameters_data['names'].append(point['name'] + ' (' + point['units'] + ')')
parameters_data['timestamps'].append(point_timestamps)
parameters_data['values'].append(point_values)
################################################################################################################
# Step 10: construct the report
################################################################################################################
        if cursor_system:
            cursor_system.close()
        if cnx_system:
            cnx_system.disconnect()
        if cursor_energy:
            cursor_energy.close()
        if cnx_energy:
            cnx_energy.disconnect()
        if cursor_historical:
            cursor_historical.close()
        if cnx_historical:
            cnx_historical.disconnect()
result = dict()
result['space'] = dict()
result['space']['name'] = space['name']
result['space']['area'] = space['area']
result['base_period'] = dict()
result['base_period']['names'] = list()
result['base_period']['units'] = list()
result['base_period']['timestamps'] = list()
result['base_period']['values'] = list()
result['base_period']['subtotals'] = list()
result['base_period']['means'] = list()
result['base_period']['medians'] = list()
result['base_period']['minimums'] = list()
result['base_period']['maximums'] = list()
result['base_period']['stdevs'] = list()
result['base_period']['variances'] = list()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['base_period']['names'].append(energy_category_dict[energy_category_id]['name'])
result['base_period']['units'].append(energy_category_dict[energy_category_id]['unit_of_measure'])
result['base_period']['timestamps'].append(base[energy_category_id]['timestamps'])
result['base_period']['values'].append(base[energy_category_id]['values'])
result['base_period']['subtotals'].append(base[energy_category_id]['subtotal'])
result['base_period']['means'].append(base[energy_category_id]['mean'])
result['base_period']['medians'].append(base[energy_category_id]['median'])
result['base_period']['minimums'].append(base[energy_category_id]['minimum'])
result['base_period']['maximums'].append(base[energy_category_id]['maximum'])
result['base_period']['stdevs'].append(base[energy_category_id]['stdev'])
result['base_period']['variances'].append(base[energy_category_id]['variance'])
result['reporting_period'] = dict()
result['reporting_period']['names'] = list()
result['reporting_period']['energy_category_ids'] = list()
result['reporting_period']['units'] = list()
result['reporting_period']['timestamps'] = list()
result['reporting_period']['values'] = list()
result['reporting_period']['subtotals'] = list()
result['reporting_period']['means'] = list()
result['reporting_period']['means_per_unit_area'] = list()
result['reporting_period']['means_increment_rate'] = list()
result['reporting_period']['medians'] = list()
result['reporting_period']['medians_per_unit_area'] = list()
result['reporting_period']['medians_increment_rate'] = list()
result['reporting_period']['minimums'] = list()
result['reporting_period']['minimums_per_unit_area'] = list()
result['reporting_period']['minimums_increment_rate'] = list()
result['reporting_period']['maximums'] = list()
result['reporting_period']['maximums_per_unit_area'] = list()
result['reporting_period']['maximums_increment_rate'] = list()
result['reporting_period']['stdevs'] = list()
result['reporting_period']['stdevs_per_unit_area'] = list()
result['reporting_period']['stdevs_increment_rate'] = list()
result['reporting_period']['variances'] = list()
result['reporting_period']['variances_per_unit_area'] = list()
result['reporting_period']['variances_increment_rate'] = list()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['reporting_period']['names'].append(energy_category_dict[energy_category_id]['name'])
result['reporting_period']['energy_category_ids'].append(energy_category_id)
result['reporting_period']['units'].append(energy_category_dict[energy_category_id]['unit_of_measure'])
result['reporting_period']['timestamps'].append(reporting[energy_category_id]['timestamps'])
result['reporting_period']['values'].append(reporting[energy_category_id]['values'])
result['reporting_period']['subtotals'].append(reporting[energy_category_id]['subtotal'])
result['reporting_period']['means'].append(reporting[energy_category_id]['mean'])
result['reporting_period']['means_per_unit_area'].append(
reporting[energy_category_id]['mean'] / space['area']
if reporting[energy_category_id]['mean'] is not None and
space['area'] is not None and
space['area'] > Decimal(0.0)
else None)
result['reporting_period']['means_increment_rate'].append(
(reporting[energy_category_id]['mean'] - base[energy_category_id]['mean']) /
base[energy_category_id]['mean'] if (base[energy_category_id]['mean'] is not None and
base[energy_category_id]['mean'] > Decimal(0.0))
else None)
result['reporting_period']['medians'].append(reporting[energy_category_id]['median'])
result['reporting_period']['medians_per_unit_area'].append(
reporting[energy_category_id]['median'] / space['area']
if reporting[energy_category_id]['median'] is not None and
space['area'] is not None and
space['area'] > Decimal(0.0)
else None)
result['reporting_period']['medians_increment_rate'].append(
(reporting[energy_category_id]['median'] - base[energy_category_id]['median']) /
base[energy_category_id]['median'] if (base[energy_category_id]['median'] is not None and
base[energy_category_id]['median'] > Decimal(0.0))
else None)
result['reporting_period']['minimums'].append(reporting[energy_category_id]['minimum'])
result['reporting_period']['minimums_per_unit_area'].append(
reporting[energy_category_id]['minimum'] / space['area']
if reporting[energy_category_id]['minimum'] is not None and
space['area'] is not None and space['area'] > Decimal(0.0)
else None)
result['reporting_period']['minimums_increment_rate'].append(
(reporting[energy_category_id]['minimum'] - base[energy_category_id]['minimum']) /
base[energy_category_id]['minimum'] if (base[energy_category_id]['minimum'] is not None and
base[energy_category_id]['minimum'] > Decimal(0.0))
else None)
result['reporting_period']['maximums'].append(reporting[energy_category_id]['maximum'])
result['reporting_period']['maximums_per_unit_area'].append(
reporting[energy_category_id]['maximum'] / space['area']
if reporting[energy_category_id]['maximum'] is not None and
space['area'] is not None and
space['area'] > Decimal(0.0)
else None)
result['reporting_period']['maximums_increment_rate'].append(
(reporting[energy_category_id]['maximum'] - base[energy_category_id]['maximum']) /
base[energy_category_id]['maximum']
if (base[energy_category_id]['maximum'] is not None and
base[energy_category_id]['maximum'] > Decimal(0.0))
else None)
result['reporting_period']['stdevs'].append(reporting[energy_category_id]['stdev'])
result['reporting_period']['stdevs_per_unit_area'].append(
reporting[energy_category_id]['stdev'] / space['area']
if reporting[energy_category_id]['stdev'] is not None and
space['area'] is not None and
space['area'] > Decimal(0.0)
else None)
result['reporting_period']['stdevs_increment_rate'].append(
(reporting[energy_category_id]['stdev'] - base[energy_category_id]['stdev']) /
base[energy_category_id]['stdev'] if (base[energy_category_id]['stdev'] is not None and
base[energy_category_id]['stdev'] > Decimal(0.0))
else None)
result['reporting_period']['variances'].append(reporting[energy_category_id]['variance'])
result['reporting_period']['variances_per_unit_area'].append(
reporting[energy_category_id]['variance'] / space['area']
if reporting[energy_category_id]['variance'] is not None and
space['area'] is not None and
space['area'] > Decimal(0.0)
else None)
result['reporting_period']['variances_increment_rate'].append(
(reporting[energy_category_id]['variance'] - base[energy_category_id]['variance']) /
base[energy_category_id]['variance'] if (base[energy_category_id]['variance'] is not None and
base[energy_category_id]['variance'] > Decimal(0.0))
else None)
result['parameters'] = {
"names": parameters_data['names'],
"timestamps": parameters_data['timestamps'],
"values": parameters_data['values']
}
# export result to Excel file and then encode the file to base64 string
result['excel_bytes_base64'] = excelexporters.spacestatistics.export(result,
space['name'],
reporting_start_datetime_local,
reporting_end_datetime_local,
period_type)
resp.body = json.dumps(result)
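# Hedged illustration (not part of the original API): 'excel_bytes_base64' above
# holds a base64-encoded .xlsx workbook; a client could reconstruct the file
# roughly like this. The helper name and default path are hypothetical.
def _decode_excel_report(excel_bytes_base64, output_path='spacestatistics.xlsx'):
    import base64
    with open(output_path, 'wb') as f:
        f.write(base64.b64decode(excel_bytes_base64))
    return output_path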
|
the-stack_0_2352 | import logging
from boto3.resources.action import ServiceAction, WaiterAction
from boto3.resources.params import create_request_parameters
from botocore import xform_name
from aioboto3.resources.response import AIOResourceHandler, AIORawHandler
logger = logging.getLogger(__name__)
class AIOServiceAction(ServiceAction):
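    """
    Async service action: builds request parameters, awaits the low-level
    client call and post-processes the response with an AIO resource or raw
    handler.
    """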
def __init__(self, action_model, factory=None, service_context=None):
self._action_model = action_model
# In the simplest case we just return the response, but if a
# resource is defined, then we must create these before returning.
resource_response_model = action_model.resource
if resource_response_model:
self._response_handler = AIOResourceHandler(
search_path=resource_response_model.path,
factory=factory, resource_model=resource_response_model,
service_context=service_context,
operation_name=action_model.request.operation
)
else:
self._response_handler = AIORawHandler(action_model.path)
async def __call__(self, parent, *args, **kwargs):
operation_name = xform_name(self._action_model.request.operation)
# First, build predefined params and then update with the
# user-supplied kwargs, which allows overriding the pre-built
# params if needed.
params = create_request_parameters(parent, self._action_model.request)
params.update(kwargs)
logger.debug('Calling %s:%s with %r', parent.meta.service_name,
operation_name, params)
response = await getattr(parent.meta.client, operation_name)(**params)
logger.debug('Response: %r', response)
return await self._response_handler(parent, params, response)
class AioBatchAction(ServiceAction):
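    """
    Async batch action: iterates the pages of the parent collection and calls
    the operation once per page, building request parameters from every
    resource on that page.
    """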
async def __call__(self, parent, *args, **kwargs):
service_name = None
client = None
responses = []
operation_name = xform_name(self._action_model.request.operation)
# Unlike the simple action above, a batch action must operate
# on batches (or pages) of items. So we get each page, construct
# the necessary parameters and call the batch operation.
async for page in parent.pages():
params = {}
for index, resource in enumerate(page):
# There is no public interface to get a service name
# or low-level client from a collection, so we get
# these from the first resource in the collection.
if service_name is None:
service_name = resource.meta.service_name
if client is None:
client = resource.meta.client
create_request_parameters(
resource, self._action_model.request,
params=params, index=index)
if not params:
# There are no items, no need to make a call.
break
params.update(kwargs)
logger.debug('Calling %s:%s with %r',
service_name, operation_name, params)
response = await (getattr(client, operation_name)(**params))
logger.debug('Response: %r', response)
responses.append(
self._response_handler(parent, params, response))
return responses
class AIOWaiterAction(WaiterAction):
async def __call__(self, parent, *args, **kwargs):
"""
Perform the wait operation after building operation
parameters.
:type parent: :py:class:`~boto3.resources.base.ServiceResource`
:param parent: The resource instance to which this action is attached.
"""
client_waiter_name = xform_name(self._waiter_model.waiter_name)
# First, build predefined params and then update with the
# user-supplied kwargs, which allows overriding the pre-built
# params if needed.
params = create_request_parameters(parent, self._waiter_model)
params.update(kwargs)
logger.debug('Calling %s:%s with %r',
parent.meta.service_name,
self._waiter_resource_name, params)
client = parent.meta.client
waiter = client.get_waiter(client_waiter_name)
response = await waiter.wait(**params)
logger.debug('Response: %r', response)
|
the-stack_0_2353 | """The test provides the basic capabilities to run numerous property tests."""
from datetime import timedelta
from datetime import datetime
import functools
import traceback
import shutil
import random
import os
import numpy as np
from property_auxiliary import distribute_command_line_arguments
from property_auxiliary import process_command_line_arguments
from property_auxiliary import get_random_string
from property_auxiliary import run_property_test
from property_auxiliary import print_rslt_ext
from property_auxiliary import collect_tests
from property_auxiliary import finish
def choose_module(inp_dict):
"""Chooses a module with probability proportional to number of stored tests."""
prob_dist = np.array([])
for module in inp_dict.keys():
prob_dist = np.append(prob_dist, len(inp_dict[module]))
prob_dist = prob_dist / np.sum(prob_dist)
return np.random.choice(list(inp_dict.keys()), p=prob_dist)
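# Hedged illustration (hypothetical module names): with the weighting above, a
# module that holds 3 of the 4 collected tests is drawn roughly 75% of the time,
# e.g. choose_module({"mod_a": ["t1", "t2", "t3"], "mod_b": ["t4"]}).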
def run(args):
"""This function runs the property test battery."""
args = distribute_command_line_arguments(args)
test_dict = collect_tests()
rslt = dict()
for module in test_dict.keys():
rslt[module] = dict()
for test in test_dict[module]:
rslt[module][test] = [0, 0]
if args["is_check"]:
np.random.seed(args["seed"])
module = choose_module(test_dict)
test = np.random.choice(test_dict[module])
run_property_test(module, test)
else:
err_msg = []
start, timeout = datetime.now(), timedelta(hours=args["hours"])
print_rslt = functools.partial(print_rslt_ext, start, timeout)
print_rslt(rslt, err_msg)
while True:
seed = random.randrange(1, 100000)
dirname = get_random_string()
np.random.seed(seed)
module = choose_module(test_dict)
test = np.random.choice(test_dict[module])
try:
run_property_test(module, test, dirname)
rslt[module][test][0] += 1
except Exception:
rslt[module][test][1] += 1
msg = traceback.format_exc()
err_msg += [(module, test, seed, msg)]
os.chdir("../")
shutil.rmtree(dirname)
print_rslt(rslt, err_msg)
if timeout < datetime.now() - start:
break
finish(rslt)
if __name__ == "__main__":
args = process_command_line_arguments("property")
run(args)
|
the-stack_0_2354 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('message', '0002_message_date'),
]
operations = [
migrations.RemoveField(
model_name='message',
name='date',
),
]
|
the-stack_0_2356 | """Tests for chebyshev module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.chebyshev as cheb
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
run_module_suite
)
def trim(x):
return cheb.chebtrim(x, tol=1e-6)
T0 = [1]
T1 = [0, 1]
T2 = [-1, 0, 2]
T3 = [0, -3, 0, 4]
T4 = [1, 0, -8, 0, 8]
T5 = [0, 5, 0, -20, 0, 16]
T6 = [-1, 0, 18, 0, -48, 0, 32]
T7 = [0, -7, 0, 56, 0, -112, 0, 64]
T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128]
T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
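# Hedged illustration (not part of the original test suite): the hard-coded
# power-basis coefficients above satisfy the Chebyshev recurrence
# T_{n+1}(x) = 2*x*T_n(x) - T_{n-1}(x) and could be regenerated like this.
def _chebyshev_power_basis(n):
    from numpy.polynomial import polynomial as P
    polys = [np.array([1.0]), np.array([0.0, 1.0])]
    for _ in range(2, n + 1):
        polys.append(P.polysub(2*P.polymulx(polys[-1]), polys[-2]))
    return polys[:n + 1]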
class TestPrivate(object):
def test__cseries_to_zseries(self):
for i in range(5):
inp = np.array([2] + [1]*i, np.double)
tgt = np.array([.5]*i + [2] + [.5]*i, np.double)
res = cheb._cseries_to_zseries(inp)
assert_equal(res, tgt)
def test__zseries_to_cseries(self):
for i in range(5):
inp = np.array([.5]*i + [2] + [.5]*i, np.double)
tgt = np.array([2] + [1]*i, np.double)
res = cheb._zseries_to_cseries(inp)
assert_equal(res, tgt)
class TestConstants(object):
def test_chebdomain(self):
assert_equal(cheb.chebdomain, [-1, 1])
def test_chebzero(self):
assert_equal(cheb.chebzero, [0])
def test_chebone(self):
assert_equal(cheb.chebone, [1])
def test_chebx(self):
assert_equal(cheb.chebx, [0, 1])
class TestArithmetic(object):
def test_chebadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = cheb.chebadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebsub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = cheb.chebsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebmulx(self):
assert_equal(cheb.chebmulx([0]), [0])
assert_equal(cheb.chebmulx([1]), [0, 1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [.5, 0, .5]
assert_equal(cheb.chebmulx(ser), tgt)
def test_chebmul(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(i + j + 1)
tgt[i + j] += .5
tgt[abs(i - j)] += .5
res = cheb.chebmul([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebdiv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = cheb.chebadd(ci, cj)
quo, rem = cheb.chebdiv(tgt, ci)
res = cheb.chebadd(cheb.chebmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 2., 1.5])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_chebval(self):
#check empty input
assert_equal(cheb.chebval([], [1]).size, 0)
        #check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Tlist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = cheb.chebval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(cheb.chebval(x, [1]).shape, dims)
assert_equal(cheb.chebval(x, [1, 0]).shape, dims)
assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims)
def test_chebval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = cheb.chebval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_chebval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = cheb.chebval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_chebgrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = cheb.chebgrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebgrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_chebgrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = cheb.chebgrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebgrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(object):
def test_chebint(self):
# check exceptions
assert_raises(ValueError, cheb.chebint, [0], .5)
assert_raises(ValueError, cheb.chebint, [0], -1)
assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = cheb.chebint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
chebpol = cheb.poly2cheb(pol)
chebint = cheb.chebint(chebpol, m=1, k=[i])
res = cheb.cheb2poly(chebint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
chebpol = cheb.poly2cheb(pol)
chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(cheb.chebval(-1, chebint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
chebpol = cheb.poly2cheb(pol)
chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2)
res = cheb.cheb2poly(chebint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1)
res = cheb.chebint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1, k=[k])
res = cheb.chebint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1)
res = cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1, k=[k], scl=2)
res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_chebint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T
res = cheb.chebint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([cheb.chebint(c) for c in c2d])
res = cheb.chebint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d])
res = cheb.chebint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(object):
def test_chebder(self):
# check exceptions
assert_raises(ValueError, cheb.chebder, [0], .5)
assert_raises(ValueError, cheb.chebder, [0], -1)
# check that zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = cheb.chebder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = cheb.chebder(cheb.chebint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_chebder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T
res = cheb.chebder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([cheb.chebder(c) for c in c2d])
res = cheb.chebder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_chebvander(self):
# check for 1d x
x = np.arange(3)
v = cheb.chebvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], cheb.chebval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = cheb.chebvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], cheb.chebval(x, coef))
def test_chebvander2d(self):
# also tests chebval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = cheb.chebvander2d(x1, x2, [1, 2])
tgt = cheb.chebval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = cheb.chebvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_chebvander3d(self):
# also tests chebval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3])
tgt = cheb.chebval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(object):
def test_chebfit(self):
def f(x):
return x*(x - 1)*(x - 2)
def f2(x):
return x**4 + x**2 + 1
# Test exceptions
assert_raises(ValueError, cheb.chebfit, [1], [1], -1)
assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0)
assert_raises(TypeError, cheb.chebfit, [], [1], 0)
assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0)
assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0)
assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0)
assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1])
assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,])
assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6])
assert_raises(TypeError, cheb.chebfit, [1], [1], [])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = cheb.chebfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(cheb.chebval(x, coef3), y)
coef3 = cheb.chebfit(x, y, [0, 1, 2, 3])
assert_equal(len(coef3), 4)
assert_almost_equal(cheb.chebval(x, coef3), y)
#
coef4 = cheb.chebfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(cheb.chebval(x, coef4), y)
coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4])
assert_equal(len(coef4), 5)
assert_almost_equal(cheb.chebval(x, coef4), y)
# check things still work if deg is not in strict increasing
coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0])
assert_equal(len(coef4), 5)
assert_almost_equal(cheb.chebval(x, coef4), y)
#
coef2d = cheb.chebfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3])
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = cheb.chebfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        # test scaling with complex-valued x points whose squares sum to zero.
x = [1, 1j, -1, -1j]
assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1])
assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1])
# test fitting only even polynomials
x = np.linspace(-1, 1)
y = f2(x)
coef1 = cheb.chebfit(x, y, 4)
assert_almost_equal(cheb.chebval(x, coef1), y)
coef2 = cheb.chebfit(x, y, [0, 2, 4])
assert_almost_equal(cheb.chebval(x, coef2), y)
assert_almost_equal(coef1, coef2)
class TestInterpolate(object):
def f(self, x):
return x * (x - 1) * (x - 2)
def test_raises(self):
assert_raises(ValueError, cheb.chebinterpolate, self.f, -1)
assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.)
def test_dimensions(self):
for deg in range(1, 5):
assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,))
def test_approximation(self):
def powx(x, p):
return x**p
x = np.linspace(-1, 1, 10)
for deg in range(0, 10):
for p in range(0, deg + 1):
c = cheb.chebinterpolate(powx, deg, (p,))
assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12)
class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, cheb.chebcompanion, [])
assert_raises(ValueError, cheb.chebcompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(cheb.chebcompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5)
class TestGauss(object):
def test_100(self):
x, w = cheb.chebgauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = cheb.chebvander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = np.pi
assert_almost_equal(w.sum(), tgt)
class TestMisc(object):
def test_chebfromroots(self):
res = cheb.chebfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
tgt = [0]*i + [1]
res = cheb.chebfromroots(roots)*2**(i-1)
assert_almost_equal(trim(res), trim(tgt))
def test_chebroots(self):
assert_almost_equal(cheb.chebroots([1]), [])
assert_almost_equal(cheb.chebroots([1, 2]), [-.5])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = cheb.chebroots(cheb.chebfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_chebtrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, cheb.chebtrim, coef, -1)
# Test results
assert_equal(cheb.chebtrim(coef), coef[:-1])
assert_equal(cheb.chebtrim(coef, 1), coef[:-3])
assert_equal(cheb.chebtrim(coef, 2), [0])
def test_chebline(self):
assert_equal(cheb.chebline(3, 4), [3, 4])
def test_cheb2poly(self):
for i in range(10):
assert_almost_equal(cheb.cheb2poly([0]*i + [1]), Tlist[i])
def test_poly2cheb(self):
for i in range(10):
assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-1, 1, 11)[1:-1]
tgt = 1./(np.sqrt(1 + x) * np.sqrt(1 - x))
res = cheb.chebweight(x)
assert_almost_equal(res, tgt)
def test_chebpts1(self):
#test exceptions
assert_raises(ValueError, cheb.chebpts1, 1.5)
assert_raises(ValueError, cheb.chebpts1, 0)
#test points
tgt = [0]
assert_almost_equal(cheb.chebpts1(1), tgt)
tgt = [-0.70710678118654746, 0.70710678118654746]
assert_almost_equal(cheb.chebpts1(2), tgt)
tgt = [-0.86602540378443871, 0, 0.86602540378443871]
assert_almost_equal(cheb.chebpts1(3), tgt)
tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325]
assert_almost_equal(cheb.chebpts1(4), tgt)
def test_chebpts2(self):
#test exceptions
assert_raises(ValueError, cheb.chebpts2, 1.5)
assert_raises(ValueError, cheb.chebpts2, 1)
#test points
tgt = [-1, 1]
assert_almost_equal(cheb.chebpts2(2), tgt)
tgt = [-1, 0, 1]
assert_almost_equal(cheb.chebpts2(3), tgt)
tgt = [-1, -0.5, .5, 1]
assert_almost_equal(cheb.chebpts2(4), tgt)
tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0]
assert_almost_equal(cheb.chebpts2(5), tgt)
if __name__ == "__main__":
run_module_suite()
|
the-stack_0_2357 | # Copyright 2019 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
class _CutPlane():
def __init__(self, flow_data, x1='x', x2='y', x3_value=None):
"""
Initialize CutPlane object. Used to extract a 2D plane from a
        3D vector velocity field.
Args:
flow_data (np.array): 3D vector field of velocity data
x1 (str, optional): first dimension. Defaults to 'x'.
x2 (str, optional): second dimension. Defaults to 'y'.
x3_value (str, optional): third dimension. Defaults to None.
"""
# Assign the axis names
self.x1_name = x1
self.x2_name = x2
        # TODO: if x3 is assumed to be whichever of x, y, or z is not x1 or x2,
        # then we should verify that x1 and x2 are each one of x, y, or z
self.x3_name = [x3 for x3 in ['x', 'y', 'z'] if x3 not in [x1, x2]][0]
# Get x1, x2 and x3 arrays
x1_array = getattr(flow_data, self.x1_name)
x2_array = getattr(flow_data, self.x2_name)
x3_array = getattr(flow_data, self.x3_name)
search_values = np.array(sorted(np.unique(x3_array)))
nearest_idx = (np.abs(search_values - x3_value)).argmin()
nearest_value = search_values[nearest_idx]
print('Nearest value in %s to %.2f is %.2f' %
(self.x3_name, x3_value, nearest_value))
# Select down the data
x3_select_mask = x3_array == nearest_value
# Store the un-interpolated input arrays at this slice
self.x1_in = x1_array[x3_select_mask]
self.x2_in = x2_array[x3_select_mask]
self.u_in = flow_data.u[x3_select_mask]
self.v_in = flow_data.v[x3_select_mask]
self.w_in = flow_data.w[x3_select_mask]
# Initially, x1_lin, x2_lin are unique values of input
self.x1_lin = np.unique(self.x1_in)
self.x2_lin = np.unique(self.x2_in)
# Save the resolution as the number of unique points in x1 and x2
self.resolution = (len(np.unique(self.x1_lin)),
len(np.unique(self.x2_lin)))
# Make initial meshing
self._remesh()
def _remesh(self):
# Mesh and interpolate u, v and w
self.x1_mesh, self.x2_mesh = np.meshgrid(self.x1_lin, self.x2_lin)
self.u_mesh = griddata(
np.column_stack([self.x1_in, self.x2_in]),
self.u_in, (self.x1_mesh.flatten(), self.x2_mesh.flatten()),
method='cubic')
self.v_mesh = griddata(
np.column_stack([self.x1_in, self.x2_in]),
self.v_in, (self.x1_mesh.flatten(), self.x2_mesh.flatten()),
method='cubic')
self.w_mesh = griddata(
np.column_stack([self.x1_in, self.x2_in]),
self.w_in, (self.x1_mesh.flatten(), self.x2_mesh.flatten()),
method='cubic')
# Save flat vectors
self.x1_flat = self.x1_mesh.flatten()
self.x2_flat = self.x2_mesh.flatten()
# Save u-cubed
self.u_cubed = self.u_mesh**3
# Define horizontal subclass
class HorPlane(_CutPlane):
"""
Subclass of _CutPlane. Shortcut to extracting a horizontal plane.
"""
def __init__(self, flow_data, z_value):
"""
Initialize horizontal CutPlane
Args:
flow_data (np.array): 3D vector field of velocity data
z_value (float): vertical position through which to slice
"""
        # Set up by calling the parent constructor
super().__init__(flow_data, x1='x', x2='y', x3_value=z_value)
# Define cross plane subclass
class CrossPlane(_CutPlane):
"""
Subclass of _CutPlane. Shortcut to extracting a cross-stream plane.
"""
def __init__(self, flow_data, x_value):
"""
Initialize cross-stream CutPlane
Args:
flow_data (np.array): 3D vector field of velocity data
x_value (float): streamwise position through which to slice
"""
        # Set up by calling the parent constructor
super().__init__(flow_data, x1='y', x2='z', x3_value=x_value)
# Define cross plane subclass
class VertPlane(_CutPlane):
"""
Subclass of _CutPlane. Shortcut to extracting a streamwise-vertical plane.
"""
def __init__(self, flow_data, y_value):
"""
Initialize streamwise-vertical CutPlane
Args:
flow_data (np.array): 3D vector field of velocity data
y_value (float): spanwise position through which to slice
"""
        # Set up by calling the parent constructor
super().__init__(flow_data, x1='x', x2='z', x3_value=y_value)
## Modification functions
def set_origin(cut_plane, center_x1=0.0, center_x2=0.0):
"""
Establish the origin of a CutPlane object.
Args:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
plane of data.
        center_x1 (float, optional): x1-coordinate of origin.
            Defaults to 0.0.
        center_x2 (float, optional): x2-coordinate of origin.
            Defaults to 0.0.
Returns:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
updated plane of data.
"""
# Store the un-interpolated input arrays at this slice
cut_plane.x1_in = cut_plane.x1_in - center_x1
cut_plane.x2_in = cut_plane.x2_in - center_x2
cut_plane.x1_lin = cut_plane.x1_lin - center_x1
cut_plane.x2_lin = cut_plane.x2_lin - center_x2
# Remesh
cut_plane._remesh()
return cut_plane
def change_resolution(cut_plane, resolution=(100, 100)):
"""
Modify default resolution of a CutPlane object.
Args:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
plane of data.
resolution (tuple, optional): Desired resolution in x1 and x2.
Defaults to (100, 100).
Returns:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
updated plane of data.
"""
# Grid the data
cut_plane.x1_lin = np.linspace(min(cut_plane.x1_in), max(cut_plane.x1_in),
resolution[0])
cut_plane.x2_lin = np.linspace(min(cut_plane.x2_in), max(cut_plane.x2_in),
resolution[1])
# Save the new resolution
cut_plane.resolution = resolution
# Redo the mesh
cut_plane._remesh()
# Return the cutplane
return cut_plane
def interpolate_onto_array(cut_plane, x1_array, x2_array):
"""
Interpolate a CutPlane object onto specified coordinate arrays.
Args:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
plane of data.
x1_array (np.array): specified x1-coordinate.
x2_array (np.array): specified x2-coordinate.
Returns:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
updated plane of data.
"""
# Grid the data given array
cut_plane.x1_lin = x1_array
cut_plane.x2_lin = x2_array
# Save the new resolution
cut_plane.resolution = (len(np.unique(cut_plane.x1_lin)),
len(np.unique(cut_plane.x2_lin)))
# Redo the mesh
cut_plane._remesh()
# Return the cutplane
return cut_plane
def rescale_axis(cut_plane, x1_factor=1.0, x2_factor=1.0):
"""
Stretch or compress CutPlane coordinates.
Args:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
plane of data.
x1_factor (float): scaling factor for x1-coordinate.
x2_factor (float): scaling factor for x2-coordinate.
Returns:
cut_plane (:py:class:`floris.tools.cut_plane._CutPlane`):
updated plane of data.
"""
# Store the un-interpolated input arrays at this slice
cut_plane.x1_in = cut_plane.x1_in / x1_factor
cut_plane.x2_in = cut_plane.x2_in / x2_factor
cut_plane.x1_lin = cut_plane.x1_lin / x1_factor
cut_plane.x2_lin = cut_plane.x2_lin / x2_factor
# Remesh
cut_plane._remesh()
return cut_plane
def calculate_wind_speed(cross_plane, x1_loc, x2_loc, R):
"""
Calculate effective wind speed within specified range of a point.
Args:
cross_plane (:py:class:`floris.tools.cut_plane.CrossPlane`):
plane of data.
        x1_loc (float): x1-coordinate of point of interest.
        x2_loc (float): x2-coordinate of point of interest.
        R (float): radius around the point of interest to consider.
Returns:
(float): effective wind speed
"""
# Make a distance column
distance = np.sqrt((cross_plane.x1_flat - x1_loc)**2 +
(cross_plane.x2_flat - x2_loc)**2)
    # Return the cube-mean-root ("energy-equivalent") wind speed within radius R
return np.cbrt(np.mean(cross_plane.u_cubed[distance < R]))
def calculate_power(cross_plane,
x1_loc,
x2_loc,
R,
ws_array,
cp_array,
air_density=1.225):
"""
Calculate maximum power available in a given cross plane.
Args:
cross_plane (:py:class:`floris.tools.cut_plane.CrossPlane`):
plane of data.
        x1_loc (float): x1-coordinate of point of interest.
        x2_loc (float): x2-coordinate of point of interest.
R (float): Radius of wind turbine rotor.
ws_array (np.array): reference wind speed for cp curve.
cp_array (np.array): cp curve at reference wind speeds.
air_density (float, optional): air density. Defaults to 1.225.
Returns:
        float: estimated available power [W].
"""
# Compute the ws
ws = calculate_wind_speed(cross_plane, x1_loc, x2_loc, R)
# Compute the cp
cp_value = np.interp(ws, ws_array, cp_array)
    # Return the power
return 0.5 * air_density * (np.pi * R**2) * cp_value * ws**3
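# Hedged usage sketch (all names and numbers below are illustrative, not FLORIS
# defaults): estimate rotor-equivalent power from a cross-stream plane with an
# assumed flat Cp curve.
def _example_rotor_power(cross_plane, hub_x1=0.0, hub_x2=90.0, rotor_radius=63.0):
    ws_ref = np.linspace(3.0, 25.0, 50)   # assumed reference wind speeds [m/s]
    cp_ref = np.full_like(ws_ref, 0.45)   # assumed constant power coefficient
    return calculate_power(cross_plane, hub_x1, hub_x2, rotor_radius,
                           ws_ref, cp_ref)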
# def get_profile(self, R, x2_loc, resolution=100, x1_locs=None):
# if x1_locs is None:
# x1_locs = np.linspace(
# min(self.x1_flat), max(self.x1_flat), resolution)
# v_array = np.array([self.calculate_wind_speed(
# x1_loc, x2_loc, R) for x1_loc in x1_locs])
# return x1_locs, v_array)
# def get_power_profile(self, ws_array, cp_array, rotor_radius, air_density=1.225, resolution=100, x1_locs=None):
# # Get the wind speed profile
# x1_locs, v_array = self.get_profile(resolution=resolution, x1_locs=x1_locs)
# # Get Cp
# cp_array = np.interp(v_array,ws_array,cp_array)
# # Return power array
# return x1_locs, 0.5 * air_density * (np.pi * rotor_radius**2) * cp_array * v_array**3
|
the-stack_0_2358 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
#
# Copyright 2009, Benjamin Kampmann <[email protected]>
# Copyright 2014, Hartmut Goebel <[email protected]>
# Copyright 2018, Pol Canelles <[email protected]>
from twisted.internet import reactor
from coherence.base import Coherence
from coherence.upnp.core import DIDLLite
# browse callback
def process_media_server_browse(result, client):
print(f"browsing root of: {client.device.get_friendly_name()}")
print(f"result contains: {result['NumberReturned']}", end=' ')
print(f"out of {result['TotalMatches']} total matches.")
elt = DIDLLite.DIDLElement.fromString(result['Result'])
for item in elt.getItems():
if item.upnp_class.startswith("object.container"):
print(" container", item.title, f"({item.id})", end=' ')
print("with", item.childCount, "items.")
if item.upnp_class.startswith("object.item"):
print(" item", item.title, f"({item.id}).")
# called for each media server found
def media_server_found(device):
print(f"Media Server found: {device.get_friendly_name()}")
d = device.client.content_directory.browse(
0,
browse_flag='BrowseDirectChildren',
process_result=False,
backward_compatibility=False)
d.addCallback(process_media_server_browse, device.client)
# sadly they sometimes get removed as well :(
def media_server_removed(*args):
print(f'Media Server gone: {args}')
def start():
# Initialize coherence and make sure that
# at least we have one server to explore
coherence = Coherence(
{'logmode': 'warning',
'controlpoint': 'yes',
'plugin': [
{'backend': 'LolcatsStore',
'name': 'Cohen3 LolcatsStore',
'proxy': 'no',
},
]
}
)
coherence.bind(coherence_device_detection_completed=media_server_found)
coherence.bind(coherence_device_removed=media_server_removed)
if __name__ == "__main__":
reactor.callWhenRunning(start)
reactor.run()
|