id (string, lengths 1–8) | text (string, lengths 6–1.05M) | dataset_id (stringclasses, 1 value)
---|---|---
373883
|
<reponame>CellProfiling/test-challenge<filename>testchallenge/__main__.py
"""Score predictions for the test challenge."""
from testchallenge.scoring import score
def main():
"""Launch scorer."""
score()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
12809060
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import json
import logging
import sys
from typing import Callable, Any, Dict, Optional, List, Union, Type, TYPE_CHECKING
_LOGGER = logging.getLogger(__name__)
if TYPE_CHECKING:
from azure.core.pipeline.transport._base import _HttpResponseBase
__all__ = [
"AzureError",
"ServiceRequestError",
"ServiceResponseError",
"HttpResponseError",
"DecodeError",
"ResourceExistsError",
"ResourceNotFoundError",
"ClientAuthenticationError",
"ResourceModifiedError",
"ResourceNotModifiedError",
"TooManyRedirectsError",
"ODataV4Format",
"ODataV4Error",
]
def raise_with_traceback(exception, *args, **kwargs):
# type: (Callable, Any, Any) -> None
"""Raise exception with a specified traceback.
This MUST be called inside an "except" clause.
:param Exception exception: Error type to be raised.
:param args: Any additional args to be included with exception.
:keyword str message: Message to be associated with the exception. If omitted, defaults to an empty string.
"""
message = kwargs.pop("message", "")
exc_type, exc_value, exc_traceback = sys.exc_info()
# If not called inside an "except" block, exc_type will be None. Assume that will not happen
if exc_type is None:
raise ValueError("raise_with_traceback can only be used in except clauses")
exc_msg = "{}, {}: {}".format(message, exc_type.__name__, exc_value)
error = exception(exc_msg, *args, **kwargs)
try:
raise error.with_traceback(exc_traceback)
except AttributeError:
error.__traceback__ = exc_traceback
raise error
class ErrorMap(object):
"""Error Map class. To be used in map_error method, behaves like a dictionary.
It returns the error type if it is found in custom_error_map. Or return default_error
:param dict custom_error_map: User-defined error map, it is used to map status codes to error types.
:keyword error default_error: Default error type. It is returned if the status code is not found in custom_error_map
"""
def __init__(self, custom_error_map=None, **kwargs):
self._custom_error_map = custom_error_map or {}
self._default_error = kwargs.pop("default_error", None)
def get(self, key):
ret = self._custom_error_map.get(key)
if ret:
return ret
return self._default_error
def map_error(status_code, response, error_map):
if not error_map:
return
error_type = error_map.get(status_code)
if not error_type:
return
error = error_type(response=response)
raise error
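# Illustrative sketch (not part of the original module): a pipeline could wire
# ErrorMap and map_error together roughly like this, where `response` is a
# hypothetical HTTP response whose status code is 404:
#
#     error_map = ErrorMap({404: ResourceNotFoundError}, default_error=HttpResponseError)
#     map_error(404, response, error_map)  # raises ResourceNotFoundError(response=response)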
class ODataV4Format(object):
"""Class to describe OData V4 error format.
http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
Example of JSON:
error: {
"code": "ValidationError",
"message": "One or more fields contain incorrect values: ",
"details": [
{
"code": "ValidationError",
"target": "representation",
"message": "Parsing error(s): String '' does not match regex pattern '^[^{}/ :]+(?: :\\\\d+)?$'.
Path 'host', line 1, position 297."
},
{
"code": "ValidationError",
"target": "representation",
"message": "Parsing error(s): The input OpenAPI file is not valid for the OpenAPI specificate
https: //github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md
(schema https://github.com/OAI/OpenAPI-Specification/blob/master/schemas/v2.0/schema.json)."
}
]
}
:param dict json_object: A Python dict representing an ODataV4 JSON
:ivar str ~.code: Its value is a service-defined error code.
This code serves as a sub-status for the HTTP error code specified in the response.
:ivar str message: Human-readable, language-dependent representation of the error.
:ivar str target: The target of the particular error (for example, the name of the property in error).
This field is optional and may be None.
:ivar list[ODataV4Format] details: Array of ODataV4Format instances that MUST contain name/value pairs
for code and message, and MAY contain a name/value pair for target, as described above.
:ivar dict innererror: An object. The contents of this object are service-defined.
Usually this object contains information that will help debug the service.
"""
CODE_LABEL = "code"
MESSAGE_LABEL = "message"
TARGET_LABEL = "target"
DETAILS_LABEL = "details"
INNERERROR_LABEL = "innererror"
def __init__(self, json_object):
if "error" in json_object:
json_object = json_object["error"]
cls = self.__class__ # type: Type[ODataV4Format]
# Required fields, but assume they could be missing still to be robust
self.code = json_object.get(cls.CODE_LABEL) # type: Optional[str]
self.message = json_object.get(cls.MESSAGE_LABEL) # type: Optional[str]
if not (self.code or self.message):
raise ValueError("Impossible to extract code/message from received JSON:\n"+json.dumps(json_object))
# Optional fields
self.target = json_object.get(cls.TARGET_LABEL) # type: Optional[str]
# details is recursive of this very format
self.details = [] # type: List[ODataV4Format]
for detail_node in json_object.get(cls.DETAILS_LABEL, []):
try:
self.details.append(self.__class__(detail_node))
except Exception: # pylint: disable=broad-except
pass
self.innererror = json_object.get(cls.INNERERROR_LABEL, {}) # type: Dict[str, Any]
@property
def error(self):
import warnings
warnings.warn(
"error.error from azure exceptions is deprecated, just simply use 'error' once",
DeprecationWarning,
)
return self
def __str__(self):
return "({}) {}\n{}".format(
self.code,
self.message,
self.message_details()
)
def message_details(self):
"""Return a detailled string of the error.
"""
# () -> str
error_str = "Code: {}".format(self.code)
error_str += "\nMessage: {}".format(self.message)
if self.target:
error_str += "\nTarget: {}".format(self.target)
if self.details:
error_str += "\nException Details:"
for error_obj in self.details:
# Indent for visibility
error_str += "\n".join("\t" + s for s in str(error_obj).splitlines())
if self.innererror:
error_str += "\nInner error: {}".format(
json.dumps(self.innererror, indent=4)
)
return error_str
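# Illustrative sketch (assumed usage, mirroring the docstring example above):
#
#     error_body = {"error": {"code": "ValidationError",
#                             "message": "One or more fields contain incorrect values"}}
#     odata = ODataV4Format(error_body)
#     odata.code   # "ValidationError"
#     str(odata)   # "(ValidationError) One or more fields ..." followed by message_details()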
class AzureError(Exception):
"""Base exception for all errors.
:param message: The message object stringified as 'message' attribute
:keyword error: The original exception if any
:paramtype error: Exception
:ivar inner_exception: The exception passed with the 'error' kwarg
:vartype inner_exception: Exception
:ivar exc_type: The exc_type from sys.exc_info()
:ivar exc_value: The exc_value from sys.exc_info()
:ivar exc_traceback: The exc_traceback from sys.exc_info()
:ivar exc_msg: A string formatting of message parameter, exc_type and exc_value
:ivar str message: A stringified version of the message parameter
:ivar str continuation_token: A token reference to continue an incomplete operation. This value is optional
and will be `None` where continuation is either unavailable or not applicable.
"""
def __init__(self, message, *args, **kwargs):
self.inner_exception = kwargs.get("error")
self.exc_type, self.exc_value, self.exc_traceback = sys.exc_info()
self.exc_type = (
self.exc_type.__name__ if self.exc_type else type(self.inner_exception)
)
self.exc_msg = "{}, {}: {}".format(message, self.exc_type, self.exc_value)
self.message = str(message)
self.continuation_token = kwargs.get('continuation_token')
super(AzureError, self).__init__(self.message, *args)
def raise_with_traceback(self):
try:
raise super(AzureError, self).with_traceback(self.exc_traceback)
except AttributeError:
self.__traceback__ = self.exc_traceback
raise self
class ServiceRequestError(AzureError):
"""An error occurred while attempt to make a request to the service.
No request was sent.
"""
class ServiceResponseError(AzureError):
"""The request was sent, but the client failed to understand the response.
The connection may have timed out. These errors can be retried for idempotent or
safe operations."""
class ServiceRequestTimeoutError(ServiceRequestError):
"""Error raised when timeout happens"""
class ServiceResponseTimeoutError(ServiceResponseError):
"""Error raised when timeout happens"""
class HttpResponseError(AzureError):
"""A request was made, and a non-success status code was received from the service.
:param message: HttpResponse's error message
:type message: string
:param response: The response that triggered the exception.
:type response: ~azure.core.pipeline.transport.HttpResponse or ~azure.core.pipeline.transport.AsyncHttpResponse
:ivar reason: The HTTP response reason
:vartype reason: str
:ivar status_code: HttpResponse's status code
:vartype status_code: int
:ivar response: The response that triggered the exception.
:vartype response: ~azure.core.pipeline.transport.HttpResponse or ~azure.core.pipeline.transport.AsyncHttpResponse
:ivar model: The request body/response body model
:vartype model: ~msrest.serialization.Model
:ivar error: The formatted error
:vartype error: ODataV4Format
"""
def __init__(self, message=None, response=None, **kwargs):
# Don't want to document this one yet.
error_format = kwargs.get("error_format", ODataV4Format)
self.reason = None
self.status_code = None
self.response = response
if response:
self.reason = response.reason
self.status_code = response.status_code
# old autorest versions set "error" before calling __init__, so it might be there already
# transferring into self.model
model = kwargs.pop("model", None) # type: Optional[msrest.serialization.Model]
if model is not None: # autorest v5
self.model = model
else: # autorest azure-core, for KV 1.0, Storage 12.0, etc.
self.model = getattr(
self, "error", None
) # type: Optional[msrest.serialization.Model]
self.error = self._parse_odata_body(error_format, response) # type: Optional[ODataV4Format]
# By priority, message is:
# - odatav4 message, OR
# - parameter "message", OR
# - generic message using "reason"
if self.error:
message = str(self.error)
else:
message = message or "Operation returned an invalid status '{}'".format(
self.reason
)
super(HttpResponseError, self).__init__(message=message, **kwargs)
@staticmethod
def _parse_odata_body(error_format, response):
# type: (Type[ODataV4Format], _HttpResponseBase) -> Optional[ODataV4Format]
try:
odata_json = json.loads(response.text())
return error_format(odata_json)
except Exception: # pylint: disable=broad-except
# If the body is not valid JSON, just stop now
pass
return None
class DecodeError(HttpResponseError):
"""Error raised during response deserialization."""
class ResourceExistsError(HttpResponseError):
"""An error response with status code 4xx.
This will not be raised directly by the Azure core pipeline."""
class ResourceNotFoundError(HttpResponseError):
""" An error response, typically triggered by a 412 response (for update) or 404 (for get/post)
"""
class ClientAuthenticationError(HttpResponseError):
"""An error response with status code 4xx.
This will not be raised directly by the Azure core pipeline."""
class ResourceModifiedError(HttpResponseError):
"""An error response with status code 4xx, typically 412 Conflict.
This will not be raised directly by the Azure core pipeline."""
class ResourceNotModifiedError(HttpResponseError):
"""An error response with status code 304.
This will not be raised directly by the Azure core pipeline."""
class TooManyRedirectsError(HttpResponseError):
"""Reached the maximum number of redirect attempts."""
def __init__(self, history, *args, **kwargs):
self.history = history
message = "Reached maximum redirect attempts."
super(TooManyRedirectsError, self).__init__(message, *args, **kwargs)
class ODataV4Error(HttpResponseError):
"""An HTTP response error where the JSON is decoded as OData V4 error format.
http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
:ivar dict odata_json: The parsed JSON body as attribute for convenience.
:ivar str ~.code: Its value is a service-defined error code.
This code serves as a sub-status for the HTTP error code specified in the response.
:ivar str message: Human-readable, language-dependent representation of the error.
:ivar str target: The target of the particular error (for example, the name of the property in error).
This field is optional and may be None.
:ivar list[ODataV4Format] details: Array of ODataV4Format instances that MUST contain name/value pairs
for code and message, and MAY contain a name/value pair for target, as described above.
:ivar dict innererror: An object. The contents of this object are service-defined.
Usually this object contains information that will help debug the service.
"""
_ERROR_FORMAT = ODataV4Format
def __init__(self, response, **kwargs):
# type: (_HttpResponseBase, Any) -> None
# Ensure fields are declared, whatever happens afterwards
self.odata_json = None # type: Optional[Dict[str, Any]]
try:
self.odata_json = json.loads(response.text())
odata_message = self.odata_json.setdefault("error", {}).get("message")
except Exception: # pylint: disable=broad-except
# If the body is not valid JSON, just stop now
odata_message = None
self.code = None # type: Optional[str]
self.message = kwargs.get("message", odata_message) # type: Optional[str]
self.target = None # type: Optional[str]
self.details = [] # type: Optional[List[Any]]
self.innererror = {} # type: Optional[Dict[str, Any]]
if self.message and "message" not in kwargs:
kwargs["message"] = self.message
super(ODataV4Error, self).__init__(response=response, **kwargs)
self._error_format = None # type: Optional[Union[str, ODataV4Format]]
if self.odata_json:
try:
error_node = self.odata_json["error"]
self._error_format = self._ERROR_FORMAT(error_node)
self.__dict__.update(
{
k: v
for k, v in self._error_format.__dict__.items()
if v is not None
}
)
except Exception: # pylint: disable=broad-except
_LOGGER.info("Received error message was not valid OdataV4 format.")
self._error_format = "JSON was invalid for format " + str(
self._ERROR_FORMAT
)
def __str__(self):
if self._error_format:
return str(self._error_format)
return super(ODataV4Error, self).__str__()
|
StarcoderdataPython
|
11257273
|
<filename>smapp_text_classifier/tests/test_vectorizers.py<gh_stars>1-10
import os
import shutil
import pandas as pd
from smapp_text_classifier.vectorizers import (CachedCountVectorizer,
CachedEmbeddingVectorizer)
# Directory for caching during tests
# Warning: if this directory already exists it will be deleted by the test
cache_dir = 'test_cache_dir'
# Test 'documents'
DOCS1 = pd.Series(['hello world', 'hello smapp', 'smapp clf'])
DOCS2 = pd.Series(['first new', 'second smapp', 'new world'])
def vectorizer_test(vec):
# If for some reason directory exists delete contents
if os.path.exists(cache_dir):
for f in os.listdir(cache_dir):
os.unlink(os.path.join(cache_dir, f))
# First transform from scratch
X_first = vec.fit_transform(DOCS1)
# Second transform from cache
X_second = vec.fit_transform(DOCS1)
# Assert that X_first and X_second are the same
assert (X_first != X_second).sum() == 0
# Transform of subset
subset = DOCS1.iloc[:2]
X_subset = vec.transform(subset)
assert (X_first[:2, :] != X_subset).sum() == 0
# Transform of new data
X_new = vec.transform(DOCS2)
assert X_new.shape == X_first.shape
# Clean up
shutil.rmtree(cache_dir)
def test_count_vectorizer():
vec = CachedCountVectorizer(
cache_dir=cache_dir,
ds_name='test',
ngram_range=(1, 1),
analyzer='word'
)
vectorizer_test(vec)
#def test_embedding_vectorizer():
#
# vec = CachedEmbeddingVectorizer(
# embedding_model_name='glove-wiki-gigaword-50',
# cache_dir=cache_dir,
# ds_name='test',
# pooling_method='mean',
# recompute=False,
# )
# vectorizer_test(vec)
|
StarcoderdataPython
|
9739930
|
from util.data_loader import burgers_data_loader
from util.generate_plots import *
# Resolution
n_spatial = 1281
n_temporal = 1001
# Load data
_, _, u_exact = burgers_data_loader(n_spatial=n_spatial, n_temporal=n_temporal)
# generate_contour_and_snapshots_plot(u=u_exact)
generate_contour_and_snapshots_plot(u=u_exact, savefig_path='plots/Fig1_burgers_exact.jpg')
|
StarcoderdataPython
|
6706404
|
<gh_stars>1-10
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import policy_pb2 as policy__pb2
class PolicyServiceStub(object):
"""PolicyService manages policy creation and definition
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.DeletePolicy = channel.unary_unary(
'/pomerium.dashboard.PolicyService/DeletePolicy',
request_serializer=policy__pb2.DeletePolicyRequest.SerializeToString,
response_deserializer=policy__pb2.DeletePolicyResponse.FromString,
)
self.GetPolicy = channel.unary_unary(
'/pomerium.dashboard.PolicyService/GetPolicy',
request_serializer=policy__pb2.GetPolicyRequest.SerializeToString,
response_deserializer=policy__pb2.GetPolicyResponse.FromString,
)
self.ListPolicies = channel.unary_unary(
'/pomerium.dashboard.PolicyService/ListPolicies',
request_serializer=policy__pb2.ListPoliciesRequest.SerializeToString,
response_deserializer=policy__pb2.ListPoliciesResponse.FromString,
)
self.SetPolicy = channel.unary_unary(
'/pomerium.dashboard.PolicyService/SetPolicy',
request_serializer=policy__pb2.SetPolicyRequest.SerializeToString,
response_deserializer=policy__pb2.SetPolicyResponse.FromString,
)
class PolicyServiceServicer(object):
"""PolicyService manages policy creation and definition
"""
def DeletePolicy(self, request, context):
"""DeletePolicy deletes an existing policy
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetPolicy(self, request, context):
"""GetPolicy retrieves an existing policy
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListPolicies(self, request, context):
"""ListPolicies lists existing policies based on the ListPoliciesRequest
parameters
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetPolicy(self, request, context):
"""SetPolicy creates a new policy or, if the id is specified, updates an
existing policy
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_PolicyServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'DeletePolicy': grpc.unary_unary_rpc_method_handler(
servicer.DeletePolicy,
request_deserializer=policy__pb2.DeletePolicyRequest.FromString,
response_serializer=policy__pb2.DeletePolicyResponse.SerializeToString,
),
'GetPolicy': grpc.unary_unary_rpc_method_handler(
servicer.GetPolicy,
request_deserializer=policy__pb2.GetPolicyRequest.FromString,
response_serializer=policy__pb2.GetPolicyResponse.SerializeToString,
),
'ListPolicies': grpc.unary_unary_rpc_method_handler(
servicer.ListPolicies,
request_deserializer=policy__pb2.ListPoliciesRequest.FromString,
response_serializer=policy__pb2.ListPoliciesResponse.SerializeToString,
),
'SetPolicy': grpc.unary_unary_rpc_method_handler(
servicer.SetPolicy,
request_deserializer=policy__pb2.SetPolicyRequest.FromString,
response_serializer=policy__pb2.SetPolicyResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'pomerium.dashboard.PolicyService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class PolicyService(object):
"""PolicyService manages policy creation and definition
"""
@staticmethod
def DeletePolicy(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pomerium.dashboard.PolicyService/DeletePolicy',
policy__pb2.DeletePolicyRequest.SerializeToString,
policy__pb2.DeletePolicyResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetPolicy(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pomerium.dashboard.PolicyService/GetPolicy',
policy__pb2.GetPolicyRequest.SerializeToString,
policy__pb2.GetPolicyResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListPolicies(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pomerium.dashboard.PolicyService/ListPolicies',
policy__pb2.ListPoliciesRequest.SerializeToString,
policy__pb2.ListPoliciesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetPolicy(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pomerium.dashboard.PolicyService/SetPolicy',
policy__pb2.SetPolicyRequest.SerializeToString,
policy__pb2.SetPolicyResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
StarcoderdataPython
|
3457523
|
#! /usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
from tf.transformations import euler_from_quaternion
from geometry_msgs.msg import Point, Twist
from math import atan2, pi, pow, sqrt, cos, sin
from std_msgs.msg import Empty
from time import time
from sensor_msgs.msg import LaserScan
import numpy as np
#initialize values
current_x = 0.0 #current x co-ord of robot
current_y = 0.0 #current y co-ord of robot
current_th = 0.0 #current orientation of the robot
dist = 0.0
turn = 0.0
vel = 0.0
achieved = True
resetted = False
#Set the goal coordinates
goal = Point()
goal.x = 6
goal.y = 2
sub_goal = Point()
sub_goal.x = 0
sub_goal.y = 0
#set the vector increments for a 0.5m radius
def turn_options(index):
global current_x
global current_y
global turn
turn = Point()
angle = 0
x = 0
y = 0
if index == 0:
angle = 90
if index == 1:
angle = 60
if index == 2:
angle = 30
if index == 3:
angle = 0
if index == 4:
angle = -30
if index == 5:
angle = -60
if index == 6:
angle = -90
if index == 7:
angle = -120
if index == 8:
angle = -150
if index == 9:
angle = 180
if index == 10:
angle = 150
if index == 11:
angle = 120
if index == 12:
angle = 90
turn.x = x + 0.5*cos((angle+current_th)*pi/180)
turn.y = y + 0.5*sin((angle+current_th)*pi/180)
return turn
#Obtain the shortest distance to the goal for a particular set of co-ords
def dist_to_goal(x,y):
global goal
vect = sqrt(pow((goal.x -x),2) + pow((goal.y-y),2))
#print ("Distance to goal: %s" %(vect))
return vect
#Is there an obstacle within 1.2m of the robot on a chosen path? If yes, eliminate that path as an option
def check_ranges(distance):
if distance < 1.2:
return False
return True
#Find the angle from the given position to the goal
def find_angle(goal_x,goal_y, x, y):
inc_x = goal_x - x
inc_y = goal_y - y
#use tan to find the angle needed to turn towards the goal
angle_to_goal = atan2(inc_y, inc_x) #tanx = O/A
#convert angle to degrees
angle_to_goal = angle_to_goal*(180/pi)
return angle_to_goal
#set the ranges attributed to each option
def steering(data):
global sub_goal
global achieved
#1000 is dummy value so that a value is not considered
laser_ranges = [None]*7 #an array to store the range values
new_coords = [None]*7 #an array to store potential new co-ords to move to
no_obstruction = [0]*7 #an array to store viable new co-ords (no obstruction present)
closest = [50]*7 #an array to store the distances of the new co-ords from the goal
#In gazebo left to right(0 -> 6), on robot right to left (6 <- 0)
laser = np.asarray(data.ranges)
laser = np.nan_to_num(laser)
laser = np.where(laser == 0, data.range_max + 10, laser) #laser is temp array that converts nan values to maximum range
laser = np.where(laser > 30, data.range_max + 10, laser) #nan is where the distance is outwith range
six = laser[608:719]
five = laser[509:608]
four = laser[409:508]
three = laser[313:408]
two = laser[213:312]
one = laser[113:212]
zero = laser[0:112]
#an array of the ranges
laser_ranges = [zero, one, two, three, four, five, six]
i = 0
j = 0
if(achieved == False):
#print 'I am not there yet!'
return
if(resetted == False):
return
for i in range(7):
if(goal.x < 0):
new_coords[i] = turn_options(i+6)
else:
new_coords[i] = turn_options(i) #adds the new co-ords to the array
closest[i] = dist_to_goal(turn_options(i).x, turn_options(i).y) #adds distance to goal
if min(laser_ranges[i]) > 1.2: #checks if there is an obstruction
no_obstruction[i] = 1 #This is a viable option
else:
no_obstruction[i] = 0
closest[i] = 20*closest[i]
print no_obstruction #*20 to make sure that obstructed co-ords are not seen as closest
print closest
#There is an obstruction present
for j in range(7):
if (no_obstruction[j] == 1) and (closest[j] == min(closest)):
print j #checks laser ranges and dist to goal
sub_goal = new_coords[j]
print 'goals'
print sub_goal.x
print sub_goal.y
achieved = False #sets subgoal to co-ords with no obstructions...
else: #...is closest to the goal
print 'nowhere to go!'
#Odometry callback
def newOdom(msg):
global current_x
global current_y
global current_th
current_x = msg.pose.pose.position.x #set global variable
current_y = msg.pose.pose.position.y #set global variable
roll = pitch = current_th = 0.0
rot_q = msg.pose.pose.orientation
#obtain the angle 'yaw' using a quaternion to euler converter
(roll, pitch, current_th) = euler_from_quaternion([rot_q.x, rot_q.y, rot_q.z, rot_q.w])
#convert the angle to degrees
#th = atan2(y,x)
current_th = current_th*(180/pi)
#set up nodes
rospy.init_node("speed_controller", anonymous = True)
sub = rospy.Subscriber("/odom", Odometry, newOdom)
pub = rospy.Publisher('/cmd_vel_mux/input/teleop', Twist, queue_size =1)
speed = Twist()
# set up the odometry reset publisher
reset_odom = rospy.Publisher('/mobile_base/commands/reset_odometry', Empty, queue_size=10)
scan_sub = rospy.Subscriber('/scan', LaserScan, steering)
# reset odometry values (these messages take a few iterations to get through)
timer = time()
#the longer the timer set the more accurate the odometry initializes
while time() - timer < 1.5:
reset_odom.publish(Empty())
resetted = True
r = rospy.Rate(10)
#Main method
while not rospy.is_shutdown():
#obtain the x,y vector to goal
inc_x = sub_goal.x - current_x
inc_y = sub_goal.y - current_y
#use tan to find the angle needed to turn towards the goal
angle_to_goal = atan2(inc_y, inc_x) #tanx = O/A
#convert angle to degrees
angle_to_goal = angle_to_goal*(180/pi)
#find the difference between the angle of the bot and angle needed to turn
angle = (angle_to_goal-current_th)
print ("x: %s y: %s th: %s" % (current_x, current_y, current_th))
#check if the bot is within a suitable angle to the goal
#4.5 degree error is a compromise between speed and accuracy
if angle > 4.5 or angle < -4.5:
print angle
speed.linear.x = 0.0
if(angle < -4.5):
speed.angular.z = -0.15
if angle >= 4.5:
speed.angular.z = 0.15
elif -4.5 <= angle <= 4.5:
speed.linear.x = 0.3
speed.angular.z = 0.0
#check if the bot has reached the goal
if -0.1 <= inc_x <= 0.1 and -0.1 <= inc_y <= 0.1:
speed.linear.x = 0
speed.angular.z = 0
print 'I am here!'
achieved = True
pub.publish(speed)
r.sleep()
rospy.spin()
|
StarcoderdataPython
|
3225870
|
<gh_stars>0
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
from azure.core.exceptions import HttpResponseError, ResourceExistsError
from azure.storage.blob import BlobServiceClient
try:
import settings_real as settings
except ImportError:
import blob_settings_fake as settings
from testcase import (
StorageTestCase,
TestMode,
record
)
SOURCE_FILE = 'SampleSource.txt'
class TestCommonBlobSamples(StorageTestCase):
connection_string = settings.CONNECTION_STRING
def setUp(self):
data = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit"
with open(SOURCE_FILE, 'wb') as stream:
stream.write(data)
super(TestCommonBlobSamples, self).setUp()
def tearDown(self):
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
for container in ['containerformyblobs', 'containerfordeletedblobs', 'leasemyblobscontainer']:
try:
blob_service_client.delete_container(container)
except HttpResponseError:
pass
if os.path.isfile(SOURCE_FILE):
try:
os.remove(SOURCE_FILE)
except:
pass
return super(TestCommonBlobSamples, self).tearDown()
#--Begin Blob Samples-----------------------------------------------------------------
@record
def test_blob_snapshots(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("containerformyblobs")
# Create new Container
container_client.create_container()
# Upload a blob to the container
with open(SOURCE_FILE, "rb") as data:
container_client.upload_blob(name="my_blob", data=data)
# Get a BlobClient for a specific blob
blob_client = blob_service_client.get_blob_client(container="containerformyblobs", blob="my_blob")
# [START create_blob_snapshot]
# Create a read-only snapshot of the blob at this point in time
snapshot_blob = blob_client.create_snapshot()
# Get the snapshot ID
print(snapshot_blob.get('snapshot'))
# [END create_blob_snapshot]
# Delete only the snapshot (blob itself is retained)
blob_client.delete_blob(delete_snapshots="only")
# Delete container
blob_service_client.delete_container("containerformyblobs")
@record
def test_soft_delete_and_undelete_blob(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Create a retention policy to retain deleted blobs
from azure.storage.blob import RetentionPolicy
delete_retention_policy = RetentionPolicy(enabled=True, days=1)
# Set the retention policy on the service
blob_service_client.set_service_properties(delete_retention_policy=delete_retention_policy)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("containerfordeletedblobs")
# Create new Container
try:
container_client.create_container()
except ResourceExistsError:
# Container already created
pass
# Upload a blob to the container
with open(SOURCE_FILE, "rb") as data:
blob_client = container_client.upload_blob(name="my_blob", data=data)
# Soft delete blob in the container (blob can be recovered with undelete)
blob_client.delete_blob()
# [START undelete_blob]
# Undelete the blob before the retention policy expires
blob_client.undelete_blob()
# [END undelete_blob]
# [START get_blob_properties]
properties = blob_client.get_blob_properties()
# [END get_blob_properties]
assert properties is not None
# Delete container
blob_service_client.delete_container("containerfordeletedblobs")
@record
def test_acquire_lease_on_blob(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("leasemyblobscontainer")
# Create new Container
container_client.create_container()
# Upload a blob to the container
with open(SOURCE_FILE, "rb") as data:
container_client.upload_blob(name="my_blob", data=data)
# Get the blob client
blob_client = blob_service_client.get_blob_client("leasemyblobscontainer", "my_blob")
# [START acquire_lease_on_blob]
# Acquire a lease on the blob
lease = blob_client.acquire_lease()
# Delete blob by passing in the lease
blob_client.delete_blob(lease=lease)
# [END acquire_lease_on_blob]
# Delete container
blob_service_client.delete_container("leasemyblobscontainer")
@record
def test_start_copy_blob_from_url_and_abort_copy(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# Instantiate a ContainerClient
container_client = blob_service_client.get_container_client("copyblobcontainer")
# Create new Container
container_client.create_container()
try:
# [START copy_blob_from_url]
# Get the blob client with the source blob
source_blob = "http://www.gutenberg.org/files/59466/59466-0.txt"
copied_blob = blob_service_client.get_blob_client("copyblobcontainer", '59466-0.txt')
# start copy and check copy status
copy = copied_blob.start_copy_from_url(source_blob)
props = copied_blob.get_blob_properties()
print(props.copy.status)
# [END copy_blob_from_url]
copy_id = props.copy.id
# [START abort_copy_blob_from_url]
# Passing in copy id to abort copy operation
copied_blob.abort_copy(copy_id)
# check copy status
props = copied_blob.get_blob_properties()
print(props.copy.status)
# [END abort_copy_blob_from_url]
finally:
blob_service_client.delete_container("copyblobcontainer")
|
StarcoderdataPython
|
5008197
|
<reponame>almahdiy/IT_PDP_Conference<gh_stars>0
from django.db import models
#For the Q&A session; PDPs are going to be able to submit questions and vote on already submitted questions
class Question(models.Model):
"""
Model for PDPs to submit their questions.
"""
body = models.TextField(default='')
votes = models.IntegerField(default=0)
isAppropriate = models.BooleanField(default=False)
def __str__(self):
return self.body
class Authentication(models.Model):
"""
Authentication mechanism so that only PDPs present at the auditorium can access the website.
"""
sessionID = models.CharField(max_length=250)
#Multiple choice questions for the Ice Breaker activity
class MCQ(models.Model):
"""
Model for the Ice Breaker activity.
One-to-many relationship with MCQOption
One-to-many relationship with MAC
"""
question = models.TextField(default='')
open_for_voting = models.BooleanField(default=True)
class MAC(models.Model):
#Foreign keys represent many-to-one relationships
#having a MAC address associated with an MCQ means that the user of that device
#has already submitted a vote for this question, so they cannot vote multiple times
# MCQs = models.ManyToManyField(MCQ, null=True)
mac_address = models.IntegerField(default=0)
class MCQOption(models.Model):
#Foreign keys represent many-to-one relationships
MCQ_id = models.ForeignKey('MCQ', on_delete=models.CASCADE, null=True,) # When the question is deleted, the option will also be deleted.
option = models.TextField(default='')
totalVotes = models.IntegerField(default=0)
#table to help validate the vote before
class OptionVoting(models.Model):
MCQ_id = models.ForeignKey('MCQ', on_delete=models.CASCADE, null=True,) # When the question is deleted, the option will also be deleted.
#MCQOption_id = models.ForeignKey('MCQOption', on_delete=models.CASCADE, null=True,) # When the question is deleted, the option will also be deleted.
unique = models.IntegerField(default=0) #MAC address, IP, ...etc
#table to help validate the vote before
class QuestionVoting(models.Model):
question_id = models.ForeignKey('Question', on_delete=models.CASCADE, null=True,) # When the question is deleted, the option will also be deleted.
unique = models.CharField(default="0.0.0.0", max_length=15) #MAC address, IP, ...etc
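# Illustrative sketch (assumed usage, not part of the original models): a view could
# guard against duplicate votes by recording the voter's identifier per question, e.g.
#
#     _, created = QuestionVoting.objects.get_or_create(question_id=question, unique=ip)
#     if created:
#         question.votes += 1
#         question.save()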
|
StarcoderdataPython
|
207589
|
<filename>imputeTSpy/locf.py
import numpy as np
import pandas as pd
from check_data import check_data, consecutive
from tsAirgap import ts_airgap, ts_heating, ts_nh4
#from impyute.ops import error
<EMAIL>
<EMAIL>
def locf(data, na_remaining = "rev", maxgap = None):
""" Last Observation Carried Forward
For each set of missing indices, use the value of one row before(same
column). In the case that the missing value is the first row, look one
row ahead instead. If this next row is also NaN, look to the next row.
Repeat until you find a row in this column that's not NaN. All the rows
before will be filled with this value.
Parameters
----------
data: numpy.array, list or pandas.Series
Data to impute.
na_remaining : Method to be used for remaining NaNs (if missing values appear in the first observations) :
"keep" - to return the series with NAs
"mean" - to replace remaining NAs by the overall mean
"rev" - to perform nocb / locf from the reverse direction
maxgap : Maximum number of successive NaNs to still perform imputation on. The default is to replace all NaNs without restriction. With this option set, consecutive NaN runs that are longer than 'maxgap' will be left as NaN. This option mostly makes sense if you want to treat long runs of NaN separately afterwards.
Returns
-------
numpy.array
Imputed data.
Examples
------
import imputeTSpy
data = imputeTSpy.ts_nh4()
data_fill_locf = imputeTSpy.locf(data)
data_fill_nocb = imputeTSpy.nocb(data)
"""
data = check_data(data)
nan_xy = np.argwhere(np.isnan(data))
nan_xy_idx = np.array([x[0] for x in nan_xy])
if maxgap != None :
z = consecutive(nan_xy_idx)
exc = []
for i in range(len(z)) :
if len(z[i]) > maxgap :
exc.extend(z[i])
nan_xy_idx = nan_xy_idx[np.isin(nan_xy_idx, exc) == False]
else :
pass
n = data.shape[0]
n_int = np.arange(n)#[x for x in range(n)]
data_cp = data.copy()
for i in nan_xy_idx :
try :
cdd = n_int [n_int < i]
idx_rep = np.max(cdd[np.isin(cdd, nan_xy_idx) == False])
data_cp[i] = data_cp[idx_rep]
except :
if na_remaining == "rev" :
cdd = n_int [n_int > i]
idx_rep = np.min(cdd[np.isin(cdd, nan_xy_idx) == False])
data_cp[i] = data_cp[idx_rep]
elif na_remaining == "mean":
idx_rep = np.mean(data[np.isnan(data) == False])
data_cp[i] = idx_rep
elif na_remaining == "keep":
pass
else :
raise("the option is invalid, please fill valid option!!!!")
return data_cp
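# Illustrative sketch (assumed usage): with maxgap set, runs of NaNs longer than
# maxgap are left untouched, e.g.
#
#     import numpy as np
#     data = np.array([1.0, np.nan, np.nan, np.nan, 5.0])
#     locf(data)            # all three NaNs are imputed
#     locf(data, maxgap=2)  # the run of three consecutive NaNs is left as NaN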
def nocb(data, axis=0, na_remaining = "rev", maxgap = None):
""" Next Observation Carried Backward
For each set of missing indices, use the value of one row after (same
column). In the case that the missing value is in the last row, look one
row back instead. If this previous row is also NaN, keep looking backwards.
Repeat until you find a row in this column that's not NaN. All the rows
after will be filled with this value.
Parameters
----------
data: numpy.array, list or pandas.Series
Data to impute.
na_remaining : Method to be used for remaining NaNs (if missing values appear in the last observations) :
"keep" - to return the series with NAs
"mean" - to replace remaining NAs by the overall mean
"rev" - to perform nocb / locf from the reverse direction
maxgap : Maximum number of successive NaNs to still perform imputation on. The default is to replace all NaNs without restriction. With this option set, consecutive NaN runs that are longer than 'maxgap' will be left as NaN. This option mostly makes sense if you want to treat long runs of NaN separately afterwards.
Returns
-------
numpy.ndarray
Imputed data.
Examples
------
import imputeTSpy
data = imputeTSpy.ts_nh4()
data_fill_locf = imputeTSpy.locf(data)
data_fill_nocb = imputeTSpy.nocb(data)
"""
data = check_data(data)
nan_xy = np.argwhere(np.isnan(data))
nan_xy_idx = np.array([x[0] for x in nan_xy])
if maxgap != None :
z = consecutive(nan_xy_idx)
exc = []
for i in range(len(z)) :
if len(z[i]) > maxgap :
exc.extend(z[i])
nan_xy_idx = nan_xy_idx[np.isin(nan_xy_idx, exc) == False]
else :
pass
n = data.shape[0]
n_int = np.arange(n)#[x for x in range(n)]
data_cp = data.copy()
for i in nan_xy_idx :
try :
cdd = n_int [n_int > i]
idx_rep = np.min(cdd[np.isin(cdd, nan_xy_idx) == False])
data_cp[i] = data_cp[idx_rep]
except :
if na_remaining == "rev" :
cdd = n_int [n_int < i]
idx_rep = np.max(cdd[np.isin(cdd, nan_xy_idx) == False])
data_cp[i] = data_cp[idx_rep]
elif na_remaining == "mean":
idx_rep = np.mean(data[np.isnan(data) == False])
data_cp[i] = idx_rep
elif na_remaining == "keep":
pass
else :
raise("the option is invalid, please fill valid option!!!!")
return data_cp
#data = ts_nh4()
#data[-2:] =[np.nan, np.nan]
#nan_xy = np.argwhere(np.isnan(data))
#nan_xy_idx = np.array([x[0] for x in nan_xy])
#n = data.shape[0]
#n_int = np.arange(n)#[x for x in range(n)]
#
#np.diff(np.append(i, z)) != 1
#max_gap = 10
#
#
##z = nan_xy_idx[nan_xy_idx > i]
##a = np.array([0, 47, 48, 49, 50, 97, 98, 99])
#if maxgap != None :
# z = consecutive(nan_xy_idx)
# exc = []
# for i in range(len(z)) :
# if len(z[i]) > max_gap :
# exc.extend(z[i])
# nan_xy_idx = nan_xy_idx[np.isin(nan_xy_idx, exc) == False]
#else :
# pass
#
#data_cp = data.copy()
#na_remaining = "mean"
#for i in nan_xy_idx :
# try :
# cdd = n_int [n_int > i]
# idx_rep = np.min(cdd[np.isin(cdd, nan_xy_idx) == False])
# data_cp[i] = data_cp[idx_rep]
# except :
# if na_remaining == "rev" :
# cdd = n_int [n_int < i]
# idx_rep = np.max(cdd[np.isin(cdd, nan_xy_idx) == False])
# data_cp[i] = data_cp[idx_rep]
# elif na_remaining == "mean":
# idx_rep = np.nanmean(data)
# data_cp[i] = idx_rep
# elif na_remaining == "keep":
# pass
# else :
# raise("the option is invalid, please fill valid option!!!!")
#
#
#z = nan_xy_idx[nan_xy_idx > i]
|
StarcoderdataPython
|
8017562
|
from django.contrib.auth import authenticate, login, logout
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import serializers
from rest_framework import status
from styleguide_example.api.mixins import ApiAuthMixin
from styleguide_example.users.selectors import user_get_login_data
class UserLoginApi(APIView):
"""
Following https://docs.djangoproject.com/en/3.1/topics/auth/default/#how-to-log-a-user-in
"""
class InputSerializer(serializers.Serializer):
email = serializers.EmailField()
password = serializers.CharField()
def post(self, request):
serializer = self.InputSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
print(request.user)
user = authenticate(request, **serializer.validated_data)
print(user)
if user is None:
return Response(status=status.HTTP_401_UNAUTHORIZED)
login(request, user)
data = user_get_login_data(user=user)
session_key = request.session.session_key
return Response({
'session': session_key,
'data': data
})
class UserLogoutApi(APIView):
def get(self, request):
logout(request)
return Response()
def post(self, request):
logout(request)
return Response()
class UserMeApi(ApiAuthMixin, APIView):
def get(self, request):
data = user_get_login_data(user=request.user)
return Response(data)
|
StarcoderdataPython
|
9608463
|
from sys import exit
from os import path
from glob import glob
from PIL import Image
from PIL.ExifTags import TAGS
from plotly.graph_objects import Layout, Figure
def main():
# get path to directory
filepath = input('Path to directory (blank if PWD): ')
# check if valid path
if filepath != '':
if not path.isdir(filepath):
exit('Invalid path')
data, lats, lons, flag, good, bad = [], [None, None], [None, None], False, 0, 0
# ask if user wants to show images' metadata (added for my zine)
meta = input('\nShow metadata? (y/n): ')
if meta.lower() in ('yes', 'y'):
metaflag = True
elif meta.lower() in ('no', 'n'):
metaflag = False
else:
exit('Invalid response')
# read all files in dir
for file in glob(path.join(filepath, '*')):
file = file.lower()
# skip non-image files
if not file.endswith('.jpg') and not file.endswith('.jpeg') and not file.endswith('.png'):
continue
# extract Exif data from image
exif = {
TAGS[key]: val
for key, val in Image.open(file).getexif().items()
if key in TAGS
}
# extract GPS + datetime
try:
loc = exif['GPSInfo']
dt = exif['DateTimeOriginal']
good += 1
# skip if either missing
except KeyError:
bad += 1
continue
# extract latitude and longitude
lat = { 'dir': loc[1], 'deg': loc[2][0], 'min': loc[2][1], 'sec': loc[2][2] }
lon = { 'dir': loc[3], 'deg': loc[4][0], 'min': loc[4][1], 'sec': loc[4][2] }
# clean and print metadata
if metaflag:
cleanLat = str(lat['deg']) + '° ' + str(lat['min']) + '\' ' + str(lat['sec']) + '\" ' + str(lat['dir'])
cleanLon = str(lon['deg']) + '° ' + str(lon['min']) + '\' ' + str(lon['sec']) + '\" ' + str(lon['dir'])
print(f'File: {file} Latitude: {cleanLat} Longitude: {cleanLon} Time: {dt}')
# calculate full coordinate with degree, minute, second
truLat = float(lat['deg'] + (lat['min'] / 60.0) + (lat['sec'] / 3600.0))
truLon = float(lon['deg'] + (lon['min'] / 60.0) + (lon['sec'] / 3600.0))
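# Worked example of the DMS-to-decimal conversion above (illustrative):
# 40° 26' 46" -> 40 + 26/60 + 46/3600 = 40.446111...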
# calculate mins and maxes
if flag:
lons[0], lons[1] = min(lons[0], truLon), max(lons[1], truLon)
lats[0], lats[1] = min(lats[0], truLat), max(lats[1], truLat)
# first time just assign values and flip flag
else:
lons[0], lons[1] = truLon, truLon
lats[0], lats[1] = truLat, truLat
flag = True
data.append({
'img': file, 'lat': lat, 'lon': lon, 'datetime': dt, 'truLat': truLat, 'truLon': truLon
})
# not enough valid images
if good <= 1:
exit('Didn\'t find enough valid image files for a visualization.')
print(f'\nExtracted metadata from {good} files. Unable to extract from {bad}.\n')
# prompt for viz choice
q = input('Please enter the number corresponding to your visualization of choice:\n1: Unsorted path\n2: Sorted path\n3: Both paths overlaid\n\n#: ')
# validate user input
while q not in ('1', '2', '3'):
q = input('#: ')
q = int(q)
coords, sortedCoords, unSortedData = 'M ', 'M ', None
# copy data, add first point
if q == 1 or q == 3:
unSortedData = data.copy()
coords += str(unSortedData[0]['truLat']) + ',' + str(unSortedData[0]['truLon']) + ' '
# sort data, add first point
if q == 2 or q == 3:
data.sort(key = lambda x:x['datetime'])
sortedCoords += str(data[0]['truLat']) + ',' + str(data[0]['truLon']) + ' '
# append rest of points
for i in range(1, good):
if q == 1 or q == 3:
coords += ('L' + str(unSortedData[i]['truLat']) + ',' + str(unSortedData[i]['truLon']) + ' ')
if q == 2 or q == 3:
sortedCoords += ('L' + str(data[i]['truLat']) + ',' + str(data[i]['truLon']) + ' ')
paths = []
# if using unsorted, append path
if coords != 'M ':
paths.append({ 'type': 'path', 'path': coords, 'line_color': '#3CB371' })
# if using sorted, append path
if sortedCoords != 'M ':
paths.append({ 'type': 'path', 'path': sortedCoords, 'line_color': '#6666FF' })
fig = Figure(layout = Layout(plot_bgcolor = 'RGBA(1,1,1,0)'))
# draw axes from min to max
fig.update_xaxes(range = [lats[0], lats[1]], color = '#FFFFFF')
fig.update_yaxes(range = [lons[0], lons[1]], color = '#FFFFFF')
fig.update_layout(shapes = paths)
fig.show()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
329890
|
import os.path
import re
import string
import unicodedata
from typing import List, Sequence
def clean_word(word: str, allowed_chars: str = string.ascii_letters) -> str:
"""
Remove all accents and non-allowed characters from the given word, and uppercase it
>>> clean_word('Mongolië')
'MONGOLIE'
"""
word = remove_accents(word).upper()
return ''.join(letter for letter in word if letter in allowed_chars)
def remove_accents(word: str) -> str:
"""
Return the same word with all accents removed
>>> remove_accents('Mongolië')
'Mongolie'
"""
nfkd_form = unicodedata.normalize('NFKD', word)
return u''.join([c for c in nfkd_form if not unicodedata.combining(c)])
def remove_word_from_word(word_to_remove: str, total_word: str) -> str:
"""
Remove the letters from word_to_remove from total_word
>>> remove_word_from_word('ZOEN', 'ZOEVEN')
'VE'
>>> remove_word_from_word('ZOEK', 'ZOEVEN')
'VEN'
"""
for letter in word_to_remove:
total_word = re.sub(letter, '', total_word, count=1)
return total_word
def get_root_dir():
return os.path.dirname(os.path.dirname(__file__))
def get_evenly_spaced_elements(nr_elements: int, input_sequence: Sequence) -> List:
"""
Use Bresenham's line algorithm to select `nr_elements` spaced evenly from the `input_sequence`
Sequences are the general term for ordered sets. Among others, these are lists or Django's QuerySets.
Definition: https://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm
Implementation: https://stackoverflow.com/a/9873804
"""
len_sequence = len(input_sequence)
if nr_elements > len_sequence:
nr_elements = len_sequence
indices = [i * len_sequence // nr_elements + len_sequence // (2 * nr_elements) for i in range(nr_elements)]
return [input_sequence[index] for index in indices]
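# Illustrative sketch (assumed usage): selecting 3 evenly spaced elements from a
# sequence of 10 picks indices 1, 4 and 7:
#
#     get_evenly_spaced_elements(3, list(range(10)))  # -> [1, 4, 7]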
if __name__ == '__main__':
print(clean_word('TROMPETNEUSHOORN'))
print(clean_word('Mongolië'))
|
StarcoderdataPython
|
1861028
|
<reponame>AYaddaden/attention-learn-to-route
# module genius.py
#
# Copyright (c) 2018 <NAME>
#
"""
genius module - Implements GENIUS, an algorithm for generation of a solution.
"""
__version__="1.0"
from pctsp.model.pctsp import *
from pctsp.model import solution
import numpy as np
def genius(pctsp):
s = solution.random(pctsp, size=3)
s = geni(pctsp, s)
s = us(pctsp, s)
return s
def geni(pctsp, s):
return
def us(pctsp, s):
return
|
StarcoderdataPython
|
5185685
|
from thefuck import utils
from thefuck.utils import replace_argument
@utils.git_support
def match(command, settings):
return ('fatal: Not a git repository' in command.stderr
and "Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set)." in command.stderr)
@utils.git_support
def get_new_command(command, settings):
return replace_argument(command.script, 'pull', 'clone')
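# Illustrative sketch (assumed behaviour): when `git pull <url>` fails outside a
# repository with "fatal: Not a git repository" plus the GIT_DISCOVERY_ACROSS_FILESYSTEM
# message, this rule rewrites the command as `git clone <url>`, e.g. for a
# hypothetical Command(script='git pull https://example.com/repo.git', stderr=...):
#
#     get_new_command(command, settings)  # -> 'git clone https://example.com/repo.git'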
|
StarcoderdataPython
|
3494435
|
import numpy as np
import json
grid = np.fromfile("altfilter.bin", dtype='uint8')
area = {
"NE": "",
"NW": "",
"SE": "",
"SW": ""
}
def exist(lng, lat, x, y):
grid_x = (lng + 180) * 3 + x
grid_y = (lat + 90) * 3 + y
byte = grid_x / 8
bit = 1 << (grid_x % 8)
return grid[grid_y * 135 + byte] & bit != 0
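# Worked example of the bit-packed lookup above (illustrative): for lng=0, lat=0,
# x=0, y=0 the cell index is grid_x = 540, grid_y = 270, so byte = 540 / 8 = 67
# (integer division), bit = 1 << (540 % 8) = 16, and the lookup reads
# grid[270 * 135 + 67] & 16.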
def base64(value):
b64 = ""
while value != 0:
bit6 = value & 0x3f
value >>= 6
encode = chr(bit6 + 0x3f)
b64 += encode
while len(b64)<4:
b64 += "="
return b64
def add_area(lng, lat, grid_pattern):
if lng<0:
if lat<0:
lng = -lng
lat = -lat
a = "SW"
else:
lng = -lng
a = "NW"
else:
if lat<0:
lat = - lat
a = "SE"
else:
a = "NE"
lng_lat_b64 = base64((lat<<17) + (lng<<9) + grid_pattern)
coordinate = lng_lat_b64
area[a] += coordinate
for lat in xrange(0, 180):
print(lat)
for lng in xrange(0, 360):
grid_pattern = 0
for y in xrange(0,3):
for x in xrange(0,3):
if exist(lng-180, lat-90, x, y):
bit = 1 << (y*3+x)
grid_pattern |= bit
if grid_pattern > 0:
add_area(lng-180, lat-90, grid_pattern)
output = open("altfilter.json", "w")
area_json = json.dumps(area)
output.write(area_json)
output.close()
|
StarcoderdataPython
|
6401691
|
<gh_stars>1-10
def Owasp_top():
nc='\033[0m'
green='\033[0;32m'
print(green+"\n-----Owasp-top-10-----")
print(green+"\nThis option tell about the 10 most common application vulnerabilities.\n\ncommand:\n\netw --info or netw --i\nthen it will provide you list of those vulneabilites\n\nselect any one of them to know about them."+nc)
def password():
nc='\033[0m'
green='\033[0;32m'
print(green+"\n-----password_detalis-----")
print(green+"\nThis option is used to crack passwords\ncommand:\nnetw --pass"+nc)
def cve_detalis():
nc='\033[0m'
green='\033[0;32m'
print(green+"\n-----cve_detalis-----")
print(green+"\nThis option gives you cve details\nCVE, short for Common Vulnerabilities and Exposures, \nis a list of publicly disclosed computer security flaws. \nWhen someone refers to a CVE, they mean a security flaw that's been assigned a CVE ID number.\nSecurity advisories issued by vendors and researchers almost always mention at least one CVE ID. \nCVEs help IT professionals coordinate their efforts to prioritize and address these vulnerabilities \nto make computer systems more secure.\ncommand:\nnetw --cve\nexample\nnetw --cve CVE-2019-19781" +nc )
def apache_server():
nc='\033[0m'
green='\033[0;32m'
print(green+"\n-----apache_server-----")
print(green + "\nThis option is used to start an Apache server.\ncommand:\nnetw --server"+nc)
def directory_finder():
nc='\033[0m'
green='\033[0;32m'
print(green+"\n-----directory_finder----")
print(green+"\nThis option is used to find the directory\ncommand:\nnetw --d"+nc)
def nmap_scan():
nc='\033[0m'
green='\033[0;32m'
print(green+"\n-----nmap_scan-----")
print(green+"\nThis option is used to check the number of open ports in an IP address\ncommand:\nnetw --scan or netw --s (ip address)"+nc)
def payload_venom():
nc='\033[0m'
green='\033[0;32m'
print(green+"\n-----payload_venom-----")
print(green+"\nHere we give the payload number which tells in which platform or language it is going to be used.\nAfter the command is executed it prints the directory where the payload is created.\nThe -p flag: Specifies what payload to generate\nThe -f flag: Specifies the format of the payload\ncommand:\nnetw --payload (number)"+nc)
def hash_identify():
nc='\033[0m'
green='\033[0;32m'
print(green+"\n-----hash_identify-----")
print(green+"\nAn input will be give as hash.\nIt will tell which hash algorithm is used.\ncommand:\nnetw --hash"+nc)
def searchsploit():
nc='\033[0m'
green='\033[0;32m'
print(green+"\n-----searchsploit-----")
print(green+"\nIt will give scripts/vulnerabilites of the parameter or technologies given in command.\nWe can also download the script.\ncommand:\nnetw --search"+nc)
|
StarcoderdataPython
|
3267728
|
<reponame>maclema/aws-parallelcluster<gh_stars>100-1000
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import os as os_lib
import pytest
from assertpy import assert_that
from pcluster.aws.iam import IamClient
from tests.utils import MockedBoto3Request
@pytest.fixture()
def boto3_stubber_path():
return "pcluster.aws.common.boto3"
def test_get_instance_profile(boto3_stubber):
os_lib.environ["AWS_DEFAULT_REGION"] = "us-east-1"
instance_profile_name = "mocked_instance_profile_name"
response = {
"InstanceProfile": {
"InstanceProfileName": instance_profile_name,
"Path": "/",
"InstanceProfileId": "SomeIdOfLengthAtLeastSixteen",
"Arn": f"arn:aws:iam:::instance-profile/{instance_profile_name}",
"CreateDate": "2021-07-28",
"Roles": [
{
"Arn": f"arn:aws:iam:::role/{instance_profile_name}",
"Path": "/",
"RoleName": instance_profile_name,
"RoleId": "AnotherIdOfLengthAtLeastSixteen",
"CreateDate": "2021-07-28",
}
],
}
}
mocked_requests = [
MockedBoto3Request(
method="get_instance_profile",
response=response,
expected_params={"InstanceProfileName": instance_profile_name},
)
]
boto3_stubber("iam", mocked_requests)
assert_that(IamClient().get_instance_profile(instance_profile_name)).is_equal_to(response)
def test_get_role(boto3_stubber):
os_lib.environ["AWS_DEFAULT_REGION"] = "us-east-1"
role_name = "mocked_role_name"
response = {
"Role": {
"RoleName": role_name,
"Path": "/",
"RoleId": "SomeIdOfLengthAtLeastSixteen",
"Arn": f"arn:aws:iam:::role/{role_name}",
"CreateDate": "2021-07-28",
}
}
mocked_requests = [
MockedBoto3Request(
method="get_role",
response=response,
expected_params={"RoleName": role_name},
)
]
boto3_stubber("iam", mocked_requests)
assert_that(IamClient().get_role(role_name)).is_equal_to(response)
def test_get_policy(boto3_stubber):
os_lib.environ["AWS_DEFAULT_REGION"] = "us-east-1"
policy_name = "mocked_policy_name"
policy_arn = f"arn:aws:iam:::policy/{policy_name}"
response = {
"Policy": {
"PolicyName": policy_name,
"Path": "/",
"PolicyId": "SomeIdOfLengthAtLeastSixteen",
"Arn": policy_arn,
"CreateDate": "2021-07-28",
}
}
mocked_requests = [
MockedBoto3Request(
method="get_policy",
response=response,
expected_params={"PolicyArn": policy_arn},
)
]
boto3_stubber("iam", mocked_requests)
assert_that(IamClient().get_policy(policy_arn)).is_equal_to(response)
|
StarcoderdataPython
|
3369365
|
<gh_stars>0
import os
"""Default configuration
Use env var to override
"""
DEBUG = True
SECRET_KEY = "changeme"
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or "sqlite:////tmp/myapi.db"
SQLALCHEMY_TRACK_MODIFICATIONS = False
JWT_BLACKLIST_ENABLED = True
JWT_BLACKLIST_TOKEN_CHECKS = ['access', 'refresh']
CELERY_BROKER_URL = "amqp://guest:guest@localhost/"
CELERY_RESULT_BACKEND = "amqp://guest:guest@localhost/"
|
StarcoderdataPython
|
120848
|
import odespy
from vib_odespy import run_solvers_and_plot, RHS, \
VibSolverWrapper4Odespy, plt
from numpy import pi, sin
# Primary ODE: m=1, s(u)=(2*pi)**2*u, such that the period is 1.
# Then we add linear damping and a force term A*sin(w*t) where
# w is half and double of the frequency of the free oscillations.
ODEs = [
(RHS(b=0.1), 'Small damping, no forcing'),
(RHS(b=0.4), 'Medium damping, no forcing'),
(RHS(b=0.4, F=lambda t: 1*sin(0.5*pi*t)),
'Medium damping, medium forcing w/smaller frequency'),
(RHS(b=0.4, F=lambda t: 10*sin(0.5*pi*t)),
'Medium damping, large forcing w/smaller frequency'),
(RHS(b=1.2, F=lambda t: 10*sin(0.5*pi*t)),
'Strong damping, large forcing w/smaller frequency'),
(RHS(b=0.4, F=lambda t: 1*sin(2*pi*t)),
'Medium damping, medium forcing w/larger frequency'),
(RHS(b=0.4, F=lambda t: 10*sin(2*pi*t)),
'Medium damping, large forcing w/larger frequency'),
(RHS(b=1.2, F=lambda t: 10*sin(2*pi*t)),
'Strong damping, large forcing w/larger frequency'),
]
for rhs, title in ODEs:
solvers = [
odespy.ForwardEuler(rhs),
# Implicit methods must use Newton solver to converge
odespy.BackwardEuler(rhs, nonlinear_solver='Newton'),
odespy.CrankNicolson(rhs, nonlinear_solver='Newton'),
VibSolverWrapper4Odespy(rhs),
]
T = 20 # Period is 1
dt = 0.05 # 20 steps per period
filename = 'FEBNCN_' + title.replace(', ', '_').replace('w/', '')
title = title + ' (dt=%g)' % dt
plt.figure()
run_solvers_and_plot(solvers, rhs, T, dt, title=title,
filename=filename)
plt.show()
raw_input()
|
StarcoderdataPython
|
171433
|
"""Run parallel shallow water domain.
run using command like:
mpiexec -np m python run_parallel_sw_merimbula.py
where m is the number of processors to be used.
Will produce sww files with names domain_Pn_m.sww where m is the number of processors and
n in [0, m-1] refers to the specific processor that owned this part of the partitioned mesh.
"""
from __future__ import print_function
from __future__ import division
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
from builtins import str
from builtins import range
from past.utils import old_div
from future.utils import raise_
import os
import sys
import time
from anuga.utilities import parallel_abstraction as pypar
import numpy as num
import unittest
import tempfile
from struct import pack, unpack
from anuga.file.netcdf import NetCDFFile
import copy
#------------------------
# ANUGA Modules
#------------------------
from anuga.utilities.numerical_tools import ensure_numeric
from anuga.utilities.util_ext import double_precision
from anuga.utilities.norms import l1_norm, l2_norm, linf_norm
from anuga import Domain
from anuga import Reflective_boundary
from anuga import Dirichlet_boundary
from anuga import Time_boundary
from anuga import Transmissive_boundary
from anuga import File_boundary
from anuga.file.mux import WAVEHEIGHT_MUX2_LABEL, EAST_VELOCITY_MUX2_LABEL, \
NORTH_VELOCITY_MUX2_LABEL
from anuga.file.mux import read_mux2_py
from anuga.file_conversion.urs2sts import urs2sts
from anuga.file.urs import Read_urs
from anuga.file.sts import create_sts_boundary
from anuga.utilities.numerical_tools import ensure_numeric
from anuga.coordinate_transforms.redfearn import redfearn
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga import rectangular_cross_domain
from anuga.pmesh.mesh_interface import create_mesh_from_regions
from anuga import create_domain_from_file
from anuga.parallel import distribute, myid, numprocs, send, receive, barrier, finalize
from anuga.file.tests.test_mux import Test_Mux
verbose = False
class Test_urs2sts_parallel(Test_Mux):
""" A suite of tests to test urs2sts file conversion functions.
These tests are quite coarse-grained: converting a file
and checking that its headers and some of its contents
are correct.
"""
def sequential_time_varying_file_boundary_sts(self):
"""sequential_ltest_time_varying_file_boundary_sts_sequential(self):
Read correct points from ordering file and apply sts to boundary. The boundary is time varying. FIXME add to test_urs2sts.
"""
lat_long_points=[[6.01,97.0],[6.02,97.0],[6.05,96.9],[6.0,97.0]]
bounding_polygon=[[6.0,97.0],[6.01,97.0],[6.02,97.0],
[6.02,97.02],[6.00,97.02]]
tide = 3.0
time_step_count = 65
time_step = 2.
n=len(lat_long_points)
first_tstep=num.ones(n,int)
last_tstep=(time_step_count)*num.ones(n,int)
finaltime=float(time_step*(time_step_count-1))
yieldstep=float(time_step)
gauge_depth=20*num.ones(n,float)
ha=2*num.ones((n,time_step_count),float)
ua=10*num.ones((n,time_step_count),float)
va=-10*num.ones((n,time_step_count),float)
times=num.arange(0., float(time_step_count*time_step), time_step)
for i in range(n):
#ha[i]+=num.sin(times)
ha[i]+=old_div(times,finaltime)
sts_file="test"
if myid==0:
base_name, files = self.write_mux2(lat_long_points,
time_step_count,
time_step,
first_tstep,
last_tstep,
depth=gauge_depth,
ha=ha,
ua=ua,
va=va)
# base name will not exist, but 3 other files are created
# Write order file
file_handle, order_base_name = tempfile.mkstemp("")
os.close(file_handle)
os.remove(order_base_name)
d=","
order_file=order_base_name+'order.txt'
fid=open(order_file,'w')
# Write Header
header='index, longitude, latitude\n'
fid.write(header)
indices=[3,0,1]
for i in indices:
line=str(i)+d+str(lat_long_points[i][1])+d+\
str(lat_long_points[i][0])+"\n"
fid.write(line)
fid.close()
urs2sts(base_name,
basename_out=sts_file,
ordering_filename=order_file,
mean_stage=tide,
verbose=verbose)
self.delete_mux(files)
assert(os.access(sts_file+'.sts', os.F_OK))
os.remove(order_file)
barrier()
boundary_polygon = create_sts_boundary(sts_file)
# Append the remaining part of the boundary polygon to be defined by
# the user
bounding_polygon_utm=[]
for point in bounding_polygon:
zone,easting,northing=redfearn(point[0],point[1])
bounding_polygon_utm.append([easting,northing])
boundary_polygon.append(bounding_polygon_utm[3])
boundary_polygon.append(bounding_polygon_utm[4])
assert num.allclose(bounding_polygon_utm,boundary_polygon)
extent_res=1000000
meshname = 'urs_test_mesh' + '.tsh'
interior_regions=None
boundary_tags={'ocean': [0,1], 'otherocean': [2,3,4]}
# have to change boundary tags from last example because now bounding
# polygon starts in different place.
if myid==0:
create_mesh_from_regions(boundary_polygon,
boundary_tags=boundary_tags,
maximum_triangle_area=extent_res,
filename=meshname,
interior_regions=interior_regions,
verbose=verbose)
barrier()
domain_fbound = Domain(meshname)
domain_fbound.set_quantities_to_be_stored(None)
domain_fbound.set_quantity('stage', tide)
if verbose: print("Creating file boundary condition")
Bf = File_boundary(sts_file+'.sts',
domain_fbound,
boundary_polygon=boundary_polygon)
Br = Reflective_boundary(domain_fbound)
domain_fbound.set_boundary({'ocean': Bf,'otherocean': Br})
temp_fbound=num.zeros(int(old_div(finaltime,yieldstep))+1,float)
if verbose: print("Evolving domain with file boundary condition")
for i, t in enumerate(domain_fbound.evolve(yieldstep=yieldstep,
finaltime=finaltime,
skip_initial_step = False)):
temp_fbound[i]=domain_fbound.quantities['stage'].centroid_values[2]
if verbose: domain_fbound.write_time()
domain_drchlt = Domain(meshname)
domain_drchlt.set_quantities_to_be_stored(None)
domain_drchlt.set_starttime(time_step)
domain_drchlt.set_quantity('stage', tide)
Br = Reflective_boundary(domain_drchlt)
#Bd = Dirichlet_boundary([2.0+tide,220+10*tide,-220-10*tide])
Bd = Time_boundary(domain=domain_drchlt, f=lambda t: [2.0+old_div(t,finaltime)+tide,220.+10.*tide+old_div(10.*t,finaltime),-220.-10.*tide-old_div(10.*t,finaltime)])
#Bd = Time_boundary(domain=domain_drchlt,f=lambda t: [2.0+num.sin(t)+tide,10.*(2+20.+num.sin(t)+tide),-10.*(2+20.+num.sin(t)+tide)])
domain_drchlt.set_boundary({'ocean': Bd,'otherocean': Br})
temp_drchlt=num.zeros(int(old_div(finaltime,yieldstep))+1,float)
for i, t in enumerate(domain_drchlt.evolve(yieldstep=yieldstep,
finaltime=finaltime,
skip_initial_step = False)):
temp_drchlt[i]=domain_drchlt.quantities['stage'].centroid_values[2]
#domain_drchlt.write_time()
#print domain_fbound.quantities['stage'].vertex_values
#print domain_drchlt.quantities['stage'].vertex_values
assert num.allclose(temp_fbound,temp_drchlt),temp_fbound-temp_drchlt
assert num.allclose(domain_fbound.quantities['stage'].vertex_values,
domain_drchlt.quantities['stage'].vertex_values)
assert num.allclose(domain_fbound.quantities['xmomentum'].vertex_values,
domain_drchlt.quantities['xmomentum'].vertex_values)
assert num.allclose(domain_fbound.quantities['ymomentum'].vertex_values,
domain_drchlt.quantities['ymomentum'].vertex_values)
if not sys.platform == 'win32':
if myid==0: os.remove(sts_file+'.sts')
if myid==0: os.remove(meshname)
def parallel_time_varying_file_boundary_sts(self):
""" parallel_test_time_varying_file_boundary_sts_sequential(self):
Read correct points from ordering file and apply sts to boundary.
The boundary is time varying. Compares sequential result with
distributed result found using anuga_parallel
"""
#------------------------------------------------------------
# Define test variables
#------------------------------------------------------------
lat_long_points=[[6.01,97.0],[6.02,97.0],[6.05,96.9],[6.0,97.0]]
bounding_polygon=[[6.0,97.0],[6.01,97.0],[6.02,97.0],
[6.02,97.02],[6.00,97.02]]
tide = 3.0
time_step_count = 65
time_step = 2
n=len(lat_long_points)
first_tstep=num.ones(n,int)
last_tstep=(time_step_count)*num.ones(n,int)
finaltime=float(time_step*(time_step_count-1))
yieldstep=float(time_step)
gauge_depth=20*num.ones(n,float)
ha=2*num.ones((n,time_step_count),float)
ua=10*num.ones((n,time_step_count),float)
va=-10*num.ones((n,time_step_count),float)
times=num.arange(0, time_step_count*time_step, time_step)
for i in range(n):
#ha[i]+=num.sin(times)
ha[i]+=old_div(times,finaltime)
#------------------------------------------------------------
# Write mux data to file then convert to sts format
#------------------------------------------------------------
sts_file="test"
if myid==0:
base_name, files = self.write_mux2(lat_long_points,
time_step_count,
time_step,
first_tstep,
last_tstep,
depth=gauge_depth,
ha=ha,
ua=ua,
va=va)
# base name will not exist, but 3 other files are created
# Write order file
file_handle, order_base_name = tempfile.mkstemp("")
os.close(file_handle)
os.remove(order_base_name)
d=","
order_file=order_base_name+'order.txt'
fid=open(order_file,'w')
# Write Header
header='index, longitude, latitude\n'
fid.write(header)
indices=[3,0,1]
for i in indices:
line=str(i)+d+str(lat_long_points[i][1])+d+\
str(lat_long_points[i][0])+"\n"
fid.write(line)
fid.close()
urs2sts(base_name,
basename_out=sts_file,
ordering_filename=order_file,
mean_stage=tide,
verbose=verbose)
self.delete_mux(files)
assert(os.access(sts_file+'.sts', os.F_OK))
os.remove(order_file)
barrier()
#------------------------------------------------------------
# Define boundary_polygon on each processor. This polygon defines the
# urs boundary and lies on a portion of the bounding_polygon
#------------------------------------------------------------
boundary_polygon = create_sts_boundary(sts_file)
# Append the remaining part of the boundary polygon to be defined by
# the user
bounding_polygon_utm=[]
for point in bounding_polygon:
zone,easting,northing=redfearn(point[0],point[1])
bounding_polygon_utm.append([easting,northing])
boundary_polygon.append(bounding_polygon_utm[3])
boundary_polygon.append(bounding_polygon_utm[4])
assert num.allclose(bounding_polygon_utm,boundary_polygon)
extent_res=10000
meshname = 'urs_test_mesh' + '.tsh'
interior_regions=None
boundary_tags={'ocean': [0,1], 'otherocean': [2,3,4]}
#------------------------------------------------------------
# Create mesh on the master processor and store in file. This file
# is read in by each slave processor when needed
#------------------------------------------------------------
if myid==0:
create_mesh_from_regions(boundary_polygon,
boundary_tags=boundary_tags,
maximum_triangle_area=extent_res,
filename=meshname,
interior_regions=interior_regions,
verbose=verbose)
# barrier()
domain_fbound = Domain(meshname)
domain_fbound.set_quantities_to_be_stored(None)
domain_fbound.set_quantity('stage', tide)
# print domain_fbound.mesh.get_boundary_polygon()
else:
domain_fbound=None
barrier()
if ( verbose and myid == 0 ):
print('DISTRIBUTING PARALLEL DOMAIN')
domain_fbound = distribute(domain_fbound)
#--------------------------------------------------------------------
# Find which sub_domain in which the interpolation points are located
#
# Sometimes the interpolation points sit exactly
# between two centroids, so in the parallel run we
# reset the interpolation points to the centroids
# found in the sequential run
#--------------------------------------------------------------------
interpolation_points = [[279000,664000], [280250,664130],
[279280,665400], [280500,665000]]
interpolation_points=num.array(interpolation_points)
#if myid==0:
# import pylab as P
# boundary_polygon=num.array(boundary_polygon)
# P.plot(boundary_polygon[:,0],boundary_polygon[:,1])
# P.plot(interpolation_points[:,0],interpolation_points[:,1],'ko')
# P.show()
fbound_gauge_values = []
fbound_proc_tri_ids = []
for i, point in enumerate(interpolation_points):
fbound_gauge_values.append([]) # Empty list for timeseries
try:
k = domain_fbound.get_triangle_containing_point(point)
if domain_fbound.tri_full_flag[k] == 1:
fbound_proc_tri_ids.append(k)
else:
fbound_proc_tri_ids.append(-1)
except:
fbound_proc_tri_ids.append(-2)
if verbose: print('P%d has points = %s' %(myid, fbound_proc_tri_ids))
#------------------------------------------------------------
# Set boundary conditions
#------------------------------------------------------------
Bf = File_boundary(sts_file+'.sts',
domain_fbound,
boundary_polygon=boundary_polygon)
Br = Reflective_boundary(domain_fbound)
domain_fbound.set_boundary({'ocean': Bf,'otherocean': Br})
#------------------------------------------------------------
# Evolve the domain on each processor
#------------------------------------------------------------
for i, t in enumerate(domain_fbound.evolve(yieldstep=yieldstep,
finaltime=finaltime,
skip_initial_step = False)):
stage = domain_fbound.get_quantity('stage')
for i in range(4):
if fbound_proc_tri_ids[i] > -1:
fbound_gauge_values[i].append(stage.centroid_values[fbound_proc_tri_ids[i]])
#------------------------------------------------------------
        # Create domain to be run sequentially on each processor
#------------------------------------------------------------
domain_drchlt = Domain(meshname)
domain_drchlt.set_quantities_to_be_stored(None)
domain_drchlt.set_starttime(time_step)
domain_drchlt.set_quantity('stage', tide)
Br = Reflective_boundary(domain_drchlt)
#Bd = Dirichlet_boundary([2.0+tide,220+10*tide,-220-10*tide])
Bd = Time_boundary(domain=domain_drchlt, function=lambda t: [2.0+old_div(t,finaltime)+tide,220.+10.*tide+old_div(10.*t,finaltime),-220.-10.*tide-old_div(10.*t,finaltime)])
#Bd = Time_boundary(domain=domain_drchlt,function=lambda t: [2.0+num.sin(t)+tide,10.*(2+20.+num.sin(t)+tide),-10.*(2+20.+num.sin(t)+tide)])
domain_drchlt.set_boundary({'ocean': Bd,'otherocean': Br})
drchlt_gauge_values = []
drchlt_proc_tri_ids = []
for i, point in enumerate(interpolation_points):
drchlt_gauge_values.append([]) # Empty list for timeseries
try:
k = domain_drchlt.get_triangle_containing_point(point)
if domain_drchlt.tri_full_flag[k] == 1:
drchlt_proc_tri_ids.append(k)
else:
drchlt_proc_tri_ids.append(-1)
except:
drchlt_proc_tri_ids.append(-2)
if verbose: print('P%d has points = %s' %(myid, drchlt_proc_tri_ids))
#------------------------------------------------------------
# Evolve entire domain on each processor
#------------------------------------------------------------
for i, t in enumerate(domain_drchlt.evolve(yieldstep=yieldstep,
finaltime=finaltime,
skip_initial_step = False)):
stage = domain_drchlt.get_quantity('stage')
for i in range(4):
drchlt_gauge_values[i].append(stage.centroid_values[drchlt_proc_tri_ids[i]])
#------------------------------------------------------------
# Compare sequential values with parallel values
#------------------------------------------------------------
barrier()
success = True
for i in range(4):
if fbound_proc_tri_ids[i] > -1:
fbound_gauge_values[i]=num.array(fbound_gauge_values[i])
drchlt_gauge_values[i]=num.array(drchlt_gauge_values[i])
#print i,fbound_gauge_values[i][4]
#print i,drchlt_gauge_values[i][4]
success = success and num.allclose(fbound_gauge_values[i], drchlt_gauge_values[i])
assert success#, (fbound_gauge_values[i]-drchlt_gauge_values[i])
#assert_(success)
if not sys.platform == 'win32':
if myid==0: os.remove(sts_file+'.sts')
if myid==0: os.remove(meshname)
# Because we are doing assertions outside of the TestCase class
# the PyUnit defined assert_ function can't be used.
def assert_(condition, msg="Assertion Failed"):
if condition == False:
#pypar.finalize()
raise_(AssertionError, msg)
# Test an nprocs-way run of the shallow water equations
# against the sequential code.
if __name__=="__main__":
#verbose=False
if myid ==0 and verbose:
print('PARALLEL START')
suite = unittest.makeSuite(Test_urs2sts_parallel,'parallel_test')
#suite = unittest.makeSuite(Test_urs2sts_parallel,'sequential_test')
runner = unittest.TextTestRunner()
runner.run(suite)
#------------------------------------------
    # Run the code and compare sequential
# results at 4 gauge stations
#------------------------------------------
finalize()
|
StarcoderdataPython
|
3462470
|
<reponame>gooaah/GraphINVENT<filename>tools/utils.py
"""
Miscellaneous functions.
"""
import rdkit
from rdkit.Chem.rdmolfiles import SmilesMolSupplier
def load_molecules(path : str) -> rdkit.Chem.rdmolfiles.SmilesMolSupplier:
"""
Reads a SMILES file (full path/filename specified by `path`) and returns the
`rdkit.Mol` object "supplier".
"""
# check first line of SMILES file to see if contains header
with open(path) as smi_file:
first_line = smi_file.readline()
has_header = bool("SMILES" in first_line)
smi_file.close()
# read file
molecule_set = SmilesMolSupplier(path, sanitize=True, nameColumn=-1, titleLine=has_header)
return molecule_set
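# Hedged usage sketch; "data/example.smi" is a hypothetical path, not a file shipped
# with this repository.
if __name__ == "__main__":
    supplier = load_molecules("data/example.smi")
    # unparsable SMILES entries come back as None, so skip them before counting
    print(sum(1 for mol in supplier if mol is not None))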
|
StarcoderdataPython
|
6552177
|
import pytest
from .. import (expression_walker, expressions, logic,
solver_datalog_extensional_db)
from ..exceptions import NeuroLangException
from ..existential_datalog import (ExistentialDatalog, Implication,
SolverNonRecursiveExistentialDatalog)
from ..expressions import ExpressionBlock, Query
from ..solver_datalog_naive import NULL, UNDEFINED, Fact
C_ = expressions.Constant
S_ = expressions.Symbol
F_ = expressions.FunctionApplication
EP_ = logic.ExistentialPredicate
class SolverWithoutExistentialResolution(
ExistentialDatalog,
solver_datalog_extensional_db.ExtensionalDatabaseSolver,
expression_walker.ExpressionBasicEvaluator,
):
pass
class SolverWithExistentialResolution(
SolverNonRecursiveExistentialDatalog,
solver_datalog_extensional_db.ExtensionalDatabaseSolver,
expression_walker.ExpressionBasicEvaluator,
):
pass
def test_existential_intensional_database():
solver = SolverWithoutExistentialResolution()
x = S_('x')
y = S_('y')
P = S_('P')
Q = S_('Q')
solver.walk(Implication(EP_(y, P(x, y)), Q(x)))
assert 'P' in solver.symbol_table
assert 'P' in solver.existential_intensional_database()
assert 'P' not in solver.intensional_database()
def test_bad_existential_formulae():
solver = SolverWithoutExistentialResolution()
x = S_('x')
y = S_('y')
P = S_('P')
Q = S_('Q')
with pytest.raises(NeuroLangException):
solver.walk(Implication(EP_(y, P(x, y)), Q(x, y)))
with pytest.raises(NeuroLangException):
solver.walk(Implication(EP_(y, P(x)), Q(x)))
solver.walk(Implication(EP_(y, P(x, y)), Q(x)))
def test_existential_statement_added_to_symbol_table():
solver = SolverWithoutExistentialResolution()
x = S_('x')
y = S_('y')
z = S_('z')
P = S_('P')
Q = S_('Q')
solver.walk(Implication(EP_(y, P(x, y)), Q(x)))
assert 'P' in solver.symbol_table
assert len(solver.symbol_table['P'].formulas) == 1
assert isinstance(
solver.symbol_table['P'].formulas[0].consequent,
logic.ExistentialPredicate
)
solver = SolverWithoutExistentialResolution()
solver.walk(Implication(EP_(x, EP_(y, P(x, y, z))), Q(z)))
assert 'P' in solver.symbol_table
def test_existential_statement_resolution():
solver = SolverWithExistentialResolution()
x = S_('x')
P = S_('P')
Q = S_('Q')
a = C_('a')
b = C_('b')
extensional = ExpressionBlock((
Fact(Q(a)),
Fact(Q(b)),
))
solver.walk(extensional)
assert 'Q' in solver.symbol_table
query = Query(x, Q(x))
result = solver.walk(query)
assert isinstance(result, expressions.Constant)
assert result.value is not None
assert result.value == frozenset({'a', 'b'})
y = S_('y')
P = S_('P')
solver.walk(Implication(EP_(y, P(x, y)), Q(x)))
query = Query(x, EP_(y, P(x, y)))
result = solver.walk(query)
assert isinstance(result, expressions.Constant)
assert result.value is not None
assert result.value == frozenset({'a', 'b'})
u = S_('u')
v = S_('v')
query = Query(u, EP_(v, P(u, v)))
result = solver.walk(query)
assert isinstance(result, expressions.Constant)
assert result.value is not None
assert result.value == frozenset({'a', 'b'})
def test_existential_statement_resolution_undefined():
solver = SolverWithExistentialResolution()
x = S_('x')
y = S_('y')
P = S_('P')
Q = S_('Q')
a = C_('a')
b = C_('b')
extensional = ExpressionBlock((
Fact(Q(a)),
Fact(Q(b)),
))
solver.walk(extensional)
solver.walk(Implication(EP_(y, P(x, y)), Q(x)))
assert 'P' in solver.symbol_table
query = Query(y, P(x, y))
result = solver.walk(query)
assert result is UNDEFINED
def test_function_application_on_null_returns_false():
solver = SolverWithExistentialResolution()
x = S_('x')
y = S_('y')
P = S_('P')
Q = S_('Q')
a = C_('a')
b = C_('b')
extensional = ExpressionBlock((
Fact(Q(a)),
Fact(Q(b)),
))
solver.walk(extensional)
f = Implication(EP_(y, P(x, y)), Q(x))
res = solver.walk(f(NULL, NULL))
assert isinstance(res, expressions.Constant)
assert not res.value
def test_existential_and_query_resolution():
solver = SolverWithExistentialResolution()
x = S_('x')
y = S_('y')
P = S_('P')
Q = S_('Q')
a = C_('a')
b = C_('b')
extensional = ExpressionBlock((
Fact(Q(a)),
Fact(Q(b)),
Fact(P(a)),
))
solver.walk(extensional)
query = Query(x, P(x) & EP_(y, Q(y)))
result = solver.walk(query)
assert isinstance(result, expressions.Constant)
assert result.value is not None
assert result.value == frozenset({'a'})
def test_multiple_existential_variables_in_consequent():
solver = SolverWithExistentialResolution()
x = S_('x')
y = S_('y')
z = S_('z')
P = S_('P')
Q = S_('Q')
a = C_('a')
b = C_('b')
extensional = ExpressionBlock((
Fact(Q(a)),
Fact(Q(b)),
))
solver.walk(extensional)
solver.walk(Implication(EP_(x, EP_(y, P(x, y, z))), Q(z)))
query = Query(z, EP_(x, EP_(y, P(x, y, z))))
result = solver.walk(query)
assert isinstance(result, expressions.Constant)
assert result.value is not None
assert result.value == frozenset({'a', 'b'})
def test_multiple_existential_variables_in_consequent_undefined():
solver = SolverWithExistentialResolution()
x = S_('x')
y = S_('y')
z = S_('z')
P = S_('P')
Q = S_('Q')
a = C_('a')
b = C_('b')
extensional = ExpressionBlock((
Fact(Q(a)),
Fact(Q(b)),
))
solver.walk(extensional)
solver.walk(Implication(EP_(x, EP_(y, P(x, y, z))), Q(z)))
query = Query(x, EP_(y, EP_(z, P(x, y, z))))
result = solver.walk(query)
assert result is UNDEFINED
def test_cannot_mix_existential_and_non_existential_rule_definitions():
solver = SolverWithoutExistentialResolution()
x = S_('x')
y = S_('y')
P = S_('P')
Q = S_('Q')
R = S_('R')
solver.walk(Implication(EP_(y, P(x, y)), Q(x)))
assert 'P' in solver.symbol_table
assert 'P' in solver.existential_intensional_database()
with pytest.raises(NeuroLangException):
solver.walk(Implication(P(x, y), R(x, y)))
def test_existential_rule_with_constant_in_consequent():
solver = SolverWithoutExistentialResolution()
x = S_('x')
y = S_('y')
P = S_('P')
Q = S_('Q')
solver.walk(Implication(EP_(y, P(x, y, C_(1))), Q(x)))
assert 'P' in solver.symbol_table
|
StarcoderdataPython
|
3378598
|
#----------------------------------------------------------------------------
# Name: datetimeparser.py
#
# Purpose: - Instantiate datetime.datetime/date instance from a string
# date representation.
# Uses dateutil from http://labix.org/python-dateutil.
#
# - Creates string representation of datetime/date instance.
#
#
# Author: <NAME>
#
# Created: 28-Feb-06
# CVS-ID:
# Copyright: (c) 2005 ActiveGrid, Inc.
# License: wxWindows License
#----------------------------------------------------------------------------
import datetime
try:
import dateutil.parser
DATEUTIL_INSTALLED = True
except ImportError:
DATEUTIL_INSTALLED = False
ISO_8601_DATE_FORMAT = "%Y-%m-%d"
ISO_8601_TIME_FORMAT = "%H:%M:%S"
ISO_8601_DATETIME_FORMAT = "%s %s" %(ISO_8601_DATE_FORMAT,
ISO_8601_TIME_FORMAT)
DEFAULT_DATETIME = datetime.datetime(1, 1, 1, 0, 0, 0, 0)
def format(dateobj, formatstr=None):
if (formatstr != None and _isDateTimeObject(dateobj)):
return dateobj.strftime(str(formatstr))
return str(dateobj)
def parse(datestr, formatstr=None, asdate=False, astime=False):
"""Instantiates and returns a datetime instance from the datestr datetime
representation.
Optionally, a format string may be used. The format is only loosely
interpreted, its only purpose beeing to determine if the year is first
or last in datestr, and whether the day is in front or follows the
month. If no formatstr is passed in, dateutil tries its best to parse
the datestr. The default date format is YYYY-mm-dd HH:SS.
If asdate is True, returns a date instance instead of a datetime
instance, if astime is True, returns a time instance instead of a
datetime instance."""
dayfirst, yearfirst = _getDayFirstAndYearFirst(formatstr)
rtn = None
try:
if DATEUTIL_INSTALLED:
rtn = dateutil.parser.parse(str(datestr), fuzzy=True,
dayfirst=dayfirst, yearfirst=yearfirst,
default=DEFAULT_DATETIME)
else:
rtn = DEFAULT_DATETIME
except:
rtn = DEFAULT_DATETIME
if (asdate and isinstance(rtn, datetime.datetime)):
rtn = datetime.date(rtn.year, rtn.month, rtn.day)
elif (astime and isinstance(rtn, datetime.datetime)):
rtn = datetime.time(rtn.hour, rtn.minute, rtn.second, rtn.microsecond)
return rtn
def _isDateTimeObject(obj):
return (isinstance(obj, datetime.datetime) or
isinstance(obj, datetime.date) or
isinstance(obj, datetime.time))
def _getDayFirstAndYearFirst(formatstr):
dayFirst = False
yearFirst = False
gotYear = False
gotMonth = False
gotDay = False
if (formatstr == None):
formatstr = ""
for c in formatstr:
if (c.lower() == "y"):
if (gotYear):
continue
if (not gotDay and not gotMonth):
yearFirst = True
gotYear = True
elif (c.lower() == "m"):
if (gotMonth):
continue
if (not gotDay):
dayFirst = False
gotMonth = True
elif (c.lower() == "d"):
if (gotDay):
continue
if (not gotMonth):
dayFirst = True
gotDay = True
return dayFirst, yearFirst
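# Hedged usage sketch; the printed values assume python-dateutil is installed.
if __name__ == "__main__":
    print(parse("2006-02-28 13:45:00"))                              # 2006-02-28 13:45:00
    print(parse("28/02/2006", formatstr="%d/%m/%Y", asdate=True))    # 2006-02-28
    print(format(datetime.date(2006, 2, 28), ISO_8601_DATE_FORMAT))  # 2006-02-28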
|
StarcoderdataPython
|
11349870
|
<gh_stars>0
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
import collections
from aiida.orm.implementation.general.group import AbstractGroup
from aiida.common.exceptions import (ModificationNotAllowed, UniquenessError,
NotExistent)
from aiida.orm.implementation.django.node import Node
from aiida.backends.djsite.utils import get_automatic_user
from django.db import transaction, IntegrityError
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
from aiida.orm.implementation.general.utils import get_db_columns
class Group(AbstractGroup):
def __init__(self, **kwargs):
from aiida.backends.djsite.db.models import DbGroup
dbgroup = kwargs.pop('dbgroup', None)
if dbgroup is not None:
if isinstance(dbgroup, (int, long)):
try:
dbgroup = DbGroup.objects.get(pk=dbgroup)
except ObjectDoesNotExist:
raise NotExistent("Group with pk={} does not exist".format(
dbgroup))
if not isinstance(dbgroup, DbGroup):
raise TypeError("dbgroup is not a DbGroup instance, it is "
"instead {}".format(str(type(dbgroup))))
if kwargs:
raise ValueError("If you pass a dbgroups, you cannot pass any "
"further parameter")
self._dbgroup = dbgroup
else:
name = kwargs.pop('name', None)
if name is None:
raise ValueError("You have to specify a group name")
group_type = kwargs.pop('type_string',
"") # By default, an user group
user = kwargs.pop('user', get_automatic_user())
description = kwargs.pop('description', "")
self._dbgroup = DbGroup(name=name, description=description,
user=user, type=group_type)
if kwargs:
raise ValueError("Too many parameters passed to Group, the "
"unknown parameters are: {}".format(
", ".join(kwargs.keys())))
@staticmethod
def get_db_columns():
# from aiida.backends.djsite.db.models import DbGroup
from aiida.backends.djsite.querybuilder_django.dummy_model import DbGroup
return get_db_columns(DbGroup)
@property
def name(self):
return self.dbgroup.name
@property
def description(self):
return self.dbgroup.description
@description.setter
def description(self, value):
self.dbgroup.description = value
# Update the entry in the DB, if the group is already stored
if self.is_stored:
self.dbgroup.save()
@property
def type_string(self):
return self.dbgroup.type
@property
def user(self):
return self.dbgroup.user
@property
def dbgroup(self):
return self._dbgroup
@property
def pk(self):
return self._dbgroup.pk
@property
def id(self):
return self._dbgroup.pk
@property
def uuid(self):
return unicode(self.dbgroup.uuid)
def __int__(self):
if self._to_be_stored:
return None
else:
return self._dbnode.pk
@property
def is_stored(self):
return self.pk is not None
def store(self):
if not self.is_stored:
try:
with transaction.atomic():
self.dbgroup.save()
except IntegrityError:
raise UniquenessError("A group with the same name (and of the "
"same type) already "
"exists, unable to store")
# To allow to do directly g = Group(...).store()
return self
def add_nodes(self, nodes):
from aiida.backends.djsite.db.models import DbNode
if not self.is_stored:
raise ModificationNotAllowed("Cannot add nodes to a group before "
"storing")
# First convert to a list
if isinstance(nodes, (Node, DbNode)):
nodes = [nodes]
if isinstance(nodes, basestring) or not isinstance(
nodes, collections.Iterable):
raise TypeError("Invalid type passed as the 'nodes' parameter to "
"add_nodes, can only be a Node, DbNode, or a list "
"of such objects, it is instead {}".format(
str(type(nodes))))
list_pk = []
for node in nodes:
if not isinstance(node, (Node, DbNode)):
raise TypeError("Invalid type of one of the elements passed "
"to add_nodes, it should be either a Node or "
"a DbNode, it is instead {}".format(
str(type(node))))
if node.pk is None:
raise ValueError("At least one of the provided nodes is "
"unstored, stopping...")
list_pk.append(node.pk)
self.dbgroup.dbnodes.add(*list_pk)
@property
def nodes(self):
class iterator(object):
def __init__(self, dbnodes):
self.dbnodes = dbnodes
self.generator = self._genfunction()
def _genfunction(self):
for n in self.dbnodes:
yield n.get_aiida_class()
def __iter__(self):
return self
def __len__(self):
return self.dbnodes.count()
# For future python-3 compatibility
def __next__(self):
return self.next()
def next(self):
return next(self.generator)
return iterator(self.dbgroup.dbnodes.all())
def remove_nodes(self, nodes):
from aiida.backends.djsite.db.models import DbNode
if not self.is_stored:
raise ModificationNotAllowed("Cannot remove nodes from a group "
"before storing")
# First convert to a list
if isinstance(nodes, (Node, DbNode)):
nodes = [nodes]
if isinstance(nodes, basestring) or not isinstance(
nodes, collections.Iterable):
raise TypeError("Invalid type passed as the 'nodes' parameter to "
"remove_nodes, can only be a Node, DbNode, or a "
"list of such objects, it is instead {}".format(
str(type(nodes))))
list_pk = []
for node in nodes:
if not isinstance(node, (Node, DbNode)):
raise TypeError("Invalid type of one of the elements passed "
"to add_nodes, it should be either a Node or "
"a DbNode, it is instead {}".format(
str(type(node))))
if node.pk is None:
raise ValueError("At least one of the provided nodes is "
"unstored, stopping...")
list_pk.append(node.pk)
self.dbgroup.dbnodes.remove(*list_pk)
@classmethod
def query(cls, name=None, type_string="", pk=None, uuid=None, nodes=None,
user=None, node_attributes=None, past_days=None,
name_filters=None, **kwargs):
from aiida.backends.djsite.db.models import (DbGroup, DbNode,
DbAttribute)
# Analyze args and kwargs to create the query
queryobject = Q()
if name is not None:
queryobject &= Q(name=name)
if type_string is not None:
queryobject &= Q(type=type_string)
if pk is not None:
queryobject &= Q(pk=pk)
if uuid is not None:
queryobject &= Q(uuid=uuid)
if past_days is not None:
queryobject &= Q(time__gte=past_days)
if nodes is not None:
pk_list = []
if not isinstance(nodes, collections.Iterable):
nodes = [nodes]
for node in nodes:
if not isinstance(node, (Node, DbNode)):
raise TypeError("At least one of the elements passed as "
"nodes for the query on Group is neither "
"a Node nor a DbNode")
pk_list.append(node.pk)
queryobject &= Q(dbnodes__in=pk_list)
if user is not None:
if isinstance(user, basestring):
queryobject &= Q(user__email=user)
else:
queryobject &= Q(user=user)
if name_filters is not None:
name_filters_list = {"name__" + k: v for (k, v)
in name_filters.iteritems() if v}
queryobject &= Q(**name_filters_list)
groups_pk = set(DbGroup.objects.filter(
queryobject, **kwargs).values_list('pk', flat=True))
if node_attributes is not None:
for k, vlist in node_attributes.iteritems():
if isinstance(vlist, basestring) or not isinstance(
vlist, collections.Iterable):
vlist = [vlist]
for v in vlist:
# This will be a dictionary of the type
                    # {'datatype': 'txt', 'tval': 'xxx'}, for instance, if
# the passed data is a string
base_query_dict = DbAttribute.get_query_dict(v)
# prepend to the key the right django string to SQL-join
# on the right table
query_dict = {'dbnodes__dbattributes__{}'.format(k2): v2
for k2, v2 in base_query_dict.iteritems()}
# I narrow down the list of groups.
# I had to do it in this way, with multiple DB hits and
# not a single, complicated query because in SQLite
# there is a maximum of 64 tables in a join.
# Since typically one requires a small number of filters,
# this should be ok.
groups_pk = groups_pk.intersection(DbGroup.objects.filter(
pk__in=groups_pk, dbnodes__dbattributes__key=k,
**query_dict).values_list('pk', flat=True))
retlist = []
# Return sorted by pk
for dbgroup in sorted(groups_pk):
retlist.append(cls(dbgroup=dbgroup))
return retlist
def delete(self):
if self.pk is not None:
self.dbgroup.delete()
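# Hedged usage sketch (names are illustrative; this assumes a configured AiiDA
# profile and Django backend, so it is not meant to run standalone):
#
#     group = Group(name="my_group", description="demo").store()
#     group.add_nodes([some_stored_node])     # some_stored_node: an already stored Node
#     hits = Group.query(name="my_group")     # -> list of Group objects sorted by pk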
|
StarcoderdataPython
|
6407414
|
from collections import deque
def delivery(products, args):
for product in args:
products.append(product)
return products
def sell(products, args):
products = deque(products)
if len(args) == 1 and str(args[0]).isdigit():
for i in range(int(args[0])):
products.popleft()
elif len(args) == 0:
products.popleft()
elif len(args) != 0:
for product in args:
if product in products:
products = list(filter(lambda s: s != product, products))
products = list(products)
return products
def stock_availability(products, action, *args):
if action == 'delivery':
products = delivery(products, args)
elif action == 'sell':
products = sell(products, args)
return products
print(stock_availability(["choco", "vanilla", "banana"], "delivery", "caramel", "berry"))
print(stock_availability(["chocolate", "vanilla", "banana"], "delivery", "cookie","banana"))
print(stock_availability(["chocolate", "vanilla", "banana"], "sell"))
print(stock_availability(["chocolate", "vanilla", "banana"], "sell", 3))
print(stock_availability(["chocolate", "chocolate", "banana"], "sell", "chocolate"))
print(stock_availability(["cookie", "chocolate", "banana"], "sell", "chocolate"))
print(stock_availability(["chocolate", "vanilla", "banana"], "sell", "cookie"))
'''
Results:
['choco', 'vanilla', 'banana', 'caramel', 'berry']
['chocolate', 'vanilla', 'banana', 'cookie', 'banana']
['vanilla', 'banana']
[]
['banana']
['cookie', 'banana']
['chocolate', 'vanilla', 'banana']
'''
|
StarcoderdataPython
|
8002387
|
# Group superclass definition
# Maintained by <NAME> and <NAME>
import pickle
class Group:
    def __init__(self, group_id, engineers=None):
        # Defined things (avoid a shared mutable default for the engineer list)
        self.group_id = group_id
        self.engineers = engineers if engineers is not None else []

    def export(self, path="./exports/"):
        # Serialize this group; the ".pkl" extension and filename pattern are an
        # assumption, since the original referenced attributes the class never defines.
        with open(path + str(self.group_id) + ".pkl", "wb") as handle:
            pickle.dump(self, handle)
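# Hedged usage sketch; the group id, engineer names, and the ./exports/ directory
# are illustrative only.
if __name__ == "__main__":
    demo = Group("alpha", ["engineer_a", "engineer_b"])
    demo.export()  # would write ./exports/alpha.pkl, assuming that directory exists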
|
StarcoderdataPython
|
11216264
|
<gh_stars>0
from numpy.lib.function_base import median
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread("crestamento_4.jpg")
image_RGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
image_gray = cv2.cvtColor(image_RGB, cv2.COLOR_RGB2GRAY)
median_blur = cv2.medianBlur(image_gray,9)
#canny
result = cv2.Canny(image_gray,100,200)
# count the number of pixels belonging to the leaf
_, imgBinaryLeaf = cv2.threshold(image_gray, 127, 255, cv2.THRESH_BINARY)
leaf_area = np.sum(imgBinaryLeaf == 0) # leaf area in pixels
# end of leaf pixel count
# count the number of border (edge) pixels
_, imgBinary = cv2.threshold(result, 127, 255, cv2.THRESH_BINARY)
border_area = np.sum(imgBinary == 255) # border area in pixels
print('Border pixels: ', border_area)
print('Leaf area: ', leaf_area)
# end of border pixel count
cv2.imwrite('crestament_canny_bi.png', imgBinary)
plt.show()
|
StarcoderdataPython
|
6696925
|
<reponame>opentensor/neurons
from __init__ import neuron
if __name__ == "__main__":
template = neuron().run()
|
StarcoderdataPython
|
3327086
|
"""Add constraint to validate if one of credential column is not null
Revision ID: 3e6d8d0a9cfe
Revises: 835549b518a2
Create Date: 2020-01-29 10:08:37.117163
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "3e6d8d0a9cfe"
down_revision = "835549b518a2"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"credentials",
sa.Column("_metadata", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
)
op.alter_column("credentials", "citizen_id", existing_type=sa.TEXT(), nullable=True)
op.alter_column("credentials", "email", existing_type=sa.TEXT(), nullable=True)
op.alter_column("credentials", "msisdn", existing_type=sa.TEXT(), nullable=True)
op.alter_column("credentials", "username", existing_type=sa.TEXT(), nullable=True)
op.drop_column("credentials", "additional_data")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"credentials",
sa.Column(
"additional_data",
postgresql.JSONB(astext_type=sa.Text()),
autoincrement=False,
nullable=True,
),
)
op.alter_column("credentials", "username", existing_type=sa.TEXT(), nullable=False)
op.alter_column("credentials", "msisdn", existing_type=sa.TEXT(), nullable=False)
op.alter_column("credentials", "email", existing_type=sa.TEXT(), nullable=False)
op.alter_column(
"credentials", "citizen_id", existing_type=sa.TEXT(), nullable=False
)
op.drop_column("credentials", "_metadata")
# ### end Alembic commands ###
|
StarcoderdataPython
|
1689849
|
<reponame>tomhosker/polygon_puzzle<gh_stars>0
"""
This code tests the PuzzleMaker class.
"""
# Test imports.
import config
from puzzle_maker import PuzzleMaker
from word_arbiter import WordArbiter
##############
# MAIN CLASS #
##############
def test_generated_polygon():
""" Test that the polygon generated by the PuzzleMaker class fulfils our
requirements consistently. """
maker = PuzzleMaker()
arbiter = WordArbiter()
iterations = 10
for _ in range(iterations):
polygon = maker.make_polygon()
assert len(polygon.max_letter_word) == config.MAX_WORD_LENGTH
assert arbiter.is_a_word(polygon.max_letter_word)
assert len(polygon.other_words) >= config.MIN_OTHER_WORDS
|
StarcoderdataPython
|
6504711
|
<reponame>algonomicon/a-neural-algorithm-of-artistic-style
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torchvision.transforms as T
from settings import DEVICE, SIZE
# Image Transforms
loader = T.Compose([
T.Resize(SIZE),
T.CenterCrop(SIZE),
T.ToTensor()
])
unloader = T.ToPILImage()
def load_image(path):
image = loader(Image.open(path)).unsqueeze(0)
return image.to(DEVICE, torch.float)
def save_image(tensor, path):
image = unloader(tensor.cpu().clone().squeeze(0))
image.save(path)
|
StarcoderdataPython
|
6480021
|
from abjadext import microtones
def test_dummy():
assert microtones.__version__ is not None
|
StarcoderdataPython
|
3200219
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask.ext.restful import Resource
from flask.ext.restful import reqparse
from flask_mail import Message
from .. import mail
from ..login.views import auth
class sendMail(Resource):
decorators = [auth.login_required]
def post(self):
paser = reqparse.RequestParser()
paser.add_argument('phone', type=str)
paser.add_argument('suggestion')
args = paser.parse_args(strict=True)
msg = Message(
'意见反馈',
recipients=["<EMAIL>"])
msg.body = args['suggestion']
msg.html = "<b>" + args['suggestion'] + "</b>"
mail.send(msg)
return {'staus': 'success', 'mesg': '邮件已发送'}
|
StarcoderdataPython
|
1672297
|
<reponame>zcong1993/django
from django.apps import AppConfig
class ImagesConfig(AppConfig):
name = 'start.apps.images'
verbose_name = "Images"
|
StarcoderdataPython
|
12853591
|
import torch
def save_param(model, pth_path):
'''
save the parameters of the model
Args:
model: the model to which the params belong
pth_path: the path where .pth file is saved
'''
torch.save(model.state_dict(), pth_path)
def load_param(model, pth_path):
'''
load the parameters of the model
Args:
model: the model where the params go into
pth_path: the path where .pth (to be loaded) is saved
'''
model.load_state_dict(torch.load(pth_path))
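# Hedged usage sketch with a throwaway model; the .pth path is illustrative.
if __name__ == "__main__":
    model = torch.nn.Linear(10, 2)
    save_param(model, "/tmp/linear_params.pth")
    load_param(model, "/tmp/linear_params.pth")  # parameters are restored in place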
|
StarcoderdataPython
|
1907973
|
# Copyright (c) 2015 Huawei Technologies India Pvt.Limited.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutronclient.common import extension
from neutronclient.common import utils
from neutronclient.neutron import v2_0 as neutronv20
from networking_sfc._i18n import _
from networking_sfc.cli import port_pair as pp
PORT_PAIR_GROUP_RESOURCE = 'port_pair_group'
def get_port_pair_group_id(client, id_or_name):
return neutronv20.find_resourceid_by_name_or_id(client,
PORT_PAIR_GROUP_RESOURCE,
id_or_name)
class PortPairGroup(extension.NeutronClientExtension):
resource = PORT_PAIR_GROUP_RESOURCE
resource_plural = '%ss' % resource
object_path = '/sfc/%s' % resource_plural
resource_path = '/sfc/%s/%%s' % resource_plural
versions = ['2.0']
def add_common_arguments(parser):
parser.add_argument(
'--description',
help=_('Description for the Port Pair Group.'))
parser.add_argument(
'--port-pair',
metavar='PORT-PAIR',
dest='port_pairs',
default=[],
action='append',
help=_('ID or name of the Port Pair. '
'This option can be repeated.'))
def update_common_args2body(client, body, parsed_args):
if parsed_args.port_pairs:
body['port_pairs'] = [(pp.get_port_pair_id(client, pp1))
for pp1 in parsed_args.port_pairs]
neutronv20.update_dict(parsed_args, body, ['name', 'description'])
return body
class PortPairGroupCreate(extension.ClientExtensionCreate, PortPairGroup):
"""Create a Port Pair Group."""
shell_command = 'port-pair-group-create'
def add_known_arguments(self, parser):
parser.add_argument(
'name',
metavar='NAME',
help=_('Name of the Port Pair Group.'))
add_common_arguments(parser)
parser.add_argument(
'--port-pair-group-parameters',
metavar='[lb_fields=LB_FIELDS, ppg_n_tuple_mapping=TUPLE_VALUES]',
type=utils.str2dict_type(optional_keys=['lb_fields',
'ppg_n_tuple_mapping']),
help=_('Dictionary of Port pair group parameters. '
'Currently, only \'&\' separated string of the lb_fields '
'and ppg_n_tuple_mapping are supported. For '
'ppg_n_tuple_mapping the supported command is '
'\'key=value\' separated by \'&\'. Support '
'ppg_n_tuple_mapping keys are: source_ip_prefix_ingress, '
'source_ip_prefix_egress, destination_ip_prefix_ingress, '
'destination_ip_prefix_egress, source_port_ingress, '
'source_port_egress, destination_port_ingress, '
'destination_port_egress.'))
def args2body(self, parsed_args):
body = {}
if parsed_args.port_pair_group_parameters:
body['port_pair_group_parameters'] = {}
for key, value in parsed_args.port_pair_group_parameters.items():
# Setup lb_fields key and value(s)
if key == 'lb_fields':
body['port_pair_group_parameters'][key] = ([
field for field in value.split('&') if field])
# Setup ppg_n_tuple_mapping key(s) and value(s)
elif key == 'ppg_n_tuple_mapping':
# Reorganize ppg_n_tuple_mapping values in dict with
# structure {'ppg_n_tuple_mapping': 'ingress_n_tuple': {},
# 'egress_n_tuple': {}}
ppg_n_tuple_dict = {}
ingress_n_tuple_dict = {}
egress_n_tuple_dict = {}
# Split input of ppg_n_tuple_mapping by & and =
raw_data = dict([
(content[0], content[1]) for content in
[sub_field.split('=') for sub_field in
[field for field in value.split('&') if field]]
])
# Store ingress_n_tuple values and egress_n_tuple values
# into corresponding dictionary, and expand
# source_port_range and destination_port_range to
# source_port_range_min, source_port_range_max,
# destination_port_range_min, and
                    # destination_port_range_max if they exist
for n_tuple_key, n_tuple_value in raw_data.items():
if n_tuple_key[-7:] == "ingress":
n_tuple_key = n_tuple_key[:-8]
if (
'source_port' in n_tuple_key or
'destination_port' in n_tuple_key
):
min_port, sep, max_port = \
n_tuple_value.partition(":")
if not max_port:
max_port = min_port
ingress_n_tuple_dict[
n_tuple_key + '_range_min'] = int(min_port)
ingress_n_tuple_dict[
n_tuple_key + '_range_max'] = int(max_port)
else:
ingress_n_tuple_dict[n_tuple_key] = \
n_tuple_value
elif n_tuple_key[-6:] == "egress":
n_tuple_key = n_tuple_key[:-7]
if (
'source_port' in n_tuple_key or
'destination_port' in n_tuple_key
):
min_port, sep, max_port = \
n_tuple_value.partition(":")
if not max_port:
max_port = min_port
egress_n_tuple_dict[
n_tuple_key + '_range_min'] = int(min_port)
egress_n_tuple_dict[
n_tuple_key + '_range_max'] = int(max_port)
else:
egress_n_tuple_dict[n_tuple_key] = \
n_tuple_value
ppg_n_tuple_dict['ingress_n_tuple'] = ingress_n_tuple_dict
ppg_n_tuple_dict['egress_n_tuple'] = egress_n_tuple_dict
body['port_pair_group_parameters'][key] = ppg_n_tuple_dict
else:
body['port_pair_group_parameters'][key] = value
body = update_common_args2body(self.get_client(), body, parsed_args)
return {self.resource: body}
class PortPairGroupUpdate(extension.ClientExtensionUpdate, PortPairGroup):
"""Update Port Pair Group's information."""
shell_command = 'port-pair-group-update'
def add_known_arguments(self, parser):
parser.add_argument(
'--name',
metavar='NAME',
help=_('Name of the Port Pair Group.'))
add_common_arguments(parser)
def args2body(self, parsed_args):
body = {}
body = update_common_args2body(self.get_client(), body, parsed_args)
return {self.resource: body}
class PortPairGroupDelete(extension.ClientExtensionDelete, PortPairGroup):
"""Delete a given Port Pair Group."""
shell_command = 'port-pair-group-delete'
class PortPairGroupList(extension.ClientExtensionList, PortPairGroup):
"""List Port Pair Groups that belongs to a given tenant."""
shell_command = 'port-pair-group-list'
list_columns = ['id', 'name', 'port_pairs']
pagination_support = True
sorting_support = True
class PortPairGroupShow(extension.ClientExtensionShow, PortPairGroup):
"""Show information of a given Port Pair Group."""
shell_command = 'port-pair-group-show'
|
StarcoderdataPython
|
9788669
|
#!/usr/bin/python3
# Adding src code to the test folder path
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../src')))
# Loading the required packages
import unittest
from add_custom_words import add_new_words
from anagram_generator import get_anagrams
from character_tree import CharacterMap, create_charmap_dictionary
# Helper module to unit test get_anagrams(input_string, maximum_size=None)
class TestEndToEndModuleForAnagramGenerator(unittest.TestCase):
'''Test Modules for anagram_generator.get_anagrams
'''
@classmethod
def setUpClass(self):
pass
@classmethod
def tearDownClass(self):
pass
def test_defaults(self):
words_testable = {'loop': ['polo', 'pool'], 'loops': ['polos', 'sloop', 'spool'],
'aeprs': ['asper', 'spear', 'prase', 'parse', 'spare', 'spaer'], 'blitzkrig': []}
for word, resultant in words_testable.items():
got = get_anagrams(word)
with self.subTest(word=word, resultant=resultant, type='Mismatch in contents'):
self.assertEqual(sorted(got), sorted(resultant))
with self.subTest(word=word, resultant=resultant, type='Mismatch in length'):
self.assertEqual(len(got), len(resultant))
def test_all_present(self):
words_testable = {'loop': ['polo', 'pool'], 'loops': ['polos', 'sloop', 'spool'],
'aeprs': ['asper', 'spear', 'prase', 'parse', 'spare', 'spaer']}
for word, resultant in words_testable.items():
got = get_anagrams(word)
with self.subTest(word=word, resultant=resultant, type='Mismatch in contents'):
self.assertEqual(sorted(got), sorted(resultant))
with self.subTest(word=word, resultant=resultant, type='Mismatch in length'):
self.assertEqual(len(got), len(resultant))
def test_all_new(self):
words_testable = {'blitzkrig': [], 'blitzsdxsskrig': [], 'werawqpmaezig': []}
for word, resultant in words_testable.items():
got = get_anagrams(word)
with self.subTest(word=word, resultant=resultant, type='Mismatch in contents'):
self.assertEqual(sorted(got), sorted(resultant))
with self.subTest(word=word, resultant=resultant, type='Mismatch in length'):
self.assertEqual(len(got), len(resultant))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
59846
|
# coding: utf-8 -*-
'''
GFS.py contains utility functions for GFS
'''
__all__ = ['get_akbk',
'get_pcoord',
'read_atcf']
import numpy as _np
import pandas as _pd
def get_akbk():
'''
Returns ak,bk for 64 level GFS model
vcoord is obtained from global_fcst.fd/gfsio_module.f
ak,bk are as computed from treadeo.gfsio.f for
hybrid = .true. and idvc == 2
'''
vcoord = _np.array([1.0000000,0.99467099,0.98863202,0.98180002,0.97408301, \
0.96538502,0.95560300,0.94463098,0.93235999,0.91867799,0.90347999, \
0.88666302,0.86813903,0.84783000,0.82568502,0.80167699,0.77581102, \
0.74813300,0.71872902,0.68773103,0.65531600,0.62170500,0.58715999, \
0.55197400,0.51646298,0.48095500,0.44577801,0.41124901,0.37765899, \
0.34526899,0.31430000,0.28492799,0.25728399,0.23145400,0.20748200, \
0.18537199,0.16509899,0.14660800,0.12982300,0.11465500,0.10100200, \
0.88756002E-01,0.77808000E-01,0.68048999E-01,0.59370000E-01, \
0.51670998E-01,0.44854999E-01,0.38830999E-01,0.33514999E-01, \
0.28829999E-01,0.24707999E-01,0.21083999E-01,0.17901000E-01, \
0.15107000E-01,0.12658000E-01,0.10511000E-01,0.86310003E-02, \
0.69849999E-02,0.55439998E-02,0.42840000E-02,0.31830000E-02, \
0.22199999E-02,0.13780000E-02,0.64200000E-03,0.0000000])
ak = vcoord / 1000.
bk = vcoord / 1.
return ak,bk
def get_pcoord():
'''
Returns the pressure levels in hPa of the native GFS model with 64 levels.
OUTPUT:
pres = pressure levels (hPa) assuming pref=1013.0
'''
ak,bk = get_akbk()
pref = 101.3
pres = ak[:-1] + bk[:-1]*pref
return pres * 10.
def read_atcf(filename):
'''
Read an ATCF file into a dataframe for ease of processing.
INPUT:
filename = ATCF filename
The file contents are specified at:
http://www.nrlmry.navy.mil/atcf_web/docs/database/new/abdeck.html
OUTPUT:
df = DataFrame containing the file contents
'''
def _to_number(s):
tmp = 0.1 * _np.float(s[:-1])
if s[-1] in ['S','W']:
v = -1.0 * tmp if s[-1] in ['S'] else 360.0 - tmp
else:
v = tmp
return v
# column names
names = ['BASIN','CY','YYYYMMDDHH','TECHNUM','TECH','TAU','LAT','LON','VMAX','MSLP','TY','RAD','WINDCODE','RAD1','RAD2','RAD3','RAD4','POUTER','ROUTER','RMW','GUSTS','EYE','SUBREGION','MAXSEAS','INITIALS','DIR','SPEED','STORMNAME','DEPTH','SEAS','SEASCODE','SEAS1','SEAS2','SEAS3','SEAS4','USERDEFINE1','USERDATA1','USERDEFINE2','USERDATA2','USERDEFINE3','USERDATA3','USERDEFINE4','USERDATA4','USERDEFINE5','USERDATA5']
# column datatypes
dtypes = {'BASIN':str,'CY':str,'YYYYMMDDHH':str,'TECHNUM':_np.float,'TECH':str,'TAU':_np.float,'LAT':str,'LON':str,'VMAX':_np.float,'MSLP':_np.float,'TY':str,'RAD':_np.float,'WINDCODE':str,'RAD1':_np.float,'RAD2':_np.float,'RAD3':_np.float,'RAD4':_np.float,'POUTER':_np.float,'ROUTER':_np.float,'RMW':_np.float,'GUSTS':_np.float,'EYE':_np.float,'SUBREGION':str,'MAXSEAS':_np.float,'INITIALS':str,'DIR':_np.float,'SPEED':_np.float,'STORMNAME':str,'DEPTH':str,'SEAS':_np.float,'SEASCODE':str,'SEAS1':_np.float,'SEAS2':_np.float,'SEAS3':_np.float,'SEAS4':_np.float,'USERDEFINE1':str,'USERDATA1':str,'USERDEFINE2':str,'USERDATA2':str,'USERDEFINE3':str,'USERDATA3':str,'USERDEFINE4':str,'USERDATA4':str,'USERDEFINE5':str,'USERDATA5':str}
df = _pd.read_csv(filename,skipinitialspace=True,header=None,names=names,dtype=dtypes)
# convert YYYYMMDDHH into datetime
df['YYYYMMDDHH'] = _pd.to_datetime(df['YYYYMMDDHH'], format='%Y%m%d%H')
# set index columns
index_cols = ['BASIN','CY','YYYYMMDDHH','TECHNUM','TECH','TAU','TY','SUBREGION']
df.set_index(index_cols, inplace=True)
# drop columns that have no information
df.dropna(axis=1,how='all',inplace=True)
# convert Lat/Lon to floats from hemisphere info
df['LAT'] = df['LAT'].apply(lambda f: _to_number(f))
df['LON'] = df['LON'].apply(lambda f: _to_number(f))
return df
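# Hedged usage sketch; the ATCF a-deck file name below is hypothetical.
if __name__ == "__main__":
    print(get_pcoord()[:5])              # first few native-model pressure levels (hPa), surface first
    # df = read_atcf("aal092021.dat")    # uncomment with a real ATCF a-deck file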
|
StarcoderdataPython
|
1891534
|
# -----------------------------------------------------------------------------
# Copyright (c) 2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
# -----------------------------------------------------------------------------
import click
from PyInstaller import __version__
from . import opts
@click.group()
@click.version_option(__version__)
@click.pass_context
def cli(_ctx: click.Context):
pass
@cli.command('build')
@opts.add_logging_options
@opts.add_build_options
@opts.add_makespec_options
def build_cmd(**kwargs):
print(kwargs)
def run():
cli(prog_name='pyinstaller')
|
StarcoderdataPython
|
285007
|
from helper import unittest, PillowTestCase
from PIL import Image, ImageFont, ImageDraw
image_font_installed = True
try:
ImageFont.core.getfont
except ImportError:
image_font_installed = False
@unittest.skipIf(not image_font_installed, "image font not installed")
class TestImageFontBitmap(PillowTestCase):
def test_similar(self):
text = 'EmbeddedBitmap'
font_outline = ImageFont.truetype(
font='Tests/fonts/DejaVuSans.ttf', size=24)
font_bitmap = ImageFont.truetype(
font='Tests/fonts/DejaVuSans-bitmap.ttf', size=24)
size_outline = font_outline.getsize(text)
size_bitmap = font_bitmap.getsize(text)
size_final = max(size_outline[0], size_bitmap[0]), max(size_outline[1], size_bitmap[1])
im_bitmap = Image.new('RGB', size_final, (255, 255, 255))
im_outline = im_bitmap.copy()
draw_bitmap = ImageDraw.Draw(im_bitmap)
draw_outline = ImageDraw.Draw(im_outline)
# Metrics are different on the bitmap and ttf fonts,
# more so on some platforms and versions of freetype than others.
# Mac has a 1px difference, linux doesn't.
draw_bitmap.text((0, size_final[1] - size_bitmap[1]),
text, fill=(0, 0, 0), font=font_bitmap)
draw_outline.text((0, size_final[1] - size_outline[1]),
text, fill=(0, 0, 0), font=font_outline)
self.assert_image_similar(im_bitmap, im_outline, 20)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1988212
|
<gh_stars>0
#!/usr/bin/env python2.7
"""
Combine tsv files with read depth. Files should have all the same
lines and should have a column for chromosome/contig, a column for
position, and a column for depth.
script/bin/combine_depth_files.py <file(s)...>
The directory name of each file is used as the strain name.
"""
import gzip
import itertools
import os.path as osp
import sys
handles = []
strains = []
for fname in sys.argv[1:]:
strains.append(osp.basename(osp.dirname(fname)))
if fname.endswith('.gz'):
handles.append(gzip.open(fname))
else:
handles.append(open(fname))
sys.stdout.write('replicon\tpos\t' + '\t'.join(strains) + '\n')
for lines in itertools.izip(*handles):
r = None
p = None
for i, line in enumerate(lines):
fields = line.strip().split('\t')
if i == 0:
sys.stdout.write(fields[0] + '\t' + fields[1])
r = fields[0]
p = fields[1]
else:
if fields[0] != r or fields[1] != p:
                raise Exception('Unmatched position at %s in %s' % (line, strains[i]))
sys.stdout.write('\t' + fields[2])
sys.stdout.write('\n')
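# Hedged usage sketch (added for illustration; paths are hypothetical):
#
#   python2.7 combine_depth_files.py strainA/depth.tsv.gz strainB/depth.tsv.gz > combined.tsv
#
# Each input file's parent directory name ('strainA', 'strainB') becomes the
# column header for that strain's depth values.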
|
StarcoderdataPython
|
220047
|
import getpass
user = getpass.getuser()
passwd = getpass.getpass()
print('User:', user)
print('Passwd:', passwd)
|
StarcoderdataPython
|
3423447
|
<filename>flowUsagePlotWorker.py
import pyqtgraph as pg
import logging
from PyQt5.QtCore import QObject, QThread, QTimer, pyqtSignal
logging.basicConfig(format="%(message)s", level=logging.INFO)
class FlowUsagePlotWorker(QObject):
def __init__(self):
super(FlowUsagePlotWorker, self).__init__()
# QObject.__init__(self) # super(...).__init() does this for you in the line above.
self.graphWidget = pg.PlotWidget()
self.pen = pg.mkPen(color='r', width=2)
self.colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
styles = {'color':'blue', 'font-size': '10pt'}
self.graphWidget.setBackground('w')
self.graphWidget.setTitle('Flow Rate Distribution', color='b', size='10pt')
self.graphWidget.setLabel('left', 'Number of Trials Used', **styles)
self.graphWidget.setLabel('bottom', 'Flow Rate', **styles)
self.graphWidget.addLegend()
self.xAxis = self.graphWidget.getAxis('bottom')
self.ymax = 2
self.graphWidget.setYRange(0, self.ymax, padding=0)
self.xAxisReady = False
self.groupedVials = {}
self.resultsList = []
self.plottingMode = 0
def getWidget(self):
return self.graphWidget
def receiveDuplicatesDict(self, duplicateVials):
self.groupedVials = duplicateVials
def setPlottingMode(self, value):
self.plottingMode = value
if self.resultsList: # self.resultsList will only hold the dict(s) after the first trial.
self.updatePlot(self.resultsList)
def updatePlot(self, resultsList):
if (self.experimentType == 1):
self.intensityPlot(resultsList)
# elif (self.experimentType == 2):
# self.identityPlot(resultsList)
def intensityPlot(self, resultsList):
# This function currently only plots vials of the first olfactometer (regardless of the plottingMode).
self.resultsList = resultsList
if not self.xAxisReady:
flowrateDicts = list(resultsList[0].values()) # Get a list of the first olfactometer's vial's values which are each a dictionary whose keys are flowrates, instead of doing resultsList[0]['vial_5'] since vial number '5' might not always exist.
allFlowrates = list(x for d in flowrateDicts for x in d.keys()) # Put all the flowrates of all the vials into one list. The flowrates are the keys of each dictionary inside flowrateDicts, where flowrateDicts is a list of dictionaries.
allFlowrates.sort(key=int) # Sort all the flowrates in ascending order, even if there are duplicates...
dict_1 = dict(enumerate(allFlowrates)) # This dict will have integer indices for keys and string flowrates for values, but might have multiple keys holding the same flowrate values...
dict_2 = dict((flow, index) for index, flow in dict_1.items()) # This dict will swap the keys and values of dict_1, thus removing any duplicate flowrates because every key must be unique. But it might also remove the indices linked to those duplicates...
dict_3 = dict(enumerate(dict_2.keys())) # Finally, this dict will contain integer indices for keys and string flowrates for values, such that there will not be any duplicate flowrates or missing indices.
self.xAxis.setTicks([dict_3.items()])
self.graphWidget.setXRange(-1, len(dict_3), padding=0)
self.xAxisDict = dict_3
self.xAxisReady = True
colorIndex = 0
self.graphWidget.clear()
if (self.plottingMode == 0):
# This combines all vials into one line.
allFlowsCounterDict = {} # use this dict to count the total usage for each flowrate.
for vial, flowrateDict in resultsList[0].items():
for flow, totalsDict in flowrateDict.items():
if flow not in allFlowsCounterDict:
allFlowsCounterDict[flow] = 0 # the value will be a counter of the total usage for that flow.
allFlowsCounterDict[flow] += totalsDict['Total']
xValues = []
yValues = []
for index, flow in self.xAxisDict.items():
xValues.append(index) # self.xAxisDict has string flowrates for keys and integer values for the index of the flowrate on the x axis.
yValues.append(allFlowsCounterDict[flow])
if (max(yValues) > self.ymax):
self.ymax += 2
self.graphWidget.setYRange(0, self.ymax, padding=0)
self.pen = pg.mkPen(color=self.colors[colorIndex], width=2)
self.graphWidget.plot(xValues, yValues, name='All vials', pen=self.pen, symbol='s', symbolSize=10, symbolBrush=self.colors[colorIndex])
colorIndex += 1
elif (self.plottingMode == 1):
# This combines vials with duplicate odor/conc and plots a line for each distinct odor/conc.
for odor, concDict in self.groupedVials.items():
for conc, vialsList in concDict.items():
allFlowsCounterDict = {} # use this dict to count numLeft and numResponses for each flowrate.
for vial in vialsList:
for flow, totalsDict in resultsList[0][vial].items():
if flow not in allFlowsCounterDict:
allFlowsCounterDict[flow] = 0 # the value will be a counter of the total usage for that flow.
allFlowsCounterDict[flow] += totalsDict['Total']
xValues = []
yValues = []
for index, flow in self.xAxisDict.items():
if flow in allFlowsCounterDict:
xValues.append(index) # self.xAxisDict has string flowrates for keys and integer values for the index of the flowrate on the x axis.
yValues.append(allFlowsCounterDict[flow])
if (max(yValues) > self.ymax):
self.ymax += 2
self.graphWidget.setYRange(0, self.ymax, padding=0)
# Plot a line for each distinct odor/conc
self.pen = pg.mkPen(color=self.colors[colorIndex], width=2)
self.graphWidget.plot(xValues, yValues, name=f'{odor} {conc}', pen=self.pen, symbol='s', symbolSize=10, symbolBrush=self.colors[colorIndex])
colorIndex += 1
elif (self.plottingMode == 2):
# This is for plotting a line for each vial.
for vialNum, flowrateDict in resultsList[0].items():
xValues = []
yValues = []
for index, flow in self.xAxisDict.items():
if flow in flowrateDict:
xValues.append(index) # self.xAxisDict has string flowrates for keys and integer values for the index of the flowrate on the x axis.
yValues.append(flowrateDict[flow]['Total'])
if (max(yValues) > self.ymax):
self.ymax += 2
self.graphWidget.setYRange(0, self.ymax, padding=0)
self.pen = pg.mkPen(color=self.colors[colorIndex], width=2)
self.graphWidget.plot(xValues, yValues, name=f'Vial {vialNum}', pen=self.pen, symbol='s', symbolSize=10, symbolBrush=self.colors[colorIndex])
colorIndex += 1
def setExperimentType(self, experimentType):
self.experimentType = experimentType
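# Hedged usage sketch (added for illustration; not part of the original module).
# It only shows how the widget is obtained and wired up; in the real application
# updatePlot() is driven by trial results arriving from other workers.
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    worker = FlowUsagePlotWorker()
    worker.setExperimentType(1)  # intensity experiment, per updatePlot() above
    worker.setPlottingMode(0)    # combine all vials into one line
    worker.getWidget().show()
    sys.exit(app.exec_())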
|
StarcoderdataPython
|
300486
|
<filename>src/project/projects/urls.py
from django.urls import path
from .views import project_file
app_name = 'projects'
urlpatterns = [
path('<str:project_code>/', project_file, name='project-root'),
path('<str:project_code>/<path:file_path>', project_file, name='project-file'),
]
|
StarcoderdataPython
|
1794320
|
<filename>pivot/apps.py<gh_stars>1-10
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from __future__ import unicode_literals
from django.apps import AppConfig
class PivotConfig(AppConfig):
name = 'pivot'
|
StarcoderdataPython
|
1741016
|
<filename>btclib/der.py
#!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"""Strict ASN.1 DER format for ECDSA signature representation.
The original Bitcoin implementation used OpenSSL to verify
ECDSA signatures in ASN.1 DER representation.
However, OpenSSL does not do strict validation
(e.g. extra padding is ignored) and this changes the transaction
hash value, leading to transaction malleability.
This was fixed by BIP66, activated on block 363,724.
source:
https://github.com/bitcoin/bips/blob/master/bip-0066.mediawiki
BIP66 mandates a strict DER format:
Format:
[0x30][length][0x02][R-length][R][0x02][S-length][S][sighash]
* 0x30 header byte to indicate compound structure
* length: 1-byte length descriptor of the following data,
excluding the sighash byte
* 0x02 header byte indicating an integer
* r-length: 1-byte length descriptor of the r value that follows
* r: arbitrary-length big-endian r value.
It must use the shortest possible encoding for
    a positive integer (which means no null bytes at the start,
except a single one when the next byte has its highest bit set
to avoid being interpreted as a negative number)
* 0x02 header byte indicating an integer
* s-length: 1-byte length descriptor of the s value that follows
* s: arbitrary-length big-endian s value. Same rules as for r apply
* sighash: 1-byte value indicating what data is hashed
(not part of the DER signature)
"""
from typing import Optional, Tuple
from . import dsa
from .alias import Octets
from .curve import Curve
from .curves import secp256k1
from .utils import bytes_from_octets
sighash_all = b'\x01'
sighash_none = b'\x02'
sighash_single = b'\x03'
sighash_all_anyonecanpay = b'\x81'
sighash_none_anyonecanpay = b'\x82'
sighash_single_anyonecanpay = b'\x83'
sighashes = [
sighash_all,
sighash_none,
sighash_single,
sighash_all_anyonecanpay,
sighash_none_anyonecanpay,
sighash_single_anyonecanpay,
]
# (r, s, sighash)
DERSigTuple = Tuple[int, int, Optional[bytes]]
def _bytes_from_scalar(scalar: int) -> bytes:
# scalar is assumed to be in [1, n-1]
elen = scalar.bit_length()
esize = elen // 8 + 1 # not a bug: 'highest bit set' padding included here
n_bytes = scalar.to_bytes(esize, byteorder='big')
return n_bytes
def _serialize_scalar(scalar: int) -> bytes:
# scalar is assumed to be in [1, n-1]
x = _bytes_from_scalar(scalar)
xsize = len(x).to_bytes(1, byteorder='big')
return b'\x02' + xsize + x
def serialize(r: int, s: int, sighash: Optional[Octets] = None,
ec: Curve = secp256k1) -> bytes:
"""Serialize an ECDSA signature in strict ASN.1 DER representation.
Trailing sighash is added if provided
"""
# check that it is a valid signature for the given Curve
dsa._validate_sig(r, s, ec)
result = _serialize_scalar(r)
result += _serialize_scalar(s)
result = b'\x30' + len(result).to_bytes(1, byteorder='big') + result
if sighash is None:
return result
sighash = bytes_from_octets(sighash, 1)
return result + sighash
def deserialize(dersig: Octets, ec: Curve = secp256k1) -> DERSigTuple:
"""Deserialize a strict ASN.1 DER representation of an ECDSA signature.
Return r, s, sighash; sighash is None if not available.
"""
dersig = bytes_from_octets(dersig)
# 7 bytes of meta-data:
# compound header, compound length,
# r value header, r value length,
# s value header, s value length
# sighash type (optional)
#
# the ECDSA signature (r, s) should be 64 bytes,
# r and s being 32 bytes integers each;
# however, integers in DER are signed,
# so if the value being encoded is greater than 2^128,
# a 33rd byte is added in front.
    # Bitcoin has a "low s" rule requiring the s value to be at most ec.n/2,
# but it is only a standardness rule miners are allowed to ignore.
# Moreover, no such rule exists for r.
maxsize = (ec.nsize+1) * 2 + 7 # 73 bytes for secp256k1
sigsize = len(dersig)
if not 8 < sigsize <= maxsize:
errmsg = f"DER signature size ({sigsize}) must be in "
errmsg += f"[9, {maxsize}]"
raise ValueError(errmsg)
if dersig[0] != 0x30:
msg = f"DER signature type must be 0x30 (compound), not {hex(dersig[0])}"
raise ValueError(msg)
# sigsize checks
leftover = sigsize - 2 - dersig[1]
if leftover == 0: # no sighash value
sighash = None
elif leftover == 1: # sighash value
sighash = dersig[sigsize - 1:]
if sighash not in sighashes:
raise ValueError(f"Invalid sighash type {sighash!r}")
else:
msg = f"Declared length ({dersig[1]}) does not "
msg += f"match with actual signature size ({sigsize}) +2 or +3"
raise ValueError(msg)
sizeR = dersig[3] # size of the r scalar
if sizeR == 0:
raise ValueError("Zero-size integer is not allowed for r")
if 5 + sizeR >= sigsize:
raise ValueError("Size of the s scalar must be inside the signature")
sizeS = dersig[5 + sizeR] # size of the s scalar
if sizeS == 0:
raise ValueError("Zero-size integer is not allowed for s")
if sigsize - sizeR - sizeS != 6 + leftover:
raise ValueError("Signature size does not match with size of scalars")
# scalar r
if dersig[2] != 0x02:
raise ValueError("r scalar must be an integer")
if dersig[4] & 0x80:
raise ValueError("Negative number is not allowed for r")
# Null bytes at the start of a scalar are not allowed, unless the
# scalar would otherwise be interpreted as a negative number
if sizeR > 1 and dersig[4] == 0x00 and not (dersig[5] & 0x80):
raise ValueError("Invalid null bytes at the start of r")
r = int.from_bytes(dersig[4:4 + sizeR], byteorder='big')
# scalar s (offset=2+sizeR with respect to r)
if dersig[sizeR + 4] != 0x02:
raise ValueError("s scalar must be an integer")
if dersig[sizeR + 6] & 0x80:
raise ValueError("Negative number is not allowed for s")
# Null bytes at the start of a scalar are not allowed, unless the
# scalar would otherwise be interpreted as a negative number
if sizeS>1 and dersig[sizeR+6]==0x00 and not (dersig[sizeR+7] & 0x80):
raise ValueError("Invalid null bytes at the start of s")
s = int.from_bytes(dersig[6 + sizeR:6 + sizeR + sizeS], byteorder='big')
# checks that the signature is valid for the given Curve
dsa._validate_sig(r, s, ec)
return r, s, sighash
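# Hedged usage sketch (added for illustration; arbitrary in-range r and s values,
# assuming dsa._validate_sig only range-checks them against the curve order):
#
#   r, s = 2 ** 200 + 1, 2 ** 100 + 3
#   der = serialize(r, s, sighash_all)
#   assert deserialize(der) == (r, s, sighash_all)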
|
StarcoderdataPython
|
175965
|
"""002_addISBN_10
Revision ID: 04659e2c3a9a
Revises: 69a9f86e5636
Create Date: 2022-01-21 02:01:57.474589
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '04659e2c3a9a'
down_revision = '69a9f86e5636'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('books', sa.Column('ISBN_10', sa.String(length=10), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('books', 'ISBN_10')
# ### end Alembic commands ###
|
StarcoderdataPython
|
6459653
|
# GENERATED FILE - DO NOT EDIT
VERSION = '201809121509'
BUILDS = {'Darwin': {'sha256': 'e922af671d7baccc099a8bf1e57f40b32d4e92b2abd144437c05da0ce5961abd',
'url': 'http://s3-us-west-2.amazonaws.com/ai2-thor/builds/thor-201809121509-OSXIntel64.zip'},
'Docker': {'tag': '201809121509'},
'Linux': {'sha256': '8dc61edce77e3959235bb74230aa51fef7877120ff68ef646e800cc791f36909',
'url': 'http://s3-us-west-2.amazonaws.com/ai2-thor/builds/thor-201809121509-Linux64.zip'}}
|
StarcoderdataPython
|
1918563
|
import unittest
from transducer.functional import compose
from transducer.react import transduce
from transducer.sinks import CollectingSink, SingularSink
from transducer.sources import iterable_source
from transducer.transducers import (mapping, pairwise, filtering, first)
class TestComposedTransducers(unittest.TestCase):
def test_early_terminating_transducer(self):
input = [0.0, 0.2, 0.8, 0.9, 1.1, 2.3, 2.6, 3.0, 4.1]
output = SingularSink()
iterable_source(iterable=input,
target=transduce(first(lambda x: x > 1.0),
target=output()))
self.assertEqual(output.value, 1.1)
def test_chained_transducers(self):
input = [0.0, 0.2, 0.8, 0.9, 1.1, 2.3, 2.6, 3.0, 4.1]
output = CollectingSink()
iterable_source(iterable=input,
target=transduce(
compose(pairwise(),
mapping(lambda p: p[1] - p[0]),
filtering(lambda d: d < 0.5),
mapping(lambda _: "double-click")),
target=output()))
result = list(output)
self.assertListEqual(result, ['double-click', 'double-click', 'double-click', 'double-click', 'double-click'])
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
3442050
|
from PIL import Image
import sys
import urllib.request
#import urllib, cStringIO
import requests
#im = Image.open(requests.get(url, stream=True).raw)
ASCII_CHARS = ['.',',',':',';','+','*','?','%','S','#','@']
#ASCII_CHARS = ['..',',,','::',';;','++','**','??','%%','SS','##','@@']
ASCII_CHARS = ASCII_CHARS[::-1]
'''
method resize():
- takes as parameters the image, and the final width
- resizes the image into the final width while maintaining aspect ratio
'''
def resize(image, new_width):
(old_width, old_height) = image.size
aspect_ratio = float(old_height)/float(old_width)
new_height = int((aspect_ratio * new_width)/2)
new_dim = (new_width, new_height)
new_image = image.resize(new_dim)
return new_image
'''
method grayscalify():
- takes an image as a parameter
- returns the grayscale version of image
'''
def grayscalify(image):
return image.convert('L')
'''
method modify():
- replaces every pixel with a character whose intensity is similar
'''
def modify(image, buckets=25):
initial_pixels = list(image.getdata())
new_pixels = [ASCII_CHARS[pixel_value//buckets] for pixel_value in initial_pixels]
return ''.join(new_pixels)
'''
method do():
- does all the work by calling all the above functions
'''
def do(image, new_width):
image = resize(image,new_width)
image = grayscalify(image)
pixels = modify(image)
len_pixels = len(pixels)
# Construct the image from the character list
new_image = [pixels[index:index+new_width] for index in range(0, len_pixels, new_width)]
return '\n'.join(new_image)
'''
methods Asciify() / asciify():
- takes as parameter the image path and runs the above code
- handles exceptions as well
- provides alternative output options
'''
def Asciify(path,newSize):
image = None
IMG=None
try:
image = Image.open(path)
except:
try:
urllib.request.urlretrieve(path, 'a.'+path[-3:])
            image = Image.open('a.' + path[-3:])
except:
try:
image = Image.open(requests.get(path, stream=True).raw)
except:
print("Unable to find image in",path)
#print(e)
return
image = do(image,newSize)
return(image)
def asciify(path,newSize):
IMG=None
image = None
try:
image = Image.open(path)
except:
try:
urllib.request.urlretrieve(path, 'a.'+path[-3:])
            image = Image.open('a.' + path[-3:])
except:
try:
image = Image.open(requests.get(path, stream=True).raw)
except:
print("Unable to find image in",path)
#print(e)
return
image = do(image,newSize)
print(image)
def Version():
return('Current-2021-07-28')
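# Hedged usage sketch (added for illustration; 'cat.jpg' is a hypothetical file,
# and a URL also works because Asciify() falls back to urllib/requests).
if __name__ == '__main__':
    art = Asciify('cat.jpg', 120)
    if art is not None:
        print(art)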
|
StarcoderdataPython
|
1625171
|
#!/usr/bin/env python3
# Copyright 2020 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import pygion
from pygion import task, Partition, Region, RW, WD
import numpy as np
@task(privileges=[WD])
def init_field(R):
coloring = np.array(
[[([0, 1],), ([1, 0],), ([0, 1],), ([1, 0],)],
[([1, 1],), ([1, 0],), ([0, 1],), ([1, 1],)],
[([0, 0],), ([1, 1],), ([1, 1],), ([0, 0],)],
[([0, 0],), ([1, 1],), ([1, 1],), ([0, 0],)]],
dtype=R.color.dtype)
np.copyto(R.color, coloring, casting='no')
@task
def main():
R = Region([4, 4], {'color': pygion.int2d})
init_field(R)
P = Partition.by_field(R, 'color', [2, 2])
assert P.color_space.volume == 4
print('Parent region has volume %s' % R.ispace.volume)
assert R.ispace.volume == 16
assert P[0, 0].ispace.volume == 4
assert P[0, 1].ispace.volume == 3
assert P[1, 0].ispace.volume == 3
assert P[1, 1].ispace.volume == 6
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6450335
|
# by <NAME>
# Extract Variable (alias introduce explaining variable)
WELL_DONE = 900000
MEDIUM = 600000
COOKED_CONSTANT = 0.05
def is_cookeding_criteria_satisfied(time, temperature, pressure, desired_state):
if desired_state == 'well-done' and time * temperature * pressure * COOKED_CONSTANT >= WELL_DONE:
return True
if desired_state == 'medium' and time * temperature * pressure * COOKED_CONSTANT >= MEDIUM:
return True
return False
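# A possible result of the Extract Variable refactoring named above (illustrative
# sketch only; the original exercise leaves this step to the reader).
def is_cooking_criteria_satisfied_refactored(time, temperature, pressure, desired_state):
    cooking_progress = time * temperature * pressure * COOKED_CONSTANT
    if desired_state == 'well-done' and cooking_progress >= WELL_DONE:
        return True
    if desired_state == 'medium' and cooking_progress >= MEDIUM:
        return True
    return False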
|
StarcoderdataPython
|
11214940
|
<filename>package/lucas.py
from functools import cache
@cache
def lucas(index: int) -> int:
""" lucas
    This is the recursive function for the Lucas sequence.
    Args:
        index (int): the index in the Lucas sequence
    Returns:
        [int]: the Lucas number at that index
"""
if index == "":
return 2
try:
index = int(index)
if index not in range(494):
raise ValueError
except ValueError as err:
return err
else:
if index == 0:
return 2
if index == 1:
return 1
return lucas(index-1) + lucas(index-2)
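# Hedged usage sketch (added for illustration): the Lucas sequence starts
# 2, 1, 3, 4, 7, 11, ..., so lucas(5) should return 11. Note that an index
# outside range(494) makes the function return a ValueError instance rather
# than raise it, so callers may want to check the return type.
def _lucas_demo() -> None:
    assert lucas(0) == 2
    assert lucas(1) == 1
    assert lucas(5) == 11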
if __name__ == '__main__':
pass
|
StarcoderdataPython
|
1788129
|
<gh_stars>0
from flask import Flask, abort, make_response
import redis
app = Flask(__name__)
db = redis.Redis(host='redisserver')
@app.route('/<file>')
@app.route('/<file>.<ext>')
def serve_file(file, ext=None):
result = db.get(file)
if result is None:
return abort(425)
result = make_response(result)
result.mimetype='video/mp4'
return result
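# Hedged usage sketch (added for illustration; not part of the original app).
# The Redis hostname matches the hard-coded 'redisserver' above, and the key
# 'clip' / file 'clip.mp4' are hypothetical.
#
#   r = redis.Redis(host='redisserver')
#   r.set('clip', open('clip.mp4', 'rb').read())
#   # afterwards GET /clip.mp4 returns the stored bytes with mimetype video/mp4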
|
StarcoderdataPython
|
3299018
|
<gh_stars>10-100
import unittest
from dexter.models import Document, DocumentSource, db
from dexter.models.seeds import seed_db
class TestDocumentSource(unittest.TestCase):
def test_same_person(self):
self.assertEqual(
DocumentSource(
source_type='person', unnamed=False, person_id=1, source_function_id=1,
affiliation_id=1, quoted=1),
DocumentSource(
source_type='person', unnamed=False, person_id=1, source_function_id=1,
affiliation_id=1, quoted=0))
def test_same_person_unnamed(self):
self.assertEqual(
DocumentSource(
source_type='person', unnamed=True, source_function_id=1, affiliation_id=1,
quoted=1),
DocumentSource(
source_type='person', unnamed=True, source_function_id=1, affiliation_id=1,
quoted=0))
def test_diff_person_unnamed(self):
self.assertNotEqual(
DocumentSource(
source_type='person', unnamed=True, source_function_id=1, affiliation_id=1),
DocumentSource(
source_type='person', unnamed=True, source_function_id=1, affiliation_id=2))
self.assertNotEqual(
DocumentSource(
source_type='person', unnamed=True, source_function_id=1, affiliation_id=1),
DocumentSource(
source_type='person', unnamed=True, source_function_id=2, affiliation_id=1))
def test_diff_person(self):
self.assertNotEqual(
DocumentSource(
source_type='person', unnamed=False, person_id=1, source_function_id=1, affiliation_id=1),
DocumentSource(
source_type='person', unnamed=False, person_id=2, source_function_id=1, affiliation_id=2))
def test_same_child(self):
self.assertEqual(
DocumentSource(
source_type='child', unnamed=False, name='Foo', unnamed_race_id=1, unnamed_gender_id=1,
source_age_id=1, source_role_id=1, quoted=0),
DocumentSource(
source_type='child', unnamed=False, name='Foo', unnamed_race_id=1, unnamed_gender_id=1,
source_age_id=1, source_role_id=1, quoted=1))
def test_same_child_unnamed(self):
self.assertEqual(
DocumentSource(
source_type='child', unnamed=True, unnamed_race_id=1, unnamed_gender_id=1,
source_age_id=1, source_role_id=1, quoted=0),
DocumentSource(
source_type='child', unnamed=True, unnamed_race_id=1, unnamed_gender_id=1,
source_age_id=1, source_role_id=1, quoted=1))
def test_diff_child_unnamed(self):
self.assertNotEqual(
DocumentSource(
source_type='child', unnamed=True, unnamed_race_id=2, unnamed_gender_id=1,
source_age_id=1, source_role_id=1, quoted=0),
DocumentSource(
source_type='child', unnamed=True, unnamed_race_id=1, unnamed_gender_id=1,
source_age_id=1, source_role_id=1, quoted=0))
def test_diff_child_named(self):
self.assertNotEqual(
DocumentSource(
source_type='child', unnamed=False, name='Fred', unnamed_race_id=2, unnamed_gender_id=1,
source_age_id=1, source_role_id=1, quoted=0),
DocumentSource(
source_type='child', unnamed=False, name='Fred', unnamed_race_id=1, unnamed_gender_id=1,
source_age_id=1, source_role_id=1, quoted=0))
self.assertNotEqual(
DocumentSource(
source_type='child', unnamed=False, name='Fred', unnamed_race_id=1, unnamed_gender_id=2,
source_age_id=1, source_role_id=1, quoted=0),
DocumentSource(
source_type='child', unnamed=False, name='Fred', unnamed_race_id=1, unnamed_gender_id=1,
source_age_id=1, source_role_id=1, quoted=0))
self.assertNotEqual(
DocumentSource(
source_type='child', unnamed=False, name='Fred', unnamed_race_id=1, unnamed_gender_id=1,
source_age_id=2, source_role_id=1, quoted=0),
DocumentSource(
source_type='child', unnamed=False, name='Fred', unnamed_race_id=1, unnamed_gender_id=1,
source_age_id=1, source_role_id=1, quoted=0))
self.assertNotEqual(
DocumentSource(
source_type='child', unnamed=False, name='Joe', unnamed_race_id=1, unnamed_gender_id=1,
source_age_id=1, source_role_id=1, quoted=0),
DocumentSource(
source_type='child', unnamed=False, name='Ben', unnamed_race_id=1, unnamed_gender_id=1,
source_age_id=1, source_role_id=1, quoted=0))
def test_same_secondary(self):
self.assertEqual(
DocumentSource(
source_type='secondary', unnamed=False, name='Source', source_function_id=1, affiliation_id=1),
DocumentSource(
source_type='secondary', unnamed=False, name='Source', source_function_id=1, affiliation_id=1))
def test_diff_secondary(self):
self.assertNotEqual(
DocumentSource(
source_type='secondary', unnamed=False, name='Source', source_function_id=1, affiliation_id=1),
DocumentSource(
source_type='secondary', unnamed=False, name='Source', source_function_id=2, affiliation_id=1))
self.assertNotEqual(
DocumentSource(
source_type='secondary', unnamed=False, name='Source', source_function_id=1, affiliation_id=1),
DocumentSource(
source_type='secondary', unnamed=False, name='Source', source_function_id=1, affiliation_id=2))
self.assertNotEqual(
DocumentSource(
source_type='secondary', unnamed=False, name='Blah ', source_function_id=1, affiliation_id=1),
DocumentSource(
source_type='secondary', unnamed=False, name='Source', source_function_id=1, affiliation_id=1))
|
StarcoderdataPython
|
9645425
|
<reponame>DrFirestream/NLP<gh_stars>0
import json
from pathlib import Path
from typing import List, Tuple
import sentencepiece as spm
import torch
import numpy as np
import fire
from .fire_utils import only_allow_defined_args
from .model import Model, HParams
from .common import END_OF_LINE, END_OF_TEXT
class ModelWrapper:
END_OF_LINE = END_OF_LINE
END_OF_TEXT = END_OF_TEXT
def __init__(self, model: Model, sp_model: spm.SentencePieceProcessor):
self.model = model
self.sp_model = sp_model
@classmethod
def load(cls, root: Path):
sp_model = spm.SentencePieceProcessor()
sp_model.load(str(root / 'sp.model'))
hparams = json.loads((root / 'params.json').read_text())['hparams']
hparams.setdefault('n_hidden', hparams['n_embed'])
model = Model(HParams(**hparams))
model.cuda('cuda:0')
state = torch.load(root / 'model.pt')# map_location='cpu')
state_dict = fixed_state_dict(state['state_dict'])
model.load_state_dict(state_dict)
tensor_list = list(state_dict.items())
for layer_tensor_name, tensor in tensor_list:
print("Layer %-42s: %9d elements" % (layer_tensor_name, torch.numel(tensor)))
pytorch_total_params = sum(p.numel() for p in model.parameters())
print ("Total # params: %d" % pytorch_total_params)
return cls(model, sp_model)
def tokenize(self, s: str) -> List[str]:
return self.sp_model.EncodeAsPieces(s)
def token_to_id(self, token: str) -> int:
return self.sp_model.PieceToId(token)
def id_to_token(self, token_id: int) -> str:
return self.sp_model.IdToPiece(int(token_id))
def get_hidden(self, tokenids: torch.Tensor) -> torch.Tensor:
""" Return a tensor with shape (batch_size, len(tokens), len(self.sp_model)),
with hidden embeddings for token ids after each token in tokens.
If this is a start of the text, you may want to prepend END_OF_TEXT:
model.get_log_probs([model.END_OF_TEXT] + tokens).
Use model.tokenize to obtain tokens.
"""
with torch.no_grad():
hidden = self.model(tokenids)['hidden']
return hidden
def get_log_probs(self, tokens: List[str]) -> torch.Tensor:
""" Return a tensor with shape (len(tokens), len(self.sp_model)),
with log-probabilities for tokens after each token in tokens.
If this is a start of the text, you may want to prepend END_OF_TEXT:
model.get_log_probs([model.END_OF_TEXT] + tokens).
Use model.tokenize to obtain tokens.
"""
assert len(tokens) <= self.model.hparams.n_ctx # TODO
ids = [self.token_to_id(t) for t in tokens]
        ctx = torch.LongTensor(ids).unsqueeze(0).cuda()  # move input to the device the model was loaded onto
with torch.no_grad():
logits = self.model(ctx)['logits'].squeeze(0)
return torch.log_softmax(logits, dim=1)
def get_occurred_log_probs(
self, tokens: List[str]) -> List[Tuple[float, str]]:
""" Return a list of log probs of actually occurred tokens,
starting from the second.
"""
log_probs = self.get_log_probs(tokens)
out = []
for idx, token in enumerate(tokens[1:]):
out.append((float(log_probs[idx, self.token_to_id(token)]), token))
return out
def get_next_top_k(
self, tokens: List[str], top_k: int) -> List[Tuple[float, str]]:
""" Return a list of top k tuples of log prob and token,
for what would come after the last token.
"""
next_log_probs = self.get_log_probs(tokens)[-1]
return sorted([(float(next_log_probs[i]), self.id_to_token(i))
for i in next_log_probs.argsort()[-top_k:]],
reverse=True)
def generate_tokens(self, tokens_prefix: List[str], tokens_to_generate: int, top_k: int) -> List[str]:
tokens = list(tokens_prefix)
for i in range(tokens_to_generate):
# generate TOP_K potential next tokens
ntk = self.get_next_top_k(tokens, top_k)
# convert log probs to real probs
logprobs = np.array(list(map(lambda a: a[0], ntk)))
probs = np.exp(logprobs) / np.exp(logprobs).sum()
# pick next token randomly according to probs distribution
next_token_n = np.random.choice(top_k, p=probs)
next_token = ntk[next_token_n][1]
# print (next_token)
tokens.append(next_token)
return tokens
def fixed_state_dict(state_dict):
if all(k.startswith('module.') for k in state_dict):
# legacy multi-GPU format
state_dict = {k[len('module.'):]: v for k, v in state_dict.items()}
return state_dict
def gen_main(model_path, prefix, tokens_to_generate=42, top_k=8):
print("loading model from %s" % model_path)
mw = ModelWrapper.load(Path(model_path))
print("generating text for prefix %s" % prefix)
tokens = mw.tokenize(prefix)
tokens_gen = mw.generate_tokens(tokens, tokens_to_generate, top_k)
print(mw.sp_model.DecodePieces(tokens_gen))
def fire_gen_main():
fire.Fire(only_allow_defined_args(gen_main))
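# Hedged usage sketch (added for illustration; the model directory is hypothetical
# and must contain sp.model, params.json and model.pt as ModelWrapper.load() expects):
#
#   gen_main('run-root/', 'Once upon a time', tokens_to_generate=32, top_k=8)
#
# or call fire_gen_main() through a console entry point to drive it from the CLI.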
|
StarcoderdataPython
|
123330
|
<filename>appCore/apps/replica/contrib/insta/management/commands/insta_import.py
import requests
from urllib.parse import urlparse
from io import BytesIO
from django.core.management.base import BaseCommand, CommandError
from django.template.defaultfilters import slugify, wordcount
from django.shortcuts import render_to_response, render, get_object_or_404, redirect
from django import template
from django.core.files.base import ContentFile
from django.core import files
from replica.pulse.models import Media
from replica.contrib.insta.models import Instagram
from coreExtend.models import Account
from replica.contrib.insta import settings
from replica.contrib.insta.scraper import instagram_profile_json, instagram_profile_obj
def get_profile_media(profile, page = 0):
"""
Parse a generated media object
:param profile:
:param page:
:return:
"""
#return profile['entry_data']['ProfilePage'][page]['user']['media']['nodes']
edges = profile['entry_data']['ProfilePage'][page]['graphql']['user']['edge_owner_to_timeline_media']['edges']
return [edge['node'] for edge in edges]
class Command(BaseCommand):
help = 'Import instagram photos into media'
def add_arguments(self, parser):
parser.add_argument('instagram_username', type=str)
def handle(self, *args, **options):
insta_user = options['instagram_username']
user_obj = get_object_or_404(Account, username=insta_user)
profile_data = instagram_profile_obj(insta_user)
media_list = get_profile_media(profile_data)
print('importing from {0} user'.format(insta_user))
#print(media_list)
for media_obj in media_list:
try:
media_id = media_obj['id']
insta_media = Instagram.objects.get(instagram_id=media_id)
try:
                    insta_media.caption = media_obj['caption']
                except KeyError:
                    pass
                insta_media.url = media_obj['shortcode']
                insta_media.content_type = 3
                img_url = media_obj['thumbnail_src']
                name = urlparse(img_url).path.split('/')[-1]
                response = requests.get(img_url)
                if response.status_code == 200:
                    insta_media.image.save(name, ContentFile(response.content), save=True)
                insta_media.save()
print('updating #{0}'.format(media_obj['id']))
except Instagram.DoesNotExist:
try:
insta_caption = media_obj['caption']
except KeyError:
insta_caption = ''
MediaInstance = Instagram(
instagram_id=media_obj['id'],
title = 'Instagram #{0}'.format(media_obj['id']),
slug=slugify(media_obj['id']),
caption=insta_caption,
user=user_obj,
#url=media_obj['shortcode'],
                    url = 'https://instagram.com/p/{0}'.format(media_obj['shortcode']),
content_type=3 #For Instagram
)
img_url = media_obj['thumbnail_src']
name = urlparse(img_url).path.split('/')[-1]
response = requests.get(img_url)
if response.status_code == 200:
MediaInstance.image.save(name, ContentFile(response.content), save=True)
MediaInstance.save()
print('importing #{0}'.format(media_obj['id']))
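# Hedged usage sketch (added for illustration; the username is hypothetical):
#
#   python manage.py insta_import some_username
#
# imports that account's recent public media into the Instagram model above.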
|
StarcoderdataPython
|
3247085
|
<gh_stars>0
import asyncio
import base64
from pathlib import Path
from urllib import parse
import aiofiles
import aiohttp
from google_img.collectors.base import BaseCollector
from .collectors.registry import collector
def download_async(
keywords: str, output_folder: Path, collector_name: str = "google_full", hidden: bool = True
) -> None:
image_collector: BaseCollector = collector.registry[collector_name](hidden=hidden)
for keyword in keywords.split(","):
asyncio.run(
download_links_async(keyword.strip(), output_folder, image_collector, collector_name)
)
async def download_links_async(
keyword: str, download_path: Path, collector: BaseCollector, collector_name: str
) -> None:
folder = download_path / keyword
folder.mkdir(parents=True, exist_ok=True)
links = collector.collect(keyword)
async with aiohttp.ClientSession() as session:
tasks = []
tasks = [
place_file(session, link, folder, index, collector_name)
for index, link in enumerate(links)
]
await asyncio.gather(*tasks)
async def place_file(
session: aiohttp.ClientSession, source: str, folder: Path, index: int, collector_name: str
) -> None:
extension = get_extension_from_link(source)
file_name = f"{collector_name}_{str(index).zfill(4)}{extension}"
file_path = folder / file_name
if source.startswith("data:image"):
response_bytes = base64_to_object(source)
async with aiofiles.open(file_path, "wb") as f:
await f.write(response_bytes)
await f.flush()
else:
response = await session.get(source, ssl=False)
async with aiofiles.open(file_path, "wb") as f:
async for data in response.content.iter_any():
await f.write(data)
def base64_to_object(base64_url: str) -> bytes:
_, encoded = base64_url.split(",", 1)
data = base64.urlsafe_b64decode(encoded)
return data
def get_extension_from_link(link: str, default: str = ".jpg") -> str:
if link.startswith("data:image/jpeg;base64"):
return ".jpg"
if link.startswith("data:image/png;base64"):
return ".png"
path = parse.urlparse(link).path
return Path(path).suffix or default
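# Hedged usage sketch (added for illustration; the import path and output folder
# are assumptions, and collector_name must be a key in collector.registry):
#
#   from google_img.download import download_async
#   download_async("red panda, fire truck", Path("./downloads"), hidden=True)
#
# Each comma-separated keyword gets its own sub-folder under the output folder.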
|
StarcoderdataPython
|
6472222
|
<reponame>mys-anusha/NISB-Rosetta-Code
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 7 17:47:01 2021
jumbled
@author: <NAME>
"""
import random
def choose():
words=['computer','python','english','work','random','pencil','frustration','understand','words','water','apple','smartphone','laptop','quality','movie']
pick=random.choice(words)
return pick
def jumble(w):
j="".join(random.sample(w,len(w)))
return j
def play():
p1=input("Enter Player1 name ")
    p2=input("Enter Player2 name ")
sp1=0
sp2=0
turn=0
while(1):
word=choose()
q=jumble(word)
print("Guess the word :",q)
if(turn%2==0):
print(p1,"'s turn")
a=input("The word is: ")
if(a==word):
sp1=sp1+1
print(p1,"'s score is",sp1)
else:
print("Ahh the word was ",word)
c=input("press c to continue e to quit: ")
if c=='e':
print("Thank you for playing \n ",p1,"'s score:",sp1,p2,"'s score:",sp2)
break
else:
print(p2,"'s turn")
a=input("The word is: ")
if(a==word):
sp2=sp2+1
print(p2,"'s score is",sp2)
else:
print("Ahh the word was ",word)
c=input("press c to continue e to quit: ")
if c=='e':
print("Thank you for playing \n ",p1,"'s score:",sp1,p2,"'s score:",sp2)
break
turn=turn+1
play()
|
StarcoderdataPython
|
9654261
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from scapy.all import *
class SensorData(Packet):
fields_desc = [
ByteField("sensor_id", 0),
ByteField("sensor_value", 0),
]
bind_layers(Ether, SensorData, type=0x842)
bind_layers(SensorData, IP)
|
StarcoderdataPython
|
1796226
|
<filename>learnpyqt/source/concurrent/qrunner_stop.py
import sys
import time
from PySide2.QtCore import QObject, QRunnable, Qt, QThreadPool, Signal, Slot
from PySide2.QtWidgets import (
QApplication,
QHBoxLayout,
QMainWindow,
QProgressBar,
QPushButton,
QWidget,
)
class WorkerKilledException(Exception):
pass
class WorkerSignals(QObject):
progress = Signal(int)
class JobRunner(QRunnable):
signals = WorkerSignals()
def __init__(self):
super().__init__()
self.is_killed = False # <1>
@Slot()
def run(self):
try:
for n in range(100):
self.signals.progress.emit(n + 1)
time.sleep(0.1)
if self.is_killed: # <2>
raise WorkerKilledException
except WorkerKilledException:
pass # <3>
def kill(self): # <4>
self.is_killed = True
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
# Some buttons
w = QWidget()
l = QHBoxLayout()
w.setLayout(l)
btn_stop = QPushButton("Stop")
l.addWidget(btn_stop)
self.setCentralWidget(w)
# Create a statusbar.
self.status = self.statusBar()
self.progress = QProgressBar()
self.status.addPermanentWidget(self.progress)
# Thread runner
self.threadpool = QThreadPool()
# Create a runner
self.runner = JobRunner()
self.runner.signals.progress.connect(self.update_progress)
self.threadpool.start(self.runner)
btn_stop.pressed.connect(self.runner.kill)
self.show()
def update_progress(self, n):
self.progress.setValue(n)
app = QApplication(sys.argv)
w = MainWindow()
app.exec_()
|
StarcoderdataPython
|
333389
|
<gh_stars>0
# !/usr/bin/env python3
"""
Author: <NAME>
Date: 2021-06-03 10:10:04
LastEditTime: 2021-06-03 10:10:04
LastEditors: <NAME>
Description: unzip bagfile to asc file
FilePath:
"""
import rosbag
import rospy
import sys, getopt
import os
from datetime import datetime
from rospy import rostime
class RosAscWriter():
"""
    Parse the CAN frames in a bag file into an asc file.
    Attributes:
        topic: topic name of the CAN frames
        output_filename: name of the output file
        start: start time of the CAN frames
        end: end time of the CAN frames
    """
    # initialize parameters
def __init__(self, topic="can0/received_messages", output_path = "", start = rospy.Time(0), end = rospy.Time(sys.maxsize)):
self.opt_topic = topic
self.out_path = output_path
self.opt_start = start
self.opt_end = end
    # filter topics by message type
def filter_can_msgs(self, topic, datatype, md5sum, msg_def, header):
if(datatype=="can_msgs/Frame"):
return True;
return False;
    # read parameters from the command line
def parseArgs(self, args):
opts, opt_files = getopt.getopt(args,"hsvr:o:t:p:",["opath=","topic=","start=","end="])
for opt, arg in opts:
if opt in ("-o", "--opath"):
self.out_path = arg
elif opt in ("-t", "--topic"):
self.opt_topic = arg
elif opt in ("--start"):
self.opt_start = rospy.Time(int(arg))
elif opt in ("--end"):
self.opt_end = rospy.Time(int(arg))
return opt_files
    # write the CAN messages from the bag file into an asc file
    def write_output_asc(self, filename):
        """Parse a bag file and write its CAN frames to an asc file.
        Args:
            filename: name of the bag file
        Returns:
            None
        Raises:
            IOError: input/output error
        """
        # build the name of the generated asc file
if self.out_path == "":
out_file = bagfile[:-4] + "_can" + ".asc"
else:
out_file = self.out_path + bagfile[:-4] + "_can" + ".asc"
if self.out_path != "":
for f in os.listdir(self.out_path):
if f.find(bagfile[:-4])>=0 and f.find("_can.asc")>=0:
os.remove(self.out_path + f)
else:
for f in os.listdir("./"):
if f.find(bagfile[:-4])>=0 and f.find("_can.asc")>=0:
os.remove(self.out_path + f)
with open(out_file,'w') as file:
            # write the asc file header
now = datetime.now().strftime("%a %b %m %I:%M:%S.%f %p %Y")
file.write("date %s\n" % now)
file.write("base hex timestamps absolute\n")
file.write("internal events logged\n")
            # formats used when writing the data
FORMAT_MESSAGE = "{channel} {id:<15} Rx {dtype} {data}"
FORMAT_EVENT = "{timestamp:9.6f} {message}\n"
            # open the bag file
bag_file = filename
bag = rosbag.Bag(bag_file, "r")
i = 0
            # read the CAN frames from the bag file and write them to the asc file in the specified format
for topic, msg, t in bag.read_messages(connection_filter=self.filter_can_msgs, start_time=self.opt_start, end_time=self.opt_end):
if i == 0:
                    # capture the timestamp of the first frame; later CAN frame timestamps are offsets from it
firstFrameTimeStamp = msg.header.stamp.to_sec()
timestamp = msg.header.stamp.to_sec()
if msg.is_error:
message = "{} ErrorFrame".format(msg.channel)
# message = "{} ErrorFrame".format('1')
line = FORMAT_EVENT.format(timestamp=timestamp, message=message)
file.write(line)
i = i + 1
continue
if msg.is_rtr:
dtype = 'r'
data = []
else:
dtype = "d {}".format(msg.dlc)
canframe = msg.data[0:msg.dlc]
data = ["{:02X}".format(byte) for byte in canframe]
arb_id = "{:X}".format(msg.id)
if msg.is_extended:
arb_id += 'x'
                # needed once the ROS node adds the Channel field
message = FORMAT_MESSAGE.format(channel=msg.channel, id=arb_id, dtype=dtype, data=' '.join(data))
# message = FORMAT_MESSAGE.format(channel='1', id=arb_id, dtype=dtype, data=' '.join(data))
if timestamp >= firstFrameTimeStamp:
timestamp -= firstFrameTimeStamp
line = FORMAT_EVENT.format(timestamp=timestamp, message=message)
file.write(line)
i = i + 1
            # close the opened bag file
bag.close()
if __name__ == '__main__':
if len(sys.argv) < 2:
        print('Please specify a bag file!')
sys.exit(1)
else :
RosAscWriter = RosAscWriter()
try:
opt_files = RosAscWriter.parseArgs(sys.argv[1:])
except getopt.GetoptError:
sys.exit(2)
        # iterate over the specified bag files
for files in range(0,len(opt_files)):
bagfile = opt_files[files]
RosAscWriter.write_output_asc(bagfile)
        print("Done!")
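# Hedged usage sketch (added for illustration; file names are hypothetical):
#
#   python ros_asc_writer.py -o ./out/ -t can0/received_messages recording.bag
#
# writes ./out/recording_can.asc with one line per CAN frame, with timestamps
# offset from the first frame in the bag.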
|
StarcoderdataPython
|
130763
|
def arithmeticExpression(a, b, c):
"""
    Consider an arithmetic expression of the form a#b=c.
    Check whether it is possible to replace # with one of
    the four signs: +, -, * or / to obtain a correct expression.
    Note: b must be non-zero, since the division check evaluates a / b.
    """
    return (a + b == c) or (a - b == c) or (a * b == c) or (a / b == c)
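# Hedged usage examples (added for illustration):
if __name__ == "__main__":
    assert arithmeticExpression(2, 3, 5)      # 2 + 3 == 5
    assert arithmeticExpression(6, 3, 2)      # 6 / 3 == 2
    assert not arithmeticExpression(2, 3, 8)  # no single operator fits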
|
StarcoderdataPython
|
4940438
|
<gh_stars>1-10
from .install import Installer
def update(package_name: str, entrypoint_name: str = "") -> None:
installer = Installer(package=package_name, entrypoint_name=entrypoint_name)
installer.update()
|
StarcoderdataPython
|
6648822
|
<filename>Python/prova/calendario.py
aniversairo = input('Enter your date of birth in the format __/__/____ : ')
dia = int(aniversairo[:2])
mes = int(aniversairo[3:5])
if 20 <= dia <= 31 and mes == 3 or 1 <= dia <= 20 and mes == 4:
    print('Aries')
elif 21 <= dia <= 30 and mes == 4 or 1 <= dia <= 20 and mes == 5:
    print('Taurus')
elif 21 <= dia <= 31 and mes == 5 or 1 <= dia <= 20 and mes == 6:
    print('Gemini')
elif 21 <= dia <= 30 and mes == 6 or 1 <= dia <= 21 and mes == 7:
    print('Cancer')
elif 22 <= dia <= 31 and mes == 7 or 1 <= dia <= 22 and mes == 8:
    print('Leo')
|
StarcoderdataPython
|
6409957
|
<reponame>e-yuzo/distributed-systems-for-fun<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: book.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='book.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\nbook.proto\"\x17\n\tBookQuery\x12\n\n\x02id\x18\x01 \x01(\x05\"9\n\x04\x42ook\x12\x0e\n\x06\x61uthor\x18\x01 \x01(\t\x12\r\n\x05title\x18\x02 \x01(\t\x12\x12\n\nbook_cover\x18\x03 \x01(\t\" \n\x0c\x42ookResponse\x12\x10\n\x08response\x18\x01 \x01(\x05\"%\n\rBooksResponse\x12\x14\n\x05\x62ooks\x18\x01 \x03(\x0b\x32\x05.Book2\x85\x01\n\rRelevantBooks\x12$\n\nSubmitBook\x12\x05.Book\x1a\r.BookResponse\"\x00\x12(\n\x08GetBooks\x12\n.BookQuery\x1a\x0e.BooksResponse\"\x00\x12$\n\nRemoveBook\x12\x05.Book\x1a\r.BookResponse\"\x00\x62\x06proto3')
)
_BOOKQUERY = _descriptor.Descriptor(
name='BookQuery',
full_name='BookQuery',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='BookQuery.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=14,
serialized_end=37,
)
_BOOK = _descriptor.Descriptor(
name='Book',
full_name='Book',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='author', full_name='Book.author', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='title', full_name='Book.title', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='book_cover', full_name='Book.book_cover', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=39,
serialized_end=96,
)
_BOOKRESPONSE = _descriptor.Descriptor(
name='BookResponse',
full_name='BookResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='response', full_name='BookResponse.response', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=130,
)
_BOOKSRESPONSE = _descriptor.Descriptor(
name='BooksResponse',
full_name='BooksResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='books', full_name='BooksResponse.books', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=132,
serialized_end=169,
)
_BOOKSRESPONSE.fields_by_name['books'].message_type = _BOOK
DESCRIPTOR.message_types_by_name['BookQuery'] = _BOOKQUERY
DESCRIPTOR.message_types_by_name['Book'] = _BOOK
DESCRIPTOR.message_types_by_name['BookResponse'] = _BOOKRESPONSE
DESCRIPTOR.message_types_by_name['BooksResponse'] = _BOOKSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BookQuery = _reflection.GeneratedProtocolMessageType('BookQuery', (_message.Message,), dict(
DESCRIPTOR = _BOOKQUERY,
__module__ = 'book_pb2'
# @@protoc_insertion_point(class_scope:BookQuery)
))
_sym_db.RegisterMessage(BookQuery)
Book = _reflection.GeneratedProtocolMessageType('Book', (_message.Message,), dict(
DESCRIPTOR = _BOOK,
__module__ = 'book_pb2'
# @@protoc_insertion_point(class_scope:Book)
))
_sym_db.RegisterMessage(Book)
BookResponse = _reflection.GeneratedProtocolMessageType('BookResponse', (_message.Message,), dict(
DESCRIPTOR = _BOOKRESPONSE,
__module__ = 'book_pb2'
# @@protoc_insertion_point(class_scope:BookResponse)
))
_sym_db.RegisterMessage(BookResponse)
BooksResponse = _reflection.GeneratedProtocolMessageType('BooksResponse', (_message.Message,), dict(
DESCRIPTOR = _BOOKSRESPONSE,
__module__ = 'book_pb2'
# @@protoc_insertion_point(class_scope:BooksResponse)
))
_sym_db.RegisterMessage(BooksResponse)
_RELEVANTBOOKS = _descriptor.ServiceDescriptor(
name='RelevantBooks',
full_name='RelevantBooks',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=172,
serialized_end=305,
methods=[
_descriptor.MethodDescriptor(
name='SubmitBook',
full_name='RelevantBooks.SubmitBook',
index=0,
containing_service=None,
input_type=_BOOK,
output_type=_BOOKRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='GetBooks',
full_name='RelevantBooks.GetBooks',
index=1,
containing_service=None,
input_type=_BOOKQUERY,
output_type=_BOOKSRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='RemoveBook',
full_name='RelevantBooks.RemoveBook',
index=2,
containing_service=None,
input_type=_BOOK,
output_type=_BOOKRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_RELEVANTBOOKS)
DESCRIPTOR.services_by_name['RelevantBooks'] = _RELEVANTBOOKS
# @@protoc_insertion_point(module_scope)
|
StarcoderdataPython
|
1877760
|
<reponame>LaudateCorpus1/lisa<gh_stars>0
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import pathlib
import re
from lisa.executable import Tool
from lisa.operating_system import Posix
from lisa.util import LisaException, constants, get_matched_str
class CodeExistsException(LisaException):
...
class Git(Tool):
CODE_FOLDER_PATTERN = re.compile(r"Cloning into '(.+)'")
CODE_FOLDER_ON_EXISTS_PATTERN = re.compile(
r"destination path '(?P<path>.*?)' already exists "
r"and is not an empty directory.",
re.M,
)
@property
def command(self) -> str:
return "git"
@property
def can_install(self) -> bool:
return True
def _install(self) -> bool:
if isinstance(self.node.os, Posix):
self.node.os.install_packages([self])
else:
            raise LisaException(
                "Installing git on Windows is not supported. "
                "Make sure git is installed and in PATH"
            )
return self._check_exists()
def clone(
self,
url: str,
cwd: pathlib.PurePath,
ref: str = "",
dir_name: str = "",
fail_on_exists: bool = True,
) -> pathlib.PurePath:
self.node.shell.mkdir(cwd, exist_ok=True)
cmd = f"clone {url} {dir_name}"
# git print to stderr for normal info, so set no_error_log to True.
result = self.run(cmd, cwd=cwd, no_error_log=True)
if result.exit_code == 0:
output = result.stderr
if not output:
output = result.stdout
code_dir = get_matched_str(output, self.CODE_FOLDER_PATTERN)
else:
stdout = result.stdout
code_dir = get_matched_str(stdout, self.CODE_FOLDER_ON_EXISTS_PATTERN)
if code_dir:
if fail_on_exists:
raise CodeExistsException(f"code or folder exists. {stdout}")
else:
self._log.debug(f"path '{code_dir}' exists, clone skipped.")
else:
raise LisaException(f"failed to clone the repo. {stdout}")
full_path = cwd / code_dir
self._log.debug(f"code path: {full_path}")
if ref:
self.checkout(ref, cwd=full_path)
return full_path
def checkout(
self, ref: str, cwd: pathlib.PurePath, checkout_branch: str = ""
) -> None:
if not checkout_branch:
# create a temp branch to checkout tag or commit.
checkout_branch = f"{constants.RUN_ID}"
# force run to make sure checkout among branches correctly.
result = self.run(
f"checkout {ref} -b {checkout_branch}",
force_run=True,
cwd=cwd,
no_info_log=True,
no_error_log=True,
)
result.assert_exit_code(message=f"failed to checkout branch. {result.stdout}")
def pull(self, cwd: pathlib.PurePath) -> None:
result = self.run(
"pull",
force_run=True,
cwd=cwd,
no_info_log=True,
no_error_log=True,
)
result.assert_exit_code(message=f"failed to pull code. {result.stdout}")
def fetch(self, cwd: pathlib.PurePath) -> None:
result = self.run(
"fetch -p",
force_run=True,
cwd=cwd,
no_info_log=True,
no_error_log=True,
)
result.assert_exit_code(message=f"failed to fetch code. {result.stdout}")
def apply(
self,
cwd: pathlib.PurePath,
patches: pathlib.PurePath,
) -> None:
result = self.run(
f"apply {patches}",
shell=True,
cwd=cwd,
force_run=True,
no_info_log=True,
no_error_log=True,
)
        result.assert_exit_code(message=f"failed to apply patches. {result.stdout}")
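# Hedged usage sketch (added for illustration; not part of the original module).
# In LISA, tools are requested from a node; the repository URL and ref below are
# hypothetical placeholders.
#
#   git = node.tools[Git]
#   repo = git.clone("https://github.com/microsoft/lisa.git", cwd=node.working_path)
#   git.checkout("main", cwd=repo)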
|
StarcoderdataPython
|
393843
|
<reponame>prplz/cadquery
from typing import (
List,
Tuple,
Union,
Any,
Callable,
Optional,
Dict,
Literal,
cast as tcast,
Type,
)
from nptyping import NDArray as Array
from math import radians
from typish import instance_of, get_type
from numpy import array, eye, pi
import nlopt
from OCP.gp import (
gp_Vec,
gp_Pln,
gp_Dir,
gp_Pnt,
gp_Trsf,
gp_Quaternion,
gp_XYZ,
gp_Lin,
gp_Intrinsic_XYZ,
)
from OCP.BRepTools import BRepTools
from OCP.Precision import Precision
from .geom import Location, Vector, Plane
from .shapes import Shape, Face, Edge, Wire
from ..types import Real
# type definitions
NoneType = type(None)
DOF6 = Tuple[float, float, float, float, float, float]
ConstraintMarker = Union[gp_Pln, gp_Dir, gp_Pnt, gp_Lin, None]
UnaryConstraintKind = Literal[
"Fixed", "FixedPoint", "FixedAxis", "FixedRotation", "FixedRotationAxis"
]
BinaryConstraintKind = Literal["Plane", "Point", "Axis", "PointInPlane", "PointOnLine"]
ConstraintKind = Literal[
"Plane",
"Point",
"Axis",
"PointInPlane",
"Fixed",
"FixedPoint",
"FixedAxis",
"PointOnLine",
"FixedRotation",
"FixedRotationAxis",
]
# (arity, marker types, param type, conversion func)
ConstraintInvariants = {
"Point": (2, (gp_Pnt, gp_Pnt), Real, None),
"Axis": (
2,
(gp_Dir, gp_Dir),
Real,
lambda x: radians(x) if x is not None else None,
),
"PointInPlane": (2, (gp_Pnt, gp_Pln), Real, None),
"PointOnLine": (2, (gp_Pnt, gp_Lin), Real, None),
"Fixed": (1, (None,), Type[None], None),
"FixedPoint": (1, (gp_Pnt,), Tuple[Real, Real, Real], None),
"FixedAxis": (1, (gp_Dir,), Tuple[Real, Real, Real], None),
"FixedRotationAxis": (
1,
(None,),
Tuple[int, Real],
lambda x: (x[0], radians(x[1])),
),
}
# translation table for compound constraints {name : (name, ...), converter}
CompoundConstraints: Dict[
ConstraintKind, Tuple[Tuple[ConstraintKind, ...], Callable[[Any], Tuple[Any, ...]]]
] = {
"Plane": (("Axis", "Point"), lambda x: (radians(x) if x is not None else None, 0)),
"FixedRotation": (
("FixedRotationAxis", "FixedRotationAxis", "FixedRotationAxis"),
lambda x: tuple(enumerate(map(radians, x))),
),
}
# constraint POD type
Constraint = Tuple[
Tuple[ConstraintMarker, ...], ConstraintKind, Optional[Any],
]
NDOF = 6
DIR_SCALING = 1e2
DIFF_EPS = 1e-10
TOL = 1e-12
MAXITER = 2000
# high-level constraint class - to be used by clients
class ConstraintSpec(object):
"""
Geometrical constraint specification between two shapes of an assembly.
"""
objects: Tuple[str, ...]
args: Tuple[Shape, ...]
sublocs: Tuple[Location, ...]
kind: ConstraintKind
param: Any
def __init__(
self,
objects: Tuple[str, ...],
args: Tuple[Shape, ...],
sublocs: Tuple[Location, ...],
kind: ConstraintKind,
param: Any = None,
):
"""
Construct a constraint.
:param objects: object names referenced in the constraint
:param args: subshapes (e.g. faces or edges) of the objects
:param sublocs: locations of the objects (only relevant if the objects are nested in a sub-assembly)
:param kind: constraint kind
:param param: optional arbitrary parameter passed to the solver
"""
# validate
if not instance_of(kind, ConstraintKind):
raise ValueError(f"Unknown constraint {kind}.")
if kind in CompoundConstraints:
kinds, convert_compound = CompoundConstraints[kind]
for k, p in zip(kinds, convert_compound(param)):
self._validate(args, k, p)
else:
self._validate(args, kind, param)
# convert here for simple constraints
convert = ConstraintInvariants[kind][-1]
param = convert(param) if convert else param
# store
self.objects = objects
self.args = args
self.sublocs = sublocs
self.kind = kind
self.param = param
def _validate(self, args: Tuple[Shape, ...], kind: ConstraintKind, param: Any):
arity, marker_types, param_type, converter = ConstraintInvariants[kind]
# check arity
if arity != len(args):
raise ValueError(
f"Invalid number of entities for constraint {kind}. Provided {len(args)}, required {arity}."
)
# check arguments
arg_check: Dict[Any, Callable[[Shape], Any]] = {
gp_Pnt: self._getPnt,
gp_Dir: self._getAxis,
gp_Pln: self._getPln,
gp_Lin: self._getLin,
None: lambda x: True, # dummy check for None marker
}
for a, t in zip(args, tcast(Tuple[Type[ConstraintMarker], ...], marker_types)):
try:
arg_check[t](a)
except ValueError:
raise ValueError(f"Unsupported entity {a} for constraint {kind}.")
# check parameter
if not instance_of(param, param_type) and param is not None:
raise ValueError(
f"Unsupported argument types {get_type(param)}, required {param_type}."
)
# check parameter conversion
try:
if param is not None and converter:
converter(param)
except Exception as e:
raise ValueError(f"Exception {e} occured in the parameter conversion")
def _getAxis(self, arg: Shape) -> gp_Dir:
if isinstance(arg, Face):
rv = arg.normalAt()
elif isinstance(arg, Edge) and arg.geomType() != "CIRCLE":
rv = arg.tangentAt()
elif isinstance(arg, Edge) and arg.geomType() == "CIRCLE":
rv = arg.normal()
else:
raise ValueError(f"Cannot construct Axis for {arg}")
return rv.toDir()
def _getPln(self, arg: Shape) -> gp_Pln:
if isinstance(arg, Face):
rv = gp_Pln(self._getPnt(arg), arg.normalAt().toDir())
elif isinstance(arg, (Edge, Wire)):
normal = arg.normal()
origin = arg.Center()
plane = Plane(origin, normal=normal)
rv = plane.toPln()
else:
raise ValueError(f"Cannot construct a plane for {arg}.")
return rv
def _getPnt(self, arg: Shape) -> gp_Pnt:
# check for infinite face
if isinstance(arg, Face) and any(
Precision.IsInfinite_s(x) for x in BRepTools.UVBounds_s(arg.wrapped)
):
# fall back to gp_Pln center
pln = arg.toPln()
center = Vector(pln.Location())
else:
center = arg.Center()
return center.toPnt()
def _getLin(self, arg: Shape) -> gp_Lin:
if isinstance(arg, (Edge, Wire)):
center = arg.Center()
tangent = arg.tangentAt()
else:
raise ValueError(f"Cannot construct a plane for {arg}.")
return gp_Lin(center.toPnt(), tangent.toDir())
def toPODs(self) -> Tuple[Constraint, ...]:
"""
Convert the constraint to a representation used by the solver.
NB: Compound constraints are decomposed into simple ones.
"""
# apply sublocation
args = tuple(
arg.located(loc * arg.location())
for arg, loc in zip(self.args, self.sublocs)
)
markers: List[Tuple[ConstraintMarker, ...]]
# convert to marker objects
if self.kind == "Axis":
markers = [(self._getAxis(args[0]), self._getAxis(args[1]),)]
elif self.kind == "Point":
markers = [(self._getPnt(args[0]), self._getPnt(args[1]))]
elif self.kind == "Plane":
markers = [
(self._getAxis(args[0]), self._getAxis(args[1]),),
(self._getPnt(args[0]), self._getPnt(args[1])),
]
elif self.kind == "PointInPlane":
markers = [(self._getPnt(args[0]), self._getPln(args[1]))]
elif self.kind == "PointOnLine":
markers = [(self._getPnt(args[0]), self._getLin(args[1]))]
elif self.kind == "Fixed":
markers = [(None,)]
elif self.kind == "FixedPoint":
markers = [(self._getPnt(args[0]),)]
elif self.kind == "FixedAxis":
markers = [(self._getAxis(args[0]),)]
elif self.kind == "FixedRotation":
markers = [(None,), (None,), (None,)]
elif self.kind == "FixedRotationAxis":
markers = [(None,)]
else:
raise ValueError(f"Unknown constraint kind {self.kind}")
# specify kinds of the simple constraint
if self.kind in CompoundConstraints:
kinds, converter = CompoundConstraints[self.kind]
params = converter(self.param,)
else:
kinds = (self.kind,)
params = (self.param,)
# builds the tuple and return
return tuple(zip(markers, kinds, params))
# Cost functions of simple constraints
def point_cost(
m1: gp_Pnt, m2: gp_Pnt, t1: gp_Trsf, t2: gp_Trsf, val: Optional[float] = None,
) -> float:
val = 0 if val is None else val
return val - (m1.Transformed(t1).XYZ() - m2.Transformed(t2).XYZ()).Modulus()
def axis_cost(
m1: gp_Dir, m2: gp_Dir, t1: gp_Trsf, t2: gp_Trsf, val: Optional[float] = None,
) -> float:
val = pi if val is None else val
return DIR_SCALING * (val - m1.Transformed(t1).Angle(m2.Transformed(t2)))
def point_in_plane_cost(
m1: gp_Pnt, m2: gp_Pln, t1: gp_Trsf, t2: gp_Trsf, val: Optional[float] = None,
) -> float:
val = 0 if val is None else val
m2_located = m2.Transformed(t2)
# offset in the plane's normal direction by val:
m2_located.Translate(gp_Vec(m2_located.Axis().Direction()).Multiplied(val))
return m2_located.Distance(m1.Transformed(t1))
def point_on_line_cost(
m1: gp_Pnt, m2: gp_Lin, t1: gp_Trsf, t2: gp_Trsf, val: Optional[float] = None,
) -> float:
val = 0 if val is None else val
m2_located = m2.Transformed(t2)
return val - m2_located.Distance(m1.Transformed(t1))
def fixed_cost(m1: Type[None], t1: gp_Trsf, val: Optional[Type[None]] = None):
return 0
def fixed_point_cost(m1: gp_Pnt, t1: gp_Trsf, val: Tuple[float, float, float]):
return (m1.Transformed(t1).XYZ() - gp_XYZ(*val)).Modulus()
def fixed_axis_cost(m1: gp_Dir, t1: gp_Trsf, val: Tuple[float, float, float]):
return DIR_SCALING * (m1.Transformed(t1).Angle(gp_Dir(*val)))
def fixed_rotation_axis_cost(m1: gp_Dir, t1: gp_Trsf, val: Tuple[int, float]):
ix, v0 = val
v = t1.GetRotation().GetEulerAngles(gp_Intrinsic_XYZ)[ix]
return v - v0
# dictionary of individual constraint cost functions
costs: Dict[str, Callable[..., float]] = dict(
Point=point_cost,
Axis=axis_cost,
PointInPlane=point_in_plane_cost,
PointOnLine=point_on_line_cost,
Fixed=fixed_cost,
FixedPoint=fixed_point_cost,
FixedAxis=fixed_axis_cost,
FixedRotationAxis=fixed_rotation_axis_cost,
)
# Actual solver class
class ConstraintSolver(object):
entities: List[DOF6]
constraints: List[Tuple[Tuple[int, ...], Constraint]]
locked: List[int]
ne: int
nc: int
def __init__(
self,
entities: List[Location],
constraints: List[Tuple[Tuple[int, ...], Constraint]],
locked: List[int] = [],
):
self.entities = [self._locToDOF6(loc) for loc in entities]
self.constraints = constraints
# additional book-keeping
self.ne = len(entities)
self.locked = locked
self.nc = len(self.constraints)
@staticmethod
def _locToDOF6(loc: Location) -> DOF6:
T = loc.wrapped.Transformation()
v = T.TranslationPart()
q = T.GetRotation()
alpha_2 = (1 - q.W()) / (1 + q.W())
a = (alpha_2 + 1) * q.X() / 2
b = (alpha_2 + 1) * q.Y() / 2
c = (alpha_2 + 1) * q.Z() / 2
return (v.X(), v.Y(), v.Z(), a, b, c)
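    # Note: (a, b, c) above is a rational 3-parameter encoding of the rotation
    # quaternion (a = X/(1+W), etc.); _build_transform below applies the inverse
    # mapping, so a Location round-tripped through both methods reproduces the
    # same transform up to numerical noise.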
def _build_transform(
self, x: float, y: float, z: float, a: float, b: float, c: float
) -> gp_Trsf:
rv = gp_Trsf()
m = a ** 2 + b ** 2 + c ** 2
rv.SetRotation(
gp_Quaternion(
2 * a / (m + 1), 2 * b / (m + 1), 2 * c / (m + 1), (1 - m) / (m + 1),
)
)
rv.SetTranslationPart(gp_Vec(x, y, z))
return rv
def _cost(
self,
) -> Tuple[
Callable[[Array[(Any,), float]], float],
Callable[[Array[(Any,), float], Array[(Any,), float]], None],
]:
constraints = self.constraints
ne = self.ne
delta = DIFF_EPS * eye(NDOF)
def f(x):
"""
Function to be minimized
"""
rv = 0
transforms = [
self._build_transform(*x[NDOF * i : NDOF * (i + 1)]) for i in range(ne)
]
for ks, (ms, kind, params) in constraints:
ts = tuple(
transforms[k] if k not in self.locked else gp_Trsf() for k in ks
)
cost = costs[kind]
rv += cost(*ms, *ts, params) ** 2
return rv
def grad(x, rv):
rv[:] = 0
transforms = [
self._build_transform(*x[NDOF * i : NDOF * (i + 1)]) for i in range(ne)
]
transforms_delta = [
self._build_transform(*(x[NDOF * i : NDOF * (i + 1)] + delta[j, :]))
for i in range(ne)
for j in range(NDOF)
]
for ks, (ms, kind, params) in constraints:
ts = tuple(
transforms[k] if k not in self.locked else gp_Trsf() for k in ks
)
cost = costs[kind]
tmp_0 = cost(*ms, *ts, params)
for ix, k in enumerate(ks):
if k in self.locked:
continue
for j in range(NDOF):
tkj = transforms_delta[k * NDOF + j]
ts_kj = ts[:ix] + (tkj,) + ts[ix + 1 :]
tmp_kj = cost(*ms, *ts_kj, params)
rv[k * NDOF + j] += 2 * tmp_0 * (tmp_kj - tmp_0) / DIFF_EPS
return f, grad
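    # The gradient above is a forward finite-difference approximation of
    # d(cost^2)/dx with step DIFF_EPS, evaluated only for entities that are
    # not in the locked list (locked entities keep an identity transform).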
def solve(self) -> Tuple[List[Location], Dict[str, Any]]:
x0 = array([el for el in self.entities]).ravel()
f, grad = self._cost()
def func(x, g):
if g.size > 0:
grad(x, g)
return f(x)
opt = nlopt.opt(nlopt.LD_CCSAQ, len(x0))
opt.set_min_objective(func)
opt.set_ftol_abs(0)
opt.set_ftol_rel(0)
opt.set_xtol_rel(TOL)
opt.set_xtol_abs(TOL * 1e-3)
opt.set_maxeval(MAXITER)
x = opt.optimize(x0)
result = {
"cost": opt.last_optimum_value(),
"iters": opt.get_numevals(),
"status": opt.last_optimize_result(),
}
locs = [
Location(self._build_transform(*x[NDOF * i : NDOF * (i + 1)]))
for i in range(self.ne)
]
return locs, result
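# Minimal usage sketch (illustrative; marker construction omitted):
#
#   solver = ConstraintSolver(
#       entities=[loc_a, loc_b],                          # starting Locations
#       constraints=[((0, 1), (markers, "Point", None))], # constrain entity 0 to 1
#       locked=[1],                                       # keep entity 1 fixed
#   )
#   new_locations, stats = solver.solve()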
|
StarcoderdataPython
|
11375684
|
<reponame>wehak/reservoirpy<filename>reservoirpy/datasets/_seed.py
_DEFAULT_SEED = 5555
def get_seed():
"""Return the current random state seed used for dataset
generation.
Returns
-------
int
Current seed value.
"""
global _DEFAULT_SEED
return _DEFAULT_SEED
def set_seed(s: int):
"""Change the default random seed value.
This will change the behaviour of the Mackey-Glass
timeseries generator (see :py:func:`mackey_glass`).
Parameters
----------
s : int
A random state generator numerical seed.
"""
global _DEFAULT_SEED
_DEFAULT_SEED = s
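# Example usage (assuming these functions are imported from this module):
#   set_seed(42)
#   assert get_seed() == 42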
|
StarcoderdataPython
|
9749042
|
import os
import unittest
from pathsjson.path import Path
class TestPath(unittest.TestCase):
def test_equal(self):
a = Path('some/path', ['a', 'b', 'c'], [1, 2, 3])
b = Path('some/path', ['a', 'b', 'c'], [1, 2, 3])
c = Path('some/path', ['A', 'B', 'C'], [1, 2, 3])
self.assertEqual(a, b)
self.assertNotEqual(a, c)
def test_hash(self):
a = Path('some/path', ['a', 'b', 'c'], [1, 2, 3])
b = Path('some/path', ['a', 'b', 'c'], [1, 2, 3])
c = Path('some/path', ['A', 'B', 'C'], [1, 2, 3])
self.assertEqual(hash(a), hash(b))
self.assertNotEqual(hash(a), hash(c))
def test_resolve_basic(self):
path_str = os.path.join("data", "clean")
path = Path(path_str)
self.assertEqual(path.resolve(), path_str)
def test_resolve_with_defaults(self):
path_tmpl = os.path.join("data", "{}", "{}")
path_str = os.path.join("data", "0.0.1", "pathsjson")
path = Path(path_tmpl, ['VERSION', 'PROJ'], ['0.0.1', 'pathsjson'])
self.assertEqual(path.resolve(), path_str)
def test_resolve_with_null_default(self):
path_tmpl = os.path.join("data", "{}")
path = Path(path_tmpl, ['VERSION'], [None])
with self.assertRaisesRegexp(TypeError, "Expected args"):
path.resolve()
def test_resolve_with_positional_overrides(self):
path_tmpl = os.path.join("data", "{}", "{}")
path = Path(path_tmpl, ['VERSION', 'PROJ'], ['0.0.1', 'pathsjson'])
path_str = os.path.join("data", "0.0.2", "vaquero")
self.assertEqual(path.resolve("0.0.2", "vaquero"), path_str)
def test_resolve_with_keyword_overrides(self):
path_tmpl = os.path.join("data", "{}", "{}")
path = Path(path_tmpl, ['VERSION', 'PROJ'], ['0.0.1', 'pathsjson'])
path_str = os.path.join("data", "0.0.2", "vaquero")
self.assertEqual(path.resolve(PROJ="vaquero", VERSION="0.0.2"),
path_str)
def test_resolve_with_too_many_args(self):
path_tmpl = os.path.join("data", "{}", "{}")
path = Path(path_tmpl, ['VERSION', 'PROJ'], ['0.0.1', 'pathsjson'])
with self.assertRaisesRegexp(TypeError, "Too many args"):
path.resolve(PROJ="vaquero", VERSION="0.0.2", MODE=1)
def test_resolve_with_implicit_skipping(self):
path_tmpl = os.path.join("{}", "{}")
path = Path(path_tmpl,
['_IMPLICIT_ROOT', 'PROJ'],
['[implicit]', 'pathsjson'])
path_str = os.path.join("[implicit]", "0.0.2")
self.assertEqual(path.resolve("0.0.2"), path_str)
|
StarcoderdataPython
|
1837500
|
# Import the Images module from pillow
from PIL import Image
import os
class rpImage():
def __init__(self,name:str,image_blob,path='/'):
self.img_name = name
self.img_blob = image_blob
self.path = path
def get_image_meta(self):
        width, height = self.img_blob.size
size = str(len(self.img_blob.fp.read()))
return {"name":self.img_name,
"path": self.path,
"width": width,
"height":height,
"size":size}
def reduce_quality(output_filename,img_blob,outputdir,quality=25):
    # save a reduced-quality copy of the image to the output directory
    my_file_location = os.path.join(outputdir,output_filename)
    img_blob.save(my_file_location, quality=quality)
    # reopen the reduced image and wrap it
    new_img_blob = Image.open(my_file_location)
    img_obj = rpImage(output_filename,new_img_blob,path=outputdir)
    # return the metadata of the reduced image
    return img_obj.get_image_meta()
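# Usage sketch (hypothetical paths):
#   img = Image.open("photo.jpg")
#   meta = reduce_quality("photo_small.jpg", img, "/tmp/out", quality=25)
#   # meta is a dict with name, path, width, height and size keys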
|
StarcoderdataPython
|
3536430
|
import datetime
import re
import sys
from . import utils
import gocardless
from gocardless.exceptions import ClientError
import six
class ResourceMetaClass(type):
def __new__(meta, name, bases, attrs):
#resoures inherit date fields from superclasses
for base in bases:
if hasattr(base, "date_fields") and "date_fields" in attrs:
attrs["date_fields"].extend(base.date_fields)
return type.__new__(meta, name, bases, attrs)
@six.add_metaclass(ResourceMetaClass)
class Resource(object):
"""A GoCardless resource
Subclasses of `Resource` define class attributes to specify how
the resource is fetched and represented.
The class attribute `endpoint` is the path to the resource on the server.
The class attribute `date_fields` names fields which will be converted
into `datetime.datetime` objects on construction.
The class attribute `reference_fields` names fields which are uris to other
resources and will be converted into functions which can be called to
retrieve those resources.
"""
date_fields = ["created_at"]
reference_fields = []
def __init__(self, in_attrs, client):
"""Construct a resource
:param in_attrs: A dictionary of attributes, usually obtained from a
JSON response.
:param client: an instance of gocardless.Client
"""
attrs = in_attrs.copy()
self._raw_attrs = attrs.copy()
self.id = attrs["id"]
self.client = client
if "sub_resource_uris" in attrs:
#For each subresource_uri create a method which grabs data
#from the URI and uses it to instantiate the relevant class
#and return it.
for name, uri in six.iteritems(attrs.pop("sub_resource_uris")):
path = re.sub(".*/api/v1", "", uri)
sub_klass = self._get_klass_from_name(name)
def create_get_resource_func(the_path, the_klass):
# In python functions close over their environment so in
# order to create the correct closure we need a function
# creator, see
# http://stackoverflow.com/questions/233673/
# lexical-closures-in-python/235764#235764
def get_resources(inst, **params):
data = inst.client.api_get(the_path, params=params)
return [the_klass(attrs, self.client) for attrs in data]
return get_resources
res_func = create_get_resource_func(path, sub_klass)
func_name = "{0}".format(name)
res_func.name = func_name
setattr(self, func_name,
six.create_bound_method(res_func, self))
for fieldname in self.date_fields:
val = attrs.pop(fieldname)
if val is not None:
setattr(self, fieldname,
datetime.datetime.strptime(val, "%Y-%m-%dT%H:%M:%SZ"))
else:
setattr(self, fieldname, None)
for fieldname in self.reference_fields:
id = attrs.pop(fieldname)
def create_get_func(the_klass, the_id):
def get_referenced_resource(inst):
return the_klass.find_with_client(the_id, self.client)
return get_referenced_resource
name = fieldname.replace("_id", "")
klass = self._get_klass_from_name(name)
func = create_get_func(klass, id)
setattr(self, name, six.create_bound_method(func, self))
for key, value in six.iteritems(attrs):
setattr(self, key, value)
def _get_klass_from_name(self, name):
module = sys.modules[self.__module__]
klass = getattr(module, utils.singularize(utils.camelize(name)))
return klass
def get_endpoint(self):
return self.endpoint.replace(":id", self.id)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._raw_attrs == other._raw_attrs
return False
def __hash__(self):
return hash(self._raw_attrs["id"])
@classmethod
def find_with_client(cls, id, client):
path = cls.endpoint.replace(":id", id)
return cls(client.api_get(path), client)
@classmethod
def find(cls, id):
if not gocardless.client:
raise ClientError("You must set your account details first")
return cls.find_with_client(id, gocardless.client)
class Merchant(Resource):
endpoint = "/merchants/:id"
date_fields = ["next_payout_date"]
class Subscription(Resource):
endpoint = "/subscriptions/:id"
reference_fields = ["user_id", "merchant_id"]
date_fields = ["expires_at", "next_interval_start"]
def cancel(self):
path = "{0}/cancel".format(self.endpoint.replace(":id", self.id))
self.client.api_put(path)
class PreAuthorization(Resource):
endpoint = "/pre_authorizations/:id"
date_fields = ["expires_at", "next_interval_start"]
reference_fields = ["user_id", "merchant_id"]
def create_bill(self, amount, name=None, description=None,
charge_customer_at=None, currency=None):
return Bill.create_under_preauth(amount, self.id, self.client,
name=name, description=description,
charge_customer_at=charge_customer_at,
currency=currency)
def cancel(self):
path = "{0}/cancel".format(self.endpoint.replace(":id", self.id))
self.client.api_put(path)
class Bill(Resource):
endpoint = "/bills/:id"
date_fields = ["paid_at"]
reference_fields = ["merchant_id", "user_id", "payout_id"]
@classmethod
def create_under_preauth(self, amount, pre_auth_id, client, name=None,
description=None, charge_customer_at=None, currency=None):
path = "/bills"
params = {
"bill": {
"amount": amount,
"pre_authorization_id": pre_auth_id
}
}
if name:
params["bill"]["name"] = name
if description:
params["bill"]["description"] = description
if charge_customer_at:
params["bill"]["charge_customer_at"] = charge_customer_at
if currency:
params["bill"]["currency"] = currency
return Bill(client.api_post(path, params), client)
def retry(self):
path = "{0}/retry".format(self.endpoint.replace(":id", self.id))
self.client.api_post(path)
def cancel(self):
path = "{0}/cancel".format(self.endpoint.replace(":id", self.id))
self.client.api_put(path)
"""Please note the refund endpoint is disabled by default
If you have over 50 successful payments on your account and you
require access to the refund endpoint, please email <EMAIL>
"""
def refund(self):
path = "{0}/refund".format(self.endpoint.replace(":id", self.id))
self.client.api_post(path)
class Payout(Resource):
endpoint = "/payouts/:id"
date_fields = ["paid_at"]
class User(Resource):
endpoint = "/users/:id"
|
StarcoderdataPython
|
9735006
|
#! /usr/bin/env python
from typing import Optional
import pytest
from marshmallow.exceptions import ValidationError as MarshmallowValidationError
from marshmallow_dataclass import dataclass
import ludwig.marshmallow.marshmallow_schema_utils as lusutils
import ludwig.modules.optimization_modules as lmo
# Tests for custom dataclass/marshmallow fields:
def get_marshmallow_from_dataclass_field(dfield):
"""Helper method for checking marshmallow metadata succinctly."""
return dfield.metadata["marshmallow_field"]
def test_torch_description_pull():
example_empty_desc_prop = lusutils.unload_jsonschema_from_marshmallow_class(lmo.AdamOptimizerConfig)["properties"][
"lr"
]
assert (
isinstance(example_empty_desc_prop, dict)
and "description" in example_empty_desc_prop
and isinstance(example_empty_desc_prop["description"], str)
and len(example_empty_desc_prop["description"]) > 3
)
def test_OptimizerDataclassField():
# Test default case:
default_optimizer_field = lmo.OptimizerDataclassField()
assert default_optimizer_field.default_factory is not None
assert get_marshmallow_from_dataclass_field(default_optimizer_field).allow_none is False
assert default_optimizer_field.default_factory() == lmo.AdamOptimizerConfig()
# Test normal cases:
optimizer_field = lmo.OptimizerDataclassField({"type": "adamax"})
assert optimizer_field.default_factory is not None
assert get_marshmallow_from_dataclass_field(optimizer_field).allow_none is False
assert optimizer_field.default_factory() == lmo.AdamaxOptimizerConfig()
optimizer_field = lmo.OptimizerDataclassField({"type": "adamax", "betas": (0.1, 0.1)})
assert optimizer_field.default_factory is not None
assert get_marshmallow_from_dataclass_field(optimizer_field).allow_none is False
assert optimizer_field.default_factory().betas == (0.1, 0.1)
# Test invalid default case:
with pytest.raises(MarshmallowValidationError):
lmo.OptimizerDataclassField({})
with pytest.raises(MarshmallowValidationError):
lmo.OptimizerDataclassField("test")
with pytest.raises(MarshmallowValidationError):
lmo.OptimizerDataclassField(None)
with pytest.raises(MarshmallowValidationError):
lmo.OptimizerDataclassField(1)
# Test creating a schema with default options:
@dataclass
class CustomTestSchema(lusutils.BaseMarshmallowConfig):
foo: Optional[lmo.BaseOptimizerConfig] = lmo.OptimizerDataclassField()
with pytest.raises(MarshmallowValidationError):
CustomTestSchema.Schema().load({"foo": "test"})
assert CustomTestSchema.Schema().load({}).foo == lmo.AdamOptimizerConfig()
# Test creating a schema with set default:
@dataclass
class CustomTestSchema(lusutils.BaseMarshmallowConfig):
foo: Optional[lmo.BaseOptimizerConfig] = lmo.OptimizerDataclassField({"type": "adamax", "betas": (0.1, 0.1)})
with pytest.raises(MarshmallowValidationError):
CustomTestSchema.Schema().load({"foo": None})
with pytest.raises(MarshmallowValidationError):
CustomTestSchema.Schema().load({"foo": "test"})
with pytest.raises(MarshmallowValidationError):
CustomTestSchema.Schema().load({"foo": {"type": "invalid", "betas": (0.2, 0.2)}})
assert CustomTestSchema.Schema().load({}).foo == lmo.AdamaxOptimizerConfig(betas=(0.1, 0.1))
assert CustomTestSchema.Schema().load(
{"foo": {"type": "adamax", "betas": (0.2, 0.2)}}
).foo == lmo.AdamaxOptimizerConfig(betas=(0.2, 0.2))
assert CustomTestSchema.Schema().load(
{"foo": {"type": "adamax", "betas": (0.2, 0.2), "extra_key": 1}}
).foo == lmo.AdamaxOptimizerConfig(betas=(0.2, 0.2))
def test_ClipperDataclassField():
# Test default case:
default_clipper_field = lmo.GradientClippingDataclassField()
assert default_clipper_field.default_factory is not None
assert get_marshmallow_from_dataclass_field(default_clipper_field).allow_none is True
assert default_clipper_field.default_factory() == lmo.GradientClippingConfig()
# Test normal cases:
clipper_field = lmo.GradientClippingDataclassField({"clipglobalnorm": 0.1})
assert clipper_field.default_factory is not None
assert get_marshmallow_from_dataclass_field(clipper_field).allow_none is True
assert clipper_field.default_factory() == lmo.GradientClippingConfig(clipglobalnorm=0.1)
clipper_field = lmo.GradientClippingDataclassField({"clipglobalnorm": None})
assert clipper_field.default_factory is not None
assert get_marshmallow_from_dataclass_field(clipper_field).allow_none is True
assert clipper_field.default_factory() == lmo.GradientClippingConfig(clipglobalnorm=None)
# Test invalid default case:
with pytest.raises(MarshmallowValidationError):
lmo.GradientClippingDataclassField("test")
with pytest.raises(MarshmallowValidationError):
lmo.GradientClippingDataclassField(None)
with pytest.raises(MarshmallowValidationError):
lmo.GradientClippingDataclassField(1)
# Test creating a schema with default options:
@dataclass
class CustomTestSchema(lusutils.BaseMarshmallowConfig):
foo: Optional[lmo.GradientClippingConfig] = lmo.GradientClippingConfig()
with pytest.raises(MarshmallowValidationError):
CustomTestSchema.Schema().load({"foo": "test"})
assert CustomTestSchema.Schema().load({}).foo == lmo.GradientClippingConfig()
# Test creating a schema with set default:
@dataclass
class CustomTestSchema(lusutils.BaseMarshmallowConfig):
foo: Optional[lmo.GradientClippingConfig] = lmo.GradientClippingDataclassField({"clipglobalnorm": 0.1})
with pytest.raises(MarshmallowValidationError):
CustomTestSchema.Schema().load({"foo": "test"})
with pytest.raises(MarshmallowValidationError):
CustomTestSchema.Schema().load({"foo": {"clipglobalnorm": "invalid"}})
assert CustomTestSchema.Schema().load({}).foo == lmo.GradientClippingConfig(clipglobalnorm=0.1)
assert CustomTestSchema.Schema().load({"foo": {"clipglobalnorm": 1}}).foo == lmo.GradientClippingConfig(
clipglobalnorm=1
)
assert CustomTestSchema.Schema().load(
{"foo": {"clipglobalnorm": 1, "extra_key": 1}}
).foo == lmo.GradientClippingConfig(clipglobalnorm=1)
|
StarcoderdataPython
|
1819062
|
from flask import render_template
from app.blueprints.account.views import account
@account.app_errorhandler(403)
def forbidden(_):
return render_template('errors/403.html'), 403
@account.app_errorhandler(404)
def page_not_found(_):
return render_template('errors/404.html'), 404
@account.app_errorhandler(500)
def internal_server_error(_):
return render_template('errors/500.html'), 500
|
StarcoderdataPython
|
9743143
|
import asyncio
import functools
from io import BytesIO
from typing import Any, Callable, Union
import numpy as np
from loguru import logger as log
try:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
plt.style.use("dark_background")
except (ImportError, ImportWarning) as e:
log.error("Error when importing Matplotlib.", exc_info=(type(e), e, e.__traceback__))
def plot_and_save(func: Callable) -> Any:
"""Executor wrapper for different synchronous functions."""
@functools.wraps(func)
async def wrapper(*args, **kwargs) -> Any:
sync_func = functools.partial(func, *args, **kwargs)
return await asyncio.get_event_loop().run_in_executor(None, sync_func)
return wrapper
@plot_and_save
def graph_2d(
x: Union[np.ndarray, list],
y: Union[np.ndarray, list],
*,
graph_type: str = "line",
autorotate_xaxis: bool = True
) -> BytesIO:
"""Graphing points and saving said graph to a file."""
buffer = BytesIO()
fig, axis = plt.subplots()
axis.grid(True, linestyle="-.", linewidth=0.5)
if graph_type == "line":
axis.plot(x, y)
elif graph_type == "bar":
placements = np.linspace(0, len(x) + 1, len(x))
axis.bar(placements, y, 0.5)
axis.set_xticks(placements)
axis.set_xticklabels(x)
else:
raise ValueError(f"Invalid graph_type '{graph_type}'.")
if autorotate_xaxis:
plt.gcf().autofmt_xdate()
fig.savefig(buffer, format="png")
buffer.seek(0)
return buffer
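# Usage sketch (must be awaited inside a running event loop; values illustrative):
#   buf = await graph_2d([1, 2, 3], [2, 4, 8], graph_type="bar")
#   # buf is a BytesIO containing the rendered PNG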
|
StarcoderdataPython
|
3330494
|
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash_app.app import app
layout = html.Div(
[
dbc.Jumbotron(
[
html.H1('Welcome to PaIntDB!'),
dcc.Markdown(
'*Pseudomonas aeruginosa* Interactions Database',
className='lead',
),
html.Hr(className='my-2'),
dcc.Markdown(
'PaIntDB contains more than 157,000 protein-protein and protein-metabolite interactions in '
'*Pseudomonas aeruginosa* strains PAO1 and PA14.\n\n'
'It takes a list of significant genes identified through high-throughput experiments, maps the '
'interactions between them and returns a protein-protein interaction network that can be explored '
'visually and filtered to find putative biological pathways and co-expressed genes related to your '
'conditions of study.\n\n',
style={'width': '60vw',
'font-size': '20px'}
),
dbc.Button('Get Started', color='primary', id='start', href='/menu', size='lg'),
dbc.Button('User Guide', color='secondary', id='user-guide', href='/user_guide', size='lg',
style={'margin-left': '5px'})
],
style={'margin': '10px',
'backgroundColor': '#a6edff'}
),
html.Div(
[
html.A(
html.Img(src=app.get_asset_url('hancock-lab-logo.svg'),
style={'width': '175px'}),
href='http://cmdr.ubc.ca/bobh/'
),
dcc.Markdown('PaIntDB is being developed by the [Hancock Laboratory](http://cmdr.ubc.ca/bobh/) at the '
'University of British Columbia.\n\nFunding is currently provided by the Canadian '
'Institutes of Health Research FDN-154287.',
style={'margin-top': '10px'})
],
style={'font-size': '14px',
'margin-left': '10px',
'padding': '7px'}
)
],
style={'background-color': '#ededed',
'height': 'calc(100vh - 76px)'}
)
|
StarcoderdataPython
|
1893868
|
#!/usr/bin/env python
import logging
import os
import subprocess
import sys
import argparse
import shlex
logger = None
def init_logger(filename):
global logger
if logger == None:
logger = logging.getLogger()
else:
for handler in logger.handlers[:]:
logger.removeHandler(handler)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s')
fh = logging.FileHandler(filename)
fh.setFormatter(formatter)
logger.addHandler(fh)
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(formatter)
logger.addHandler(sh)
def run(cmd, logfile='output.log'):
"run shell command"
logger.info('Directory: ' + os.getcwd())
logger.info('Running: ' + cmd)
cmd = ' /usr/bin/time -avo {} {} '.format(logfile, cmd)
subprocess.call(shlex.split(cmd), shell=False)
def newdir(name):
"create unique directory and chdir to it"
dir_name = name
try:
os.mkdir(dir_name)
except:
level_id = 1
while True:
try:
dir_name = '{}.{}'.format(name, level_id)
os.mkdir(dir_name)
break
except:
level_id += 1
os.chdir(dir_name)
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--gatk', default=argparse.SUPPRESS, help="input path to gatk tool")
parser.add_argument('--input', default=argparse.SUPPRESS, help="input path to BAM file")
args = parser.parse_args()
if 'help' in args:
print('Usage: python gatk-printreads-test.py --gatk <path to gatk> --input <path to bam file>')
if 'gatk' in args:
gatk_path = args.gatk
else:
gatk_path = os.getcwd()+"/gatk"
if 'input' in args:
input_path = args.input
else:
input_path = os.getcwd()+"/input.bam"
gatk = os.path.abspath(gatk_path)
if not os.path.isfile(gatk):
exit("Please specify a valid gatk file using the --gatk parameter.")
print('Setting gatk path as {}'.format(gatk_path))
input_file = os.path.abspath(input_path)
if not os.path.isfile(input_file):
exit("Please specify a valid BAM file using the --input parameter.")
print('Setting input bam as {}'.format(input_path))
levels = [1, 5, 9]
newdir('test')
init_logger('run.log')
for level in levels:
# create test dir
newdir('level-{}'.format(level))
output = 'output.bam'
# run PrintReads
env = '--java-options "-Dsamjdk.compression_level={}"'.format(level)
        cmd = '{} {} PrintReads --input {} --output {}'.format(gatk, env, input_file, output)
run(cmd)
# run CompareSAMs
cmd = '{} {} CompareSAMs {} {}'.format(gatk, env, input_file, output)
run(cmd)
os.chdir('..')
if __name__ == "__main__":
if len(sys.argv) < 6:
main(sys.argv[1:])
else:
print('Usage: python gatk-printreads-test.py --gatk </path/to/gatk> --input <path/to/input.bam>')
|
StarcoderdataPython
|
8188544
|
import re
import streamlit as st
from sagas.conf.conf import cf
# all_labels = {"Dutch":'nl', "Persian":'fa', "Japanese":'ja',
# "Korea":'ko', "Afrikaans":'af', "Russian":'ru',
# "Italian":'it', "Turkish":'tr', 'Finnish':'fi',
# 'Estonian':'et',
# "Arabic":'ar'}
# def all_labels():
from sagas.nlu.treebanks import treebanks
from sagas.nlu.utils import fix_sents
all_labels=treebanks.lang_map()
def write_styles():
st.write("<style>red{color:red} orange{color:orange} "
"yellow{color:yellow} green{color:green} "
"blue{color:blue} purple{color:purple} "
"cyan{color:blue} magenta{color:magenta} "
"</style>", unsafe_allow_html=True)
def fix_data(data):
if 'engine' not in data:
data['engine'] = cf.engine(data['lang'])
data['sents']=fix_sents(data['sents'], data['lang'])
return data
|
StarcoderdataPython
|
12813067
|
<filename>mac.py<gh_stars>0
#!/usr/bin/env python
# _*_ coding=utf-8 _*_
import Tkinter
import ttk
import socket
import binascii
import dpkt
from scapy.all import *
import sys
reload(sys)
sys.setdefaultencoding('utf8')
def catchPcap():
dpkt = sniff(count=40)
wrpcap("demo.pcap",dpkt)
def Macclass():
f = open("demo.pcap","rb")
pcap = dpkt.pcap.Reader(f)
mac=Tkinter.Tk()
mac.title("Enthernet帧头部解析")
tree=ttk.Treeview(mac,show="headings")#表格
tree["columns"]=("源Mac地址","目的Mac地址","类型")
tree.column("源Mac地址",width=150) #表示列,不显示
tree.column("目的Mac地址",width=150)
tree.column("类型",width=50)
tree.heading("源Mac地址",text="源Mac地址") #显示表头
tree.heading("目的Mac地址",text="目的Mac地址")
tree.heading("类型",text="类型")
j=0
for (ts,buf) in pcap:
ethheader = buf[0:14]
dstmac = binascii.b2a_hex(ethheader[0:6])
srcmac = binascii.b2a_hex(ethheader[6:12])
netlayer_type = binascii.b2a_hex(ethheader[12:14])
tree.insert("",j,text=j,values=(srcmac,dstmac,netlayer_type)) #插入数据,
tree.pack()
mac.mainloop()
if __name__ == '__main__':
catchPcap()
Macclass()
|
StarcoderdataPython
|
12828206
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
from .. import utilities, tables
class RegionInstanceGroupManager(pulumi.CustomResource):
"""
The Google Compute Engine Regional Instance Group Manager API creates and manages pools
of homogeneous Compute Engine virtual machine instances from a common instance
template. For more information, see [the official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups)
and [API](https://cloud.google.com/compute/docs/reference/latest/regionInstanceGroupManagers)
~> **Note:** Use [google_compute_instance_group_manager](https://www.terraform.io/docs/providers/google/r/compute_instance_group_manager.html) to create a single-zone instance group manager.
"""
def __init__(__self__, __name__, __opts__=None, auto_healing_policies=None, base_instance_name=None, description=None, distribution_policy_zones=None, instance_template=None, name=None, named_ports=None, project=None, region=None, rolling_update_policy=None, target_pools=None, target_size=None, update_strategy=None, versions=None, wait_for_instances=None):
"""Create a RegionInstanceGroupManager resource with the given unique name, props, and options."""
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, str):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['auto_healing_policies'] = auto_healing_policies
if not base_instance_name:
raise TypeError('Missing required property base_instance_name')
__props__['base_instance_name'] = base_instance_name
__props__['description'] = description
__props__['distribution_policy_zones'] = distribution_policy_zones
__props__['instance_template'] = instance_template
__props__['name'] = name
__props__['named_ports'] = named_ports
__props__['project'] = project
if not region:
raise TypeError('Missing required property region')
__props__['region'] = region
__props__['rolling_update_policy'] = rolling_update_policy
__props__['target_pools'] = target_pools
__props__['target_size'] = target_size
__props__['update_strategy'] = update_strategy
__props__['versions'] = versions
__props__['wait_for_instances'] = wait_for_instances
__props__['fingerprint'] = None
__props__['instance_group'] = None
__props__['self_link'] = None
super(RegionInstanceGroupManager, __self__).__init__(
'gcp:compute/regionInstanceGroupManager:RegionInstanceGroupManager',
__name__,
__props__,
__opts__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
StarcoderdataPython
|
9695357
|
# Hiccup - Burp Suite Python Extensions
# Copyright 2012 Zynga Inc.
from burp import IBurpExtender
import sys, os, re, time, logging
from hiccup import GlobalConfig, PluginManager, FileWatcher, Message, MenuItemHandler
from hiccup import SharedFunctions as shared
class BurpExtender(IBurpExtender):
config_file = 'hiccup.yaml'
conf_watcher = None
init_level = 0 #0 at launch, 1 with fwatcher, 2 with config, 3 with callbacks, 4 with pluginmanager
init_sleep = 3 #seconds to wait before retesting for callbacks
file_watcher = None
global_config = None
plugin_manager = None
callbacks = None
logger = None
handler = None
log_level = logging.INFO #default - logging.INFO or logging.DEBUG
log_format = '[%(module)s] %(message)s'
######
### INIT FUNCTIONS
######
def __init__(self):
self.__init_logger()
self.__init_filewatcher()
self.__init_config()
def __init_logger(self):
if self.logger == None:
self.logger = logging.getLogger()
if self.global_config != None:
try:
self.log_level = logging.DEBUG if self.global_config['defaults']['log_level'] == 'debug' else logging.INFO
except TypeError, e:
self.logger.error("config file does not define global -> log_level, using default")
try:
self.log_format = self.global_config['defaults']['log_format']
except TypeError, e:
self.logger.error("config file does not define global -> log_format, using default")
self.logger.setLevel(self.log_level)
if self.handler == None:
self.handler = logging.StreamHandler(sys.stdout)
self.handler.setFormatter(logging.Formatter(self.log_format))
self.logger.addHandler(self.handler)
def __init_filewatcher(self):
try:
self.file_watcher = FileWatcher.FileWatcher('hiccup', ['GlobalConfig.py', 'PluginManager.py', 'SharedFunctions.py', 'zSharedFunctions.py', 'Message.py', 'BasePlugin.py', 'MenuItemHandler.py'])
except Exception, e:
self.logger.error("exception initializing FileWatcher : %s" % (e))
else:
self.__change_init(1)
def __init_config(self):
if os.path.isfile(self.config_file) == False:
self.logger.error("Configuration file '%s' not found." % (os.path.join(os.getcwd(), self.config_file)))
else:
self.global_config = GlobalConfig.GlobalConfig(self.config_file)
self.conf_watcher = FileWatcher.FileWatcher('.', [self.config_file,])
if self.global_config.is_valid():
self.__init_logger()
self.__change_init(2)
else:
self.__change_init(1, True)
def __init_callbacks(self, callbacks):
if self.init_level > 1:
try:
self.callbacks = callbacks
self.global_config.add_callbacks(callbacks)
except Exception, e:
self.logger.error("exception initializing callbacks : %s" % (e))
self.callbacks = None
else:
self.__change_init(3)
self.__init_menuhandler()
self.__init_pluginmanager()
def __init_pluginmanager(self):
while self.init_level != 3:
self.logger.info("waiting for Burp to finish initializing environment")
time.sleep(self.init_sleep)
try:
self.logger.debug("starting PluginManager")
self.plugin_manager = PluginManager.PluginManager(self.global_config)
self.global_config['internals']['menu_handler'].set_plugin_manager(self.plugin_manager)
except Exception, e:
self.logger.error("exception initializing PluginManager : %s" % (e))
else:
self.__change_init(4, True)
def __init_menuhandler(self):
self.global_config['internals']['menu_handler'] = MenuItemHandler.MenuItemHandler(self.global_config, self.logger, self.plugin_manager, self)
def __change_init(self, level, notify=False):
if level == 1:
self.logger.debug("switching to init_level 1")
if notify: self.logger.info("Burp will proxy messages but they will not be processed by Hiccup")
elif level == 2:
self.logger.debug("switching to init_level 2")
if notify: self.logger.info("Burp will proxy messages but they will not be processed by Hiccup")
elif level == 3:
self.logger.debug("switching to init_level 3")
if notify: self.logger.info("Burp will proxy messages but they will not be processed by Hiccup")
elif level == 4:
self.logger.debug("switching to init_level 4")
if notify: self.logger.info("Hiccup initialized")
else:
self.logger.error("__change_init to unrecognized init_level: %s" % level)
self.init_level = level
### BURP FUNCTIONS
#registerExtenderCallbacks called on startup to register callbacks object
def registerExtenderCallbacks(self, callbacks):
self.logger.debug("registerExtenderCallbacks received call (init_level:%s)" % (self.init_level))
self.__init_callbacks(callbacks)
## processHttpMessage called whenever any of Burp's tools makes an HTTP request or receives a response
## - for requests, involved immediately before request sent to network
## - for responses, invoked immediately after request is received from network
def processHttpMessage(self, toolName, messageIsRequest, messageInfo):
self.reload_on_change()
if self.init_level == 4:
messageType = toolName
messageReference = '~'
remoteHost = messageInfo.getHost()
remotePort = messageInfo.getPort()
serviceIsHttps = True if messageInfo.getProtocol() == 'https' else False
httpMethod = ''
url = '%s://%s%s' % (messageInfo.getUrl().getProtocol(), messageInfo.getUrl().getHost(), messageInfo.getUrl().getPath())
if (messageInfo.getUrl().getQuery() != None):
url = '%s?%s' % (url, messageInfo.getUrl().getQuery())
resourceType = ''
statusCode = '' if messageIsRequest else messageInfo.getStatusCode()
responseContentType = ''
messageRaw = messageInfo.getRequest() if messageIsRequest else messageInfo.getResponse()
interceptAction = ['',]
message = Message.Message(self.global_config, messageType, messageReference, messageIsRequest,
remoteHost, remotePort, serviceIsHttps, httpMethod, url, resourceType,
statusCode, responseContentType, messageRaw, interceptAction)
self.__process_message(message)
if message.is_changed():
message.update_content_length()
messageInfo.setRequest(message['headers'] + message['body']) if messageIsRequest else messageInfo.setResponse(message['headers'] + message['body'])
if message.is_highlighted():
messageInfo.setHighlight(message.get_highlight())
if message.is_commented():
messageInfo.setComment(message.get_comment())
## processProxyMessage method, called by Burp when a message is passed through the proxy.
def processProxyMessage(self, messageReference, messageIsRequest, remoteHost, remotePort,
serviceIsHttps, httpMethod, url, resourceType, statusCode,
responseContentType, messageRaw, interceptAction):
self.reload_on_change()
if self.init_level == 4:
messageType = 'proxy'
message = Message.Message(self.global_config, messageType, messageReference, messageIsRequest,
remoteHost, remotePort, serviceIsHttps, httpMethod, url,
resourceType, statusCode, responseContentType, messageRaw, interceptAction)
self.__process_message(message)
interceptAction[0] = message['interceptaction']
if message.is_changed() == False:
return message['raw']
else:
message.update_content_length()
return message['headers'] + message['body']
## applicationClosing method, called by Burp immediately before exit
def applicationClosing(self):
self.logger.info("Hiccup shutting down during Burp exit")
if (self.global_config['defaults']['auto_delete_class_files'] == True):
for fname in os.listdir(self.global_config['defaults']['plugin_directory']):
if (fname.endswith('$py.class')):
self.logger.debug("deleting stale .class file : %s" % fname)
os.remove(os.path.join(self.global_config['defaults']['plugin_directory'], fname))
######
### INTERNAL FUNCTIONS
######
# run message (request/response) through plugins via plugin_manager
def __process_message(self, message):
if (message.is_request()):
self.plugin_manager.process_request(message)
else:
self.plugin_manager.process_response(message)
# do config/module/plugin reloads, if changes detected
def reload_on_change(self):
self.logger.debug("testing for config/module/plugin changes")
if len(self.conf_watcher.get_changed()) > 0:
self.logger.info("configuration file change detected, reloading")
self.global_config.reload_from_file()
if self.global_config.is_valid() == False:
self.__change_init(1, True)
else:
self.plugin_manager = PluginManager.PluginManager(self.global_config)
self.global_config['internals']['menu_handler'].set_plugin_manager(self.plugin_manager)
self.__init_logger()
self.__change_init(4, True)
if self.init_level > 2:
for fname in self.file_watcher.get_changed():
modname = ''.join(fname.split('.')[:-1])
self.logger.info(" module change detected, reloading '%s'" % (modname))
if modname == 'BasePlugin':
self.plugin_manager = PluginManager.PluginManager(self.global_config)
self.global_config['internals']['menu_handler'].set_plugin_manager(self.plugin_manager)
else:
reload(sys.modules["hiccup." + modname])
if (modname == 'GlobalConfig'):
self.global_config = GlobalConfig.GlobalConfig(self.config_file, self.callbacks)
self.plugin_manager = PluginManager.PluginManager(self.global_config)
self.global_config['internals']['menu_handler'].set_plugin_manager(self.plugin_manager)
elif (modname == 'PluginManager'):
self.plugin_manager = PluginManager.PluginManager(self.global_config)
self.global_config['internals']['menu_handler'].set_plugin_manager(self.plugin_manager)
if self.init_level == 4:
self.plugin_manager.reload_changed()
|
StarcoderdataPython
|
1616543
|
<reponame>alexandrwang/hackmit<filename>server.py
from flask import Flask, request, redirect
import twilio.twiml
import subprocess
import json
import sklearn
import random
import datetime
from sklearn.feature_extraction import DictVectorizer
from sklearn import svm
import re
import nltk
import datetime
import traceback
app = Flask(__name__)
AMADEUS_API_KEY = '<KEY>'
LOW_FARE_URL = 'http://api.sandbox.amadeus.com/v1.2/flights/low-fare-search'
EXTENSIVE_URL = 'http://api.sandbox.amadeus.com/v1.2/flights/extensive-search'
cities_regex = re.compile('(?:^|.* )([A-Z]*) to ([A-Z]*).*')
day_regex = re.compile('.*(January|February|March|April|May|June|July|August|September|October|November|December) ([0-3]?[0-9]).*')
day_regex_2 = re.compile('.*([01]?[0-9])[\-/]([0123]?[0-9]).*')
time_regex = re.compile('.*(before|after) ([01]?[0-9]) ?([AaPp][Mm]).*')
month_to_num = {
'January': 1,
'February': 2,
'March': 3,
'April': 4,
'May': 5,
'June': 6,
'July': 7,
'August': 8,
'September': 9,
'October': 10,
'November': 11,
'December': 12
}
def canned_responses(msg):
if len(msg) > 20:
return None
if 'thank' in msg.lower():
return 'No problem! :)'
elif 'hi' in msg.lower() or 'hey' in msg.lower() or 'hello' in msg.lower():
return 'Hi, how can I help?'
elif ('who' in msg.lower() or 'name' in msg.lower()) and '?' in msg:
return 'Hi, I\'m Emma! Nice to meet you! :)'
@app.route("/", methods=['GET', 'POST'])
def respond():
msg = request.form.get('Body')
canned = canned_responses(msg)
if canned:
resp = twilio.twiml.Response()
resp.message(canned)
return str(resp)
try:
try:
msg_params = parse_msg(msg)
except Exception:
resp = twilio.twiml.Response()
resp.message("Sorry, I didn't quite catch that. What did you mean?")
return str(resp)
today = datetime.date.today()
month = msg_params['month']
month = int(month) if len(month) < 3 else month_to_num[msg_params['month']]
day = int(msg_params['day'])
year = today.year if today < datetime.date(today.year, month, day) else today.year + 1
datestr = str(datetime.date(year, month, day))
best_time, saved_amt = find_best_time_to_buy(msg_params['origin'], msg_params['destination'], datestr)
buy_in_days = (best_time - today).days
buy_in_days_str = 'in %d days' % buy_in_days if buy_in_days > 0 else 'now'
if len(buy_in_days_str) > 3:
buy_in_days_str += ", and I saved you $%.2f" % saved_amt
if buy_in_days_str == 'now':
depart, arrive, fare = get_best_current_flight(msg_params['origin'], msg_params['destination'], datestr)
buy_in_days_str += '. The flight will depart at %s and arrive at %s in the local time of %s, and the total fare was $%s' % (depart, arrive, msg_params['destination'], fare)
resp = twilio.twiml.Response()
resp.message("Sure thing! I'll book that for you %s. Have a safe trip!" % buy_in_days_str)
return str(resp)
except Exception as e:
resp = twilio.twiml.Response()
resp.message("Oops! I had a kerfuffle. Could you ask me that again?")
traceback.print_exc()
return str(resp)
def iso_to_ordinal(iso):
return datetime.datetime.strptime(iso, '%Y-%m-%d').toordinal()
def amadeus_low_fare_request(origin, destination, departure_date, **kwargs):
"""Makes a request to Amadeus for the low fare flights according to params."""
url_params = {
'origin': origin,
'destination': destination,
'departure_date': departure_date,
}
url_params.update(kwargs)
url = LOW_FARE_URL + '?' + ('apikey=%s&' % AMADEUS_API_KEY) + '&'.join(['%s=%s' % (a, b) for a, b in url_params.iteritems()])
try:
output = subprocess.check_output(['curl', '-X', 'GET', url])
except Exception:
output = subprocess.check_output(['curl', '-X', 'GET', url])
return json.loads(output)
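# Example call (illustrative): amadeus_low_fare_request('BOS', 'SFO', '2016-10-01',
# number_of_results=1) issues a GET to LOW_FARE_URL with the apikey plus the
# origin/destination/departure_date (and any extra keyword) query parameters.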
def amadeus_extensive_request(origin, destination, **kwargs):
"""Makes a request to Amadeus for the low fare flights according to params."""
url_params = {
'origin': origin,
'destination': destination,
'aggregation_mode': 'DAY',
}
url_params.update(kwargs)
url = EXTENSIVE_URL + '?' + ('apikey=%s&' % AMADEUS_API_KEY) + '&'.join(['%s=%s' % (a, b) for a, b in url_params.iteritems()])
try:
output = subprocess.check_output(['curl', '-X', 'GET', url])
except Exception:
output = subprocess.check_output(['curl', '-X', 'GET', url])
return json.loads(output)
def flat_flights(amadeus_res):
ret = []
for d in amadeus_res['results']:
common = set(d.keys()) - {'itineraries'}
for it in d['itineraries']:
newd = {k: d[k] for k in common}
newd.update(it)
ret.append(newd)
return ret
def parse_extensive(data):
origin = data['origin']
new_data = []
values = []
for i in data['results']:
values.append(float(i['price']))
temp = i
del temp['price']
del temp['airline']
temp[u'origin'] = origin
departure_date = iso_to_ordinal(temp['departure_date'])
return_date = iso_to_ordinal(temp['return_date'])
now = datetime.datetime.today().toordinal()
days_in_advance = departure_date - now
temp[u'departure_date'] = departure_date
temp[u'return_date'] = return_date
temp[u'days_in_advance'] = days_in_advance
new_data.append(temp)
return (new_data, values)
def get_best_current_flight(origin, destination, departure_date):
res = amadeus_low_fare_request(origin, destination, departure_date, number_of_results=1)
depart_time = res['results'][0]['itineraries'][0]['outbound']['flights'][0]['departs_at']
arrival_time = res['results'][0]['itineraries'][0]['outbound']['flights'][-1]['arrives_at']
depart_time = depart_time.split('T')[-1]
arrival_time = arrival_time.split('T')[-1]
depart_time = datetime.datetime.strptime(depart_time, "%H:%M").strftime("%I:%M %p")
arrival_time = datetime.datetime.strptime(arrival_time, "%H:%M").strftime("%I:%M %p")
fare = res['results'][0]['fare']['total_price']
return depart_time, arrival_time, fare
def find_best_time_to_buy(origin, destination, departure_date, arrive_by=None):
"""Given the parameters from a text, find the best time to buy."""
features, values = parse_extensive(amadeus_extensive_request(origin, destination))
vec = DictVectorizer()
clf = svm.SVR()
clf.fit(vec.fit_transform(features).toarray(), values)
print vec.get_feature_names()
base = {
u'origin': origin,
u'destination': destination,
u'departure_date' : iso_to_ordinal(departure_date),
u'return_date' : iso_to_ordinal(departure_date) + 7,
}
now = datetime.datetime.today().toordinal()
curr = 1000000000000.0
best_day = now
worst = 0.0
for days_in_advance in range(iso_to_ordinal(departure_date) - now + 1):
temp = base
temp[u'days_in_advance'] = days_in_advance
price = clf.predict(vec.transform(temp).toarray()) + random.uniform(-0.3,0.3)
if price < curr:
curr = price
best_day = iso_to_ordinal(departure_date) - days_in_advance
worst = max(worst, price)
best_day = min(best_day, max(iso_to_ordinal(departure_date) - 47, now))
amount_saved = worst - curr if best_day != now else 0.0
return datetime.date.fromordinal(best_day), amount_saved * 100.0
def parse_msg(msg):
origin = cities_regex.match(msg).group(1)
destination = cities_regex.match(msg).group(2)
month = ''
day = ''
try:
month = day_regex.match(msg).group(1)
day = day_regex.match(msg).group(2)
except Exception:
try:
month = day_regex_2.match(msg).group(1)
day = day_regex_2.match(msg).group(2)
except Exception:
if 'tomorrow' in msg.lower():
flight_date = datetime.date.today() + datetime.timedelta(1)
month = str(flight_date.month)
day = str(flight_date.day)
hour_side = ''
hour = ''
m = ''
try:
hour_side = time_regex.match(msg).group(1)
hour = time_regex.match(msg).group(2)
m = time_regex.match(msg).group(3)
except Exception:
pass
res = {
'origin': origin,
'destination': destination,
'month': month,
'day': day,
'hour_side': hour_side,
'hour': hour,
'm': m
}
return res
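# Example (illustrative): parse_msg("BOS to SFO on March 5 before 9 AM") should
# return origin='BOS', destination='SFO', month='March', day='5',
# hour_side='before', hour='9', m='AM'.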
if __name__ == "__main__":
app.run(debug=True)
|
StarcoderdataPython
|
11230627
|
<filename>src/arrays/merge-intervals-2.py
def solve(intervals):
intervals.sort(reverse=True)
result = []
while len(intervals) > 0:
if len(result) == 0:
result.append(intervals.pop())
else:
prev_interval = result.pop()
next_interval = intervals.pop()
if next_interval[0] <= prev_interval[1]:
new_interval = [
prev_interval[0],
max(prev_interval[1], next_interval[1])
]
result.append(new_interval)
else:
result.append(prev_interval)
result.append(next_interval)
return result
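# For the sample input below the merged result should be
# [[1, 6], [8, 10], [15, 18]].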
A = [[1, 3], [2, 6], [8, 10], [15, 18]]
print(solve(A))
|
StarcoderdataPython
|
1972574
|
<reponame>avcopan/elstruct-interface
"""
Library of functions to retrieve frequency information from a Psi4 1.0 output file.
"""
__authors__ = "<NAME>, <NAME>"
__updated__ = "2019-01-15"
from ..rere import parse as repar
from ..rere import find as ref
from ..rere import pattern as rep
from ..rere import pattern_lib as relib
from ... import params
##### Series of functions to read the frequency information #####
def harm_vib_freqs_reader(output_string):
""" Reads the harmonic vibrational frequencies from the output file.
Returns the frequencies as a list of floats in cm-1.
"""
# Pattern to locate all frequencies in a string
harm_vib_freq_pattern = (
'Freq \[cm^-1\]' +
rep.one_or_more(relib.WHITESPACE) +
rep.capturing(
rep.one_or_more(relib.FLOAT +
rep.one_or_more(relib.WHITESPACE))
)
)
def harm_zpve_reader(output_string):
""" Reads the harmonic zero-point vibrational energy (ZPVE) from the output file.
Returns the ZPVE as a float; in Hartrees.
"""
# String pattern to retrieve the ZPVE
zpve_pattern = (
'Vibrational ZPE' +
rep.one_or_more(relib.WHITESPACE) +
relib.FLOAT +
rep.one_or_more(relib.WHITESPACE) +
'\[kcal/mol\]' +
rep.one_or_more(relib.WHITESPACE) +
relib.FLOAT +
rep.one_or_more(relib.WHITESPACE) +
'\[kJ/mol\]' +
rep.one_or_more(relib.WHITESPACE) +
relib.FLOAT +
rep.one_or_more(relib.WHITESPACE) +
        '\[Eh\]' +
rep.one_or_more(relib.WHITESPACE) +
rep.capturing(relib.FLOAT) +
rep.one_or_more(relib.WHITESPACE) +
'\[cm^-1\]'
)
# Obtain the ZPVE
harm_zpve = repar.sing_float_string(zpve_pattern, output_string)
return harm_zpve
##### Dictionary of functions to read frequency information in the files #####
FREQUENCY_READERS = {
params.FREQUENCY.HARM_FREQ : harm_vib_freqs_reader,
params.FREQUENCY.HARM_ZPVE : harm_zpve_reader
}
|
StarcoderdataPython
|
1605506
|
<reponame>Liyra/ArchiveScript
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='ArchiveScript',
version="1",
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
entry_points = {
'console_scripts':
[
'archivescript=ArchiveScript.script:main',
]
},
install_requires=[
"joblib",
"Pillow"
],
)
|
StarcoderdataPython
|
79495
|
<reponame>qlcchain/WinQ-Android-code
#!/c/python27/python
import os
from utils import *
def cli_cpp(parms):
return os.path.join(parms['OVPN3'], "core", "test", "ovpncli", "cli.cpp")
def src_fn(parms, srcfile):
# Get source file name
if srcfile:
if '.' not in os.path.basename(srcfile):
srcfile += ".cpp"
else:
srcfile = cli_cpp(parms)
return srcfile
def is_unit_test(argv):
unit_test = False
if len(argv) >= 2:
unit_test = argv[1] == "unittest"
return unit_test
def src_fn_argv(parms, argv):
srcfile = None
if len(argv) >= 1:
srcfile = argv[0]
return src_fn(parms, srcfile)
def build(parms, srcfile, unit_test=False):
# Debug?
if parms['DEBUG']:
dbg_rel_flags = "/Zi"
else:
dbg_rel_flags = "/O2"
# Dictionary we will use to substitute parameters
# onto VC command line.
options = {
"ovpn3" : parms['OVPN3'],
"tap" : os.path.join(parms['TAP'], 'src'),
"tap_component_id" : parms['TAP_WIN_COMPONENT_ID'],
"asio" : os.path.join(build_dir(parms), "asio"),
"mbedtls" : os.path.join(build_dir(parms), "mbedtls"),
"lz4" : os.path.join(build_dir(parms), "lz4", "lib"),
"srcfile" : srcfile,
"extra_defs" : parms['CPP_EXTRA'],
"extra_inc" : "",
"extra_lib_path" : "",
"extra_lib" : "",
}
vc_parms(parms, options)
# Do we need to support XP and Win 2003?
arch = os.environ.get("ARCH", parms['ARCH'])
if arch == "x86_xp":
options['extra_defs'] += " /D_WIN32_WINNT=0x0501" # pre-Vista
else:
options['extra_defs'] += " /D_WIN32_WINNT=0x0600" # Vista and later
options['extra_lib'] += " fwpuclnt.lib"
# Add jsoncpp (optional)
if 'jsoncpp' in parms['LIB_VERSIONS']:
options["jsoncpp"] = os.path.join(build_dir(parms), "jsoncpp")
options['extra_inc'] += " /DHAVE_JSONCPP /I %(jsoncpp)s/dist" % options
options['extra_lib_path'] += " /LIBPATH:%(jsoncpp)s/dist" % options
options['extra_lib'] += " jsoncpp.lib"
if unit_test:
options['extra_lib'] += " gtest.lib"
options['extra_inc'] += " /I %s" % os.path.join(parms["GTEST_ROOT"], "googletest", "include")
options['extra_lib_path'] += " /LIBPATH:%s" % os.path.join(parms["GTEST_ROOT"], "googlemock", "gtest", "Debug")
# Build OpenVPN Connect
if parms.get("CONNECT"):
options['extra_inc'] += " /I " + os.path.join(parms['OVPN3'], "common")
# build it
vc_cmd(parms, r"cl %(extra_defs)s /DNOMINMAX /D_CRT_SECURE_NO_WARNINGS /DUSE_ASIO /DASIO_STANDALONE /DASIO_NO_DEPRECATED /I %(asio)s\asio\include /DUSE_MBEDTLS /I %(mbedtls)s\include /DHAVE_LZ4 /I %(lz4)s%(extra_inc)s -DTAP_WIN_COMPONENT_ID=%(tap_component_id)s /I %(tap)s /I %(ovpn3)s\core /EHsc %(link_static_dynamic_flags)s /W0 %(dbg_rel_flags)s /nologo %(srcfile)s /link /LIBPATH:%(mbedtls)s\library /LIBPATH:%(lz4)s%(extra_lib_path)s mbedtls.lib lz4.lib%(extra_lib)s ws2_32.lib crypt32.lib iphlpapi.lib winmm.lib user32.lib gdi32.lib advapi32.lib wininet.lib shell32.lib ole32.lib rpcrt4.lib" % options, arch=os.environ.get("ARCH"))
if __name__ == "__main__":
import sys
from parms import PARMS
# some parameters might be redefined, like in Jenkins multibranch pipeline case
PARMS['BUILD'] = os.environ.get('BUILD', PARMS['BUILD'])
PARMS['OVPN3'] = os.environ.get('OVPN3', PARMS['OVPN3'])
src = src_fn_argv(PARMS, sys.argv[1:])
unit_test = is_unit_test(sys.argv[1:])
build(PARMS, src, unit_test)
|
StarcoderdataPython
|
132220
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
import math
import dash_table
from app import app
import pandas as pd
data = pd.read_excel('data/2018/economic-aggregates/S1.10.xlsx')
years = data.iloc[5:6, 2:-2]
process = data[7:]
sections = process.iloc[:, 0]
main_sections = [index for index in sections.index if str(sections[index]).isdigit() or (type(sections[index]) != str and math.isnan(sections[index]))]
rows = [data.iloc[idx] for idx in main_sections]
labels = [row.iloc[1] for row in rows[0:-1]]
labelIds = main_sections
print(labelIds[1])
def generate_table(dataframe, max_rows=10):
data = pd.read_excel('data/2018/economic-aggregates/S1.10.xlsx', header = None)
df = data[6:]
df.columns = df.iloc[0].fillna(value=pd.Series(range(100)))
return(dash_table.DataTable(
data=df.to_dict('rows'),
columns=[{'id': c, 'name': c} for c in df.columns],
style_table={
'height': '400px',
'overflowY': 'scroll',
'border': 'thin lightgrey solid'
}))
layout = html.Div([
html.H2('Gross Capital Formation Timeseries'),
dcc.Dropdown(
id='gcf-my-dropdown',
options=[{'label': category, 'value': labelIds[idx]} for (idx, category) in enumerate(labels)],
value=labelIds[1],
style={'margin-bottom': '20px'}
),
dcc.Graph(id='gcf-time-series',
style={'padding-top': '20px'}),
generate_table(data)
], className="container")
@app.callback(Output('gcf-time-series', 'figure'),
[Input('gcf-my-dropdown', 'value')])
def update_graph(selected_dropdown_value):
index = int(selected_dropdown_value)
row = data.iloc[index][2:-2]
year_list = ['Y ' + year for year in years.values[0]]
mid = int(len(row) / 2)
return {
'data': [go.Bar(
x=year_list[:mid],
y=row[:mid],
name='Current Price'
), go.Bar(
x=year_list[mid:],
y=row[mid:],
name='Constant Price'
)],
'layout': {
'title': data.iloc[index][1]
}
}
|
StarcoderdataPython
|
130776
|
<filename>desktop/core/ext-py/phoenixdb-1.1.0/phoenixdb/sqlalchemy_phoenix.py
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import phoenixdb
from sqlalchemy import types
from sqlalchemy.engine.default import DefaultDialect, DefaultExecutionContext
from sqlalchemy.exc import CompileError
from sqlalchemy.sql.compiler import DDLCompiler
from sqlalchemy.types import BIGINT, BOOLEAN, CHAR, DATE, DECIMAL, FLOAT, INTEGER, NUMERIC,\
SMALLINT, TIME, TIMESTAMP, VARBINARY, VARCHAR
if sys.version_info.major == 3:
from urllib.parse import urlunsplit, SplitResult, urlencode
else:
from urllib import urlencode
from urlparse import urlunsplit, SplitResult
class PhoenixDDLCompiler(DDLCompiler):
def visit_primary_key_constraint(self, constraint):
if constraint.name is None:
raise CompileError("Can't create primary key without a name.")
return DDLCompiler.visit_primary_key_constraint(self, constraint)
AUTOCOMMIT_REGEXP = re.compile(
r"\s*(?:UPDATE|UPSERT|CREATE|DELETE|DROP|ALTER)", re.I | re.UNICODE
)
class PhoenixExecutionContext(DefaultExecutionContext):
def should_autocommit_text(self, statement):
return AUTOCOMMIT_REGEXP.match(statement)
class PhoenixDialect(DefaultDialect):
'''Phoenix dialect
dialect:: phoenix
:name: Phoenix
note::
The Phoenix dialect for SQLAlchemy is incomplete. It implements the functions required by Hue
for basic operation, but little else.
Connecting
----------
The connection URL has the format of phoenix://host:port
    This format does not allow for specifying the http scheme, or the URL path that the server uses.
    Setting tls=True sets the server URL scheme to https.
    If the path arg is set, it is used as the path of the server URL.
    The phoenix-specific authentication options can be set via the standard connect_args argument.
    Connecting to an insecure server::
        create_engine('phoenix://localhost:8765')
    Connecting to a secure server via SPNEGO (after kinit)::
        create_engine('phoenix://localhost:8765', tls=True, connect_args={'authentication': 'SPNEGO'})
    Connecting to a secure server via Knox::
        create_engine('phoenix://localhost:8765', tls=True, path='/gateway/avatica/',\
            connect_args={'authentication':'BASIC', 'avatica_user':'user', 'avatica_password':'password'})
'''
name = "phoenix"
driver = "phoenixdb"
ddl_compiler = PhoenixDDLCompiler
execution_ctx_cls = PhoenixExecutionContext
def __init__(self, tls=False, path='/', **opts):
'''
:param tls:
If True, then use https for connecting, otherwise use http
:param path:
The path component of the connection URL
'''
# There is no way to pass these via the SqlAlchemy url object
self.tls = tls
self.path = path
        super(PhoenixDialect, self).__init__(**opts)
@classmethod
def dbapi(cls):
return phoenixdb
def create_connect_args(self, url):
connect_args = dict()
if url.username is not None:
connect_args['user'] = url.username
if url.password is not None:
            connect_args['password'] = url.password
phoenix_url = urlunsplit(SplitResult(
scheme='https' if self.tls else 'http',
netloc='{}:{}'.format(url.host, 8765 if url.port is None else url.port),
path=self.path,
query=urlencode(url.query),
fragment='',
))
return [phoenix_url], connect_args
def has_table(self, connection, table_name, schema=None, **kw):
if schema is None:
schema = ''
return bool(connection.connect().connection.meta().get_tables(
tableNamePattern=table_name,
schemaPattern=schema,
            typeList=('TABLE', 'SYSTEM TABLE')))
def get_schema_names(self, connection, **kw):
schemas = connection.connect().connection.meta().get_schemas()
schema_names = [schema['TABLE_SCHEM'] for schema in schemas]
# Phoenix won't return the default schema if there aren't any tables in it
if '' not in schema_names:
schema_names.insert(0, '')
return schema_names
def get_table_names(self, connection, schema=None, order_by=None, **kw):
'''order_by is ignored'''
if schema is None:
schema = ''
tables = connection.connect().connection.meta().get_tables(
schemaPattern=schema, typeList=('TABLE', 'SYSTEM TABLE'))
return [table['TABLE_NAME'] for table in tables]
def get_view_names(self, connection, schema=None, **kw):
if schema is None:
schema = ''
        return connection.connect().connection.meta().get_tables(schemaPattern=schema,
                                                                  typeList=('VIEW',))
def get_columns(self, connection, table_name, schema=None, **kw):
if schema is None:
schema = ''
raw = connection.connect().connection.meta().get_columns(
schemaPattern=schema, tableNamePattern=table_name)
return [self._map_column(row) for row in raw]
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
if schema is None:
schema = ''
raw = connection.connect().connection.meta().get_primary_keys(
schema=schema, table=table_name)
cooked = {
'constrained_columns': []
}
if raw:
cooked['name'] = raw[0]['PK_NAME']
for row in raw:
cooked['constrained_columns'].insert(row['KEY_SEQ'] - 1, row['COLUMN_NAME'])
return cooked
def get_indexes(self, connection, table_name, schema=None, **kw):
if schema is None:
schema = ''
raw = connection.connect().connection.meta().get_index_info(schema=schema, table=table_name)
# We know that Phoenix returns the rows ordered by INDEX_NAME and ORDINAL_POSITION
cooked = []
current = None
for row in raw:
if current is None or row['INDEX_NAME'] != current['name']:
current = {
'name': row['INDEX_NAME'],
                    'unique': not row['NON_UNIQUE'],
'column_names': [],
}
cooked.append(current)
# Phoenix returns the column names in its internal representation here
# Remove the default CF prefix
            canonical_name = row['COLUMN_NAME']
            if canonical_name.startswith('0:'):
                canonical_name = canonical_name[len('0:'):]
            if canonical_name.startswith(':'):
                canonical_name = canonical_name[len(':'):]
current['column_names'].append(canonical_name)
return cooked
def get_foreign_keys(self, conn, table_name, schema=None, **kw):
'''Foreign keys are a foreign concept to Phoenix,
and SqlAlchemy cannot parse the DB schema if it's not implemented '''
return []
def _map_column(self, raw):
cooked = {}
cooked['name'] = raw['COLUMN_NAME']
cooked['type'] = COLUMN_DATA_TYPE[raw['TYPE_ID']]
cooked['nullable'] = bool(raw['IS_NULLABLE'])
cooked['autoincrement'] = bool(raw['IS_AUTOINCREMENT'])
cooked['comment'] = raw['REMARKS']
        cooked['default'] = None  # Not apparent how to get this from the metadata
return cooked
class TINYINT(types.Integer):
__visit_name__ = "SMALLINT"
class UNSIGNED_TINYINT(types.Integer):
__visit_name__ = "SMALLINT"
class UNSIGNED_INTEGER(types.Integer):
__visit_name__ = "INTEGER"
class DOUBLE(types.FLOAT):
__visit_name__ = "FLOAT"
class UNSIGNED_DOUBLE(types.FLOAT):
__visit_name__ = "FLOAT"
class UNSIGNED_FLOAT(types.FLOAT):
__visit_name__ = "FLOAT"
class UNSIGNED_LONG(types.BIGINT):
__visit_name__ = "BIGINT"
class UNSIGNED_TIME(types.TIME):
__visit_name__ = "TIME"
class UNSIGNED_DATE(types.DATE):
__visit_name__ = "DATE"
class UNSIGNED_TIMESTAMP(types.TIMESTAMP):
__visit_name__ = "TIMESTAMP"
class ROWID(types.String):
__visit_name__ = "VARCHAR"
COLUMN_DATA_TYPE = {
-6: TINYINT,
-5: BIGINT,
-3: VARBINARY,
1: CHAR,
2: NUMERIC,
3: DECIMAL,
4: INTEGER,
5: SMALLINT,
6: FLOAT,
8: DOUBLE,
9: UNSIGNED_INTEGER,
10: UNSIGNED_LONG,
11: UNSIGNED_TINYINT,
12: VARCHAR,
13: ROWID,
14: UNSIGNED_FLOAT,
15: UNSIGNED_DOUBLE,
16: BOOLEAN,
18: UNSIGNED_TIME,
19: UNSIGNED_DATE,
20: UNSIGNED_TIMESTAMP,
91: DATE,
92: TIME,
93: TIMESTAMP
}
|
StarcoderdataPython
|
6570346
|
#!/usr/bin/env python
from datetime import datetime
from random import randint
import kivy
kivy.require('1.10.0')
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.image import Image
from kivy.properties import DictProperty, ObjectProperty, StringProperty
from kivy.clock import Clock
from kivy.garden import iconfonts
iconfonts.register('default_font', 'fonts/icomoon.ttf', 'fonts/icomoon.fontd')
class Screen(BoxLayout):
now = ObjectProperty(datetime.now(), rebind=True)
background_image = ObjectProperty()
def __init__(self, **kwargs):
super(Screen, self).__init__(**kwargs)
self.background_image = Image(source='background.jpg').texture
self.background_image.wrap = 'repeat'
self.background_image.uvsize = (1, -1)
Clock.schedule_interval(self.update, 0.1)
self.update()
def update(self, *args):
self.now = datetime.now()
def on_page_changed(self, page):
self.ids.camera_view.play = (page == 0)
class Dashboard(BoxLayout):
data = DictProperty({}, rebind=True)
def update(self, **kwargs):
self.data.update(kwargs)
self.data = self.data
class WebDisplay(BoxLayout):
browser = ObjectProperty(None)
def reload(self, touch):
print('reload: touch.button:', touch.button)
if touch.button == 'right':
self.browser.url = 'https://maps.google.com'
class KarvyApp(App):
def build(self):
return Screen()
if __name__ == '__main__':
KarvyApp().run()
|
StarcoderdataPython
|
11348366
|
from dataclasses import dataclass
from enum import Enum
from typing import Union, Dict
@dataclass
class ExplainerDependencyReference:
"""Class for keeping track of dependencies required to Alibi runtime."""
explainer_name: str
alibi_class: str
runtime_class: str
_ANCHOR_IMAGE_TAG = "anchor_image"
_ANCHOR_TEXT_TAG = "anchor_text"
_ANCHOR_TABULAR_TAG = "anchor_tabular"
_KERNEL_SHAP_TAG = "kernel_shap"
_INTEGRATED_GRADIENTS_TAG = "integrated_gradients"
# NOTE: to add new explainers populate the below dict with a new
# ExplainerDependencyReference, referencing the specific runtime class in mlserver
# and the specific alibi explain class.
# this can be simplified when alibi moves to a config based init.
# Steps:
# update _TAG_TO_RT_IMPL
# update ExplainerEnum
_BLACKBOX_MODULE = "mlserver_alibi_explain.explainers.black_box_runtime"
_INTEGRATED_GRADIENTS_MODULE = "mlserver_alibi_explain.explainers.integrated_gradients"
_TAG_TO_RT_IMPL: Dict[str, ExplainerDependencyReference] = {
_ANCHOR_IMAGE_TAG: ExplainerDependencyReference(
explainer_name=_ANCHOR_IMAGE_TAG,
runtime_class=f"{_BLACKBOX_MODULE}.AlibiExplainBlackBoxRuntime",
alibi_class="alibi.explainers.AnchorImage",
),
_ANCHOR_TABULAR_TAG: ExplainerDependencyReference(
explainer_name=_ANCHOR_TABULAR_TAG,
runtime_class=f"{_BLACKBOX_MODULE}.AlibiExplainBlackBoxRuntime",
alibi_class="alibi.explainers.AnchorTabular",
),
_ANCHOR_TEXT_TAG: ExplainerDependencyReference(
explainer_name=_ANCHOR_TEXT_TAG,
runtime_class=f"{_BLACKBOX_MODULE}.AlibiExplainBlackBoxRuntime",
alibi_class="alibi.explainers.AnchorText",
),
_KERNEL_SHAP_TAG: ExplainerDependencyReference(
explainer_name=_KERNEL_SHAP_TAG,
runtime_class=f"{_BLACKBOX_MODULE}.AlibiExplainBlackBoxRuntime",
alibi_class="alibi.explainers.KernelShap",
),
_INTEGRATED_GRADIENTS_TAG: ExplainerDependencyReference(
explainer_name=_INTEGRATED_GRADIENTS_TAG,
runtime_class=f"{_INTEGRATED_GRADIENTS_MODULE}.IntegratedGradientsWrapper",
alibi_class="alibi.explainers.IntegratedGradients",
),
}
class ExplainerEnum(str, Enum):
anchor_image = _ANCHOR_IMAGE_TAG
anchor_text = _ANCHOR_TEXT_TAG
anchor_tabular = _ANCHOR_TABULAR_TAG
kernel_shap = _KERNEL_SHAP_TAG
integrated_gradients = _INTEGRATED_GRADIENTS_TAG
def get_mlmodel_class_as_str(tag: Union[ExplainerEnum, str]) -> str:
if isinstance(tag, ExplainerEnum):
tag = tag.value
return _TAG_TO_RT_IMPL[tag].runtime_class
def get_alibi_class_as_str(tag: Union[ExplainerEnum, str]) -> str:
if isinstance(tag, ExplainerEnum):
tag = tag.value
return _TAG_TO_RT_IMPL[tag].alibi_class
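if __name__ == "__main__":
    # Minimal self-check sketch (not part of the original module): resolve the
    # runtime and alibi class strings for registered explainers. Uses only the
    # names defined above, so it runs with the standard library alone.
    print(get_mlmodel_class_as_str(ExplainerEnum.anchor_image))
    print(get_alibi_class_as_str("integrated_gradients"))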
|
StarcoderdataPython
|
3412056
|
<filename>implicit/datasets/sketchfab.py
import logging
import os
import time
import h5py
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from implicit.datasets import _download
log = logging.getLogger("implicit")
URL = "https://github.com/benfred/recommender_data/releases/download/v1.0/sketchfab.hdf5"
def get_sketchfab():
"""Returns the sketchfab dataset, downloading locally if necessary.
This dataset contains about 632K likes from 62K users on 28k items collected
from the sketchfab website, as described here:
http://blog.ethanrosenthal.com/2016/10/09/likes-out-guerilla-dataset/
Returns a tuple of (items, users, likes) where likes is a CSR matrix"""
filename = os.path.join(_download.LOCAL_CACHE_DIR, "sketchfab.hdf5")
if not os.path.isfile(filename):
log.info("Downloading dataset to '%s'", filename)
_download.download_file(URL, filename)
else:
log.info("Using cached dataset at '%s'", filename)
with h5py.File(filename, "r") as f:
m = f.get("item_user_likes")
plays = csr_matrix((m.get("data"), m.get("indices"), m.get("indptr")))
return np.array(f["item"]), np.array(f["user"]), plays
def generate_dataset(filename, outputfilename):
data = _read_dataframe(filename)
_hfd5_from_dataframe(data, outputfilename)
def _read_dataframe(filename):
"""Reads the original dataset PSV as a pandas dataframe"""
import pandas
# read in triples of user/artist/playcount from the input dataset
# get a model based off the input params
start = time.time()
log.debug("reading data from %s", filename)
data = pandas.read_csv(filename, delimiter="|", quotechar="\\")
# map each artist and user to a unique numeric value
data["uid"] = data["uid"].astype("category")
data["mid"] = data["mid"].astype("category")
# store as a CSR matrix
log.debug("read data file in %s", time.time() - start)
return data
def _hfd5_from_dataframe(data, outputfilename):
items = data["mid"].cat.codes.copy()
users = data["uid"].cat.codes.copy()
values = np.ones(len(items)).astype(np.float32)
# create a sparse matrix of all the item/users/likes
likes = coo_matrix((values, (items, users))).astype(np.float32).tocsr()
with h5py.File(outputfilename, "w") as f:
g = f.create_group("item_user_likes")
g.create_dataset("data", data=likes.data)
g.create_dataset("indptr", data=likes.indptr)
g.create_dataset("indices", data=likes.indices)
dt = h5py.special_dtype(vlen=str)
item = list(data["mid"].cat.categories)
dset = f.create_dataset("item", (len(item),), dtype=dt)
dset[:] = item
user = list(data["uid"].cat.categories)
dset = f.create_dataset("user", (len(user),), dtype=dt)
dset[:] = user
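if __name__ == "__main__":
    # Usage sketch (not part of the original module): fetch the cached or
    # downloaded dataset and print its basic dimensions. Requires network
    # access on first run plus the h5py/scipy dependencies imported above.
    items, users, likes = get_sketchfab()
    print("items:", len(items), "users:", len(users), "likes:", likes.shape)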
|
StarcoderdataPython
|
1636783
|
# __author__ = 'artreven'
|
StarcoderdataPython
|
8064236
|
<filename>gmail/gmail_message.py
"""Get a list of Messages from the user's mailbox.
"""
from apiclient import errors
from itertools import islice
def ListMessagesMatchingQuery(service, user_id, **kwargs):
"""List all Messages of the user's mailbox matching the query.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
query: String used to filter messages returned.
Eg.- 'from:<EMAIL>' for Messages from a particular sender.
Returns:
List of Messages that match the criteria of the query. Note that the
returned list contains Message IDs, you must use get with the
appropriate ID to get the details of a Message.
"""
try:
response = service.users().messages().list(userId=user_id, **kwargs).execute()
messages = []
if 'messages' in response:
messages.extend(response['messages'])
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = service.users().messages().list(userId=user_id,
pageToken=page_token,
**kwargs).execute()
messages.extend(response['messages'])
return messages
except errors.HttpError as error:
print('An error occurred: %s' % error)
def ListMessagesMatchingSender(service, user_id, sender, **kwargs):
return ListMessagesMatchingQuery(service, user_id,
q='from:{}'.format(sender),
**kwargs)
def GetMessage(service, user_id, msg_ids, **kwargs):
"""Get a Message with given ID.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
        msg_ids: Container of message IDs: the IDs of the Messages required.
Returns:
        A list of Messages.
"""
msgs = []
def fetch(rid, response, exception):
if exception is not None:
print("Exception\t", exception)
else:
msgs.append(response)
# Make a batch request
msgs_it = iter(msg_ids)
n = 100
msg_chunk = islice(msgs_it, n)
while True:
batch = service.new_batch_http_request()
empty = True
for message_id in msg_chunk:
t = service.users().messages().get(userId=user_id, id=message_id,
**kwargs)
batch.add(t, callback=fetch)
empty = False
if empty:
break
batch.execute()
# print(msgs[-1]['internalDate'])
# print(msgs[-1]['payload']['headers'][0]['value'])
msg_chunk = islice(msgs_it, n)
return msgs
def GetSender(service, user_id, msg_ids, **kwargs):
"""Get a Sender of Message with given ID.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
        msg_ids: Container of message IDs: the IDs of the Messages required.
Returns:
        A list of Messages containing only their 'From' metadata headers.
"""
return GetMessage(service=service,
user_id=user_id,
msg_ids=msg_ids,
format="metadata",
metadataHeaders=['From'],
**kwargs)
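# Usage sketch (assumes an already-authorized Gmail API `service` object built
# elsewhere, e.g. with googleapiclient.discovery.build; the sender address is
# a placeholder, not part of the original module):
#
#     refs = ListMessagesMatchingSender(service, 'me', 'someone@example.com')
#     msg_ids = [ref['id'] for ref in refs]
#     for msg in GetSender(service, 'me', msg_ids):
#         print(msg['payload']['headers'][0]['value'])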
|
StarcoderdataPython
|
1971505
|
import unittest
from models.meta import Meta, Episode_Meta
class TestMeta(unittest.TestCase):
def test_meta(self):
meta = Meta(
"id",
"title",
"rating",
"image_name",
"episodes",
"description"
)
self.assertEqual(meta.id(), "id")
self.assertEqual(meta.title(), "title")
self.assertEqual(meta.rating(), "rating")
self.assertEqual(meta.image_name(), "image_name")
self.assertEqual(meta.episodes(), "episodes")
self.assertEqual(meta.description(), "description")
def test_get_episode(self):
from models.media import Episode
from models.media import Season, Show
episodes = [
Episode_Meta(1, -1, "title", "summary"),
Episode_Meta(2, 0, "title", "summary"),
Episode_Meta(3, 0, "title", "summary"),
Episode_Meta(4, 1, "title", "summary")
]
meta = Meta("id", "title", "rating", "image_name", episodes, "desc")
season1 = Season("library_path", "show_name", 1)
season2 = Season("library_path", "show_name", 2)
show1 = Show("library_path", "show_name")
episode1 = Episode("file_path", 1, "played")
episode2 = Episode("file_path", 2, "played")
episode3 = Episode("file_path", 3, "played")
episode4 = Episode("file_path", 4, "played")
episode41 = Episode("file_path", 4, "played", parent=season1)
episode42 = Episode("file_path", 4, "played", parent=season2)
episode43 = Episode("file_path", 4, "played", parent=show1)
episode5 = Episode("file_path", 5, "played")
self.assertEqual(episodes[0], meta.get_episode(episode1))
self.assertEqual(episodes[1], meta.get_episode(episode2))
self.assertEqual(episodes[2], meta.get_episode(episode3))
self.assertIsNone(meta.get_episode(episode4))
self.assertEqual(episodes[3], meta.get_episode(episode41))
self.assertIsNone(meta.get_episode(episode42))
self.assertIsNone(meta.get_episode(episode43))
self.assertIsNone(meta.get_episode(episode5))
def test_episode_meta(self):
episode_meta = Episode_Meta(
"episode_number", "season_number", "title", "summary"
)
self.assertEqual(episode_meta.episode_number(), "episode_number")
self.assertEqual(episode_meta.season_number(), "season_number")
self.assertEqual(episode_meta.title(), "title")
self.assertEqual(episode_meta.summary(), "summary")
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
8145240
|
# function zone
def summary():
global water, milk, coffee_beans, disposable_cups, money
print("The coffee machine has:")
print(f"{water} of water")
print(f"{milk} of milk")
print(f"{coffee_beans} of coffee beans")
print(f"{disposable_cups} of disposable cups")
print(f"{money} of money")
def ask_action():
print("Write action (buy, fill, take):")
answer = str(input())
return answer
def buy():
global water, milk, coffee_beans, disposable_cups, money
print("What do you want to buy? 1 - espresso, 2 - espresso, 3 - cappuccino:")
buy_coffee = int(input())
if buy_coffee == 1:
water -= 250
coffee_beans -= 16
money += 4
disposable_cups -= 1
elif buy_coffee == 2:
water -= 350
milk -= 75
coffee_beans -= 20
money += 7
disposable_cups -= 1
elif buy_coffee == 3:
water -= 200
milk -= 100
coffee_beans -= 12
money += 6
disposable_cups -= 1
def fill():
global water, milk, coffee_beans, disposable_cups
print("Write how many ml of water do you want to add:")
fill_water = int(input())
print("Write how many ml of milk do you want to add:")
fill_milk = int(input())
print("Write how many grams of coffee beans do you want to add:")
fill_coffee = int(input())
print("Write how many disposable cups of coffee do you want to add:")
fill_cups = int(input())
water += fill_water
milk += fill_milk
coffee_beans += fill_coffee
disposable_cups += fill_cups
def take():
global money
print(f"I gave you ${money}")
money = 0
# main zone
water = 400
milk = 540
coffee_beans = 120
disposable_cups = 9
money = 550
summary()
print()
action = ask_action()
if action == "buy":
buy()
elif action == "fill":
fill()
elif action == "take":
take()
print()
summary()
|
StarcoderdataPython
|
11399903
|
<gh_stars>1000+
# Copyright 2017 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Utilities to help with testing DeepVariant code."""
import os
from third_party.nucleus.testing import test_utils as nucleus_test_utils
GENOMICS_DIR = 'learning/genomics'
def deeptrio_testdata(filename):
"""Gets the path to filename in genomics/deepvariant/testdata.
These paths are only known at runtime, after flag parsing
has occurred.
Args:
filename: The name of a testdata file in the core genomics testdata
directory. For example, if you have a test file in
"learning/genomics/deepvariant/testdata/foo.txt", filename should be
"foo.txt" to get a path to it.
Returns:
The absolute path to a testdata file.
"""
return nucleus_test_utils.genomics_testdata(
os.path.join('deeptrio/testdata', filename), GENOMICS_DIR)
CHR20_FASTA = None
HG001_CHR20_BAM = None
NA12891_CHR20_BAM = None
NA12892_CHR20_BAM = None
GOLDEN_TRAINING_EXAMPLES = None
GOLDEN_CALLING_CANDIDATES = None
GOLDEN_CALLING_EXAMPLES = None
CONFIDENT_REGIONS_BED = None
TRUTH_VARIANTS_VCF = None
TRUTH_VARIANTS_VCF_WITH_TYPES = None
GOLDEN_POSTPROCESS_INPUT = None
GOLDEN_POSTPROCESS_OUTPUT = None
GOLDEN_POSTPROCESS_OUTPUT_COMPRESSED = None
GOLDEN_POSTPROCESS_GVCF_INPUT = None
GOLDEN_POSTPROCESS_GVCF_OUTPUT = None
GOLDEN_POSTPROCESS_GVCF_OUTPUT_COMPRESSED = None
GOLDEN_MAKE_EXAMPLES_RUN_INFO = None
WS_ALLELE_COUNT_LINEAR_MODEL = None
WS_ALLELE_COUNT_LINEAR_MODEL_PCKL = None
WS_VARIANT_READS_THRESHOLD_MODEL = None
N_GOLDEN_TRAINING_EXAMPLES = 50
N_GOLDEN_CALLING_EXAMPLES = 103
CUSTOMIZED_CLASSES_GOLDEN_TRAINING_EXAMPLES = None
ALT_ALIGNED_PILEUP_GOLDEN_TRAINING_EXAMPLES = None
GOLDEN_VCF_CANDIDATE_IMPORTER_TRAINING_EXAMPLES = None
GOLDEN_VCF_CANDIDATE_IMPORTER_CALLING_EXAMPLES_CHILD = None
def init():
"""Initialize global variables from flag values."""
global CHR20_FASTA
global HG001_CHR20_BAM
global NA12891_CHR20_BAM
global NA12892_CHR20_BAM
global GOLDEN_TRAINING_EXAMPLES
global GOLDEN_CALLING_CANDIDATES
global GOLDEN_CALLING_EXAMPLES
global CONFIDENT_REGIONS_BED
global TRUTH_VARIANTS_VCF
global TRUTH_VARIANTS_VCF_WITH_TYPES
global GOLDEN_POSTPROCESS_INPUT
global GOLDEN_POSTPROCESS_OUTPUT
global GOLDEN_POSTPROCESS_OUTPUT_COMPRESSED
global GOLDEN_POSTPROCESS_GVCF_INPUT
global GOLDEN_POSTPROCESS_GVCF_OUTPUT
global GOLDEN_POSTPROCESS_GVCF_OUTPUT_COMPRESSED
global GOLDEN_MAKE_EXAMPLES_RUN_INFO
global WS_ALLELE_COUNT_LINEAR_MODEL
global WS_ALLELE_COUNT_LINEAR_MODEL_PCKL
global WS_VARIANT_READS_THRESHOLD_MODEL
global GOLDEN_VCF_CANDIDATE_IMPORTER_TRAINING_EXAMPLES
global GOLDEN_VCF_CANDIDATE_IMPORTER_CALLING_EXAMPLES_CHILD
CHR20_FASTA = deeptrio_testdata('input/hs37d5.chr20.fa.gz')
HG001_CHR20_BAM = deeptrio_testdata('input/HG001.chr20.10_10p1mb_sorted.bam')
NA12891_CHR20_BAM = deeptrio_testdata(
'input/NA12891.chr20.10_10p1mb_sorted.bam')
NA12892_CHR20_BAM = deeptrio_testdata(
'input/NA12892.chr20.10_10p1mb_sorted.bam')
GOLDEN_TRAINING_EXAMPLES = deeptrio_testdata(
'golden.training_examples.tfrecord.gz')
GOLDEN_CALLING_CANDIDATES = deeptrio_testdata(
'golden_child.calling_examples.tfrecord.gz')
GOLDEN_CALLING_EXAMPLES = deeptrio_testdata(
'golden_child.calling_examples.tfrecord.gz')
CONFIDENT_REGIONS_BED = deeptrio_testdata(
'input/test_giab.b37_chr20_100kbp_at_10mb.bed')
TRUTH_VARIANTS_VCF = deeptrio_testdata(
'input/HG001_chr20_GRCh37_GIAB_highconf_CG-IllFB-IllGATKHC-Ion-10X-SOLID_CHROM1-X_v.3.3.2_highconf_PGandRTGphasetransfer.vcf.gz'
)
TRUTH_VARIANTS_VCF_WITH_TYPES = deeptrio_testdata(
'input/with_types.test_nist.b37_chr20_4kbp_at_10mb.vcf.gz')
GOLDEN_POSTPROCESS_INPUT = deeptrio_testdata(
'golden.postprocess_single_site_input.tfrecord.gz')
GOLDEN_POSTPROCESS_OUTPUT = deeptrio_testdata(
'golden.postprocess_single_site_output.vcf')
GOLDEN_POSTPROCESS_OUTPUT_COMPRESSED = deeptrio_testdata(
'golden.postprocess_single_site_output.vcf.gz')
GOLDEN_POSTPROCESS_GVCF_INPUT = deeptrio_testdata(
'golden_child.postprocess_gvcf_input.tfrecord.gz')
GOLDEN_POSTPROCESS_GVCF_OUTPUT = deeptrio_testdata(
'golden.postprocess_gvcf_output.g.vcf')
GOLDEN_MAKE_EXAMPLES_RUN_INFO = deeptrio_testdata(
'golden.training_examples.tfrecord.gz.run_info.pbtxt')
WS_ALLELE_COUNT_LINEAR_MODEL = deeptrio_testdata(
'window_selector_allele_count_linear.pbtxt')
WS_ALLELE_COUNT_LINEAR_MODEL_PCKL = deeptrio_testdata(
'window_selector_allele_count_linear.pckl')
WS_VARIANT_READS_THRESHOLD_MODEL = deeptrio_testdata(
'window_selector_variant_read_threshold.pbtxt')
# For CustomizedClassesVariantLabeler.
global CUSTOMIZED_CLASSES_GOLDEN_TRAINING_EXAMPLES
CUSTOMIZED_CLASSES_GOLDEN_TRAINING_EXAMPLES = deeptrio_testdata(
'customized_classes.golden.training_examples.tfrecord.gz')
# For alt-aligned pileups
global ALT_ALIGNED_PILEUP_GOLDEN_TRAINING_EXAMPLES
ALT_ALIGNED_PILEUP_GOLDEN_TRAINING_EXAMPLES = deeptrio_testdata(
'alt_aligned_pileup.golden.training_examples.tfrecord.gz')
GOLDEN_VCF_CANDIDATE_IMPORTER_TRAINING_EXAMPLES = deeptrio_testdata(
'golden.vcf_candidate_importer.training_examples.tfrecord.gz')
GOLDEN_VCF_CANDIDATE_IMPORTER_CALLING_EXAMPLES_CHILD = deeptrio_testdata(
'golden_child.vcf_candidate_importer.calling_examples.tfrecord.gz')
|
StarcoderdataPython
|
6537373
|
"""Handle Wordle Commands."""
from hikari.events.message_events import GuildMessageCreateEvent
from bot.wordle.engine import WORDLE_PATTERN, Wordle
wordle = Wordle()
def should_handle(event: GuildMessageCreateEvent) -> bool:
"""Should this event be handled?"""
return bool(
event.content
and (event.content.startswith(".wordle") or WORDLE_PATTERN.match(event.content))
)
async def handle(event: GuildMessageCreateEvent) -> None:
"""Handle incoming events."""
if event.content is None:
return
content = event.content.replace(".wordle", "").strip()
command = content.split(" ")[0]
if WORDLE_PATTERN.match(event.content):
if wordle.submit_score(event):
await event.message.add_reaction("👀")
elif command in ["me"]:
await event.message.respond(wordle.get_user_stats(str(event.author.id)))
elif wordle.is_valid_wordle(command) or command == "":
await event.message.respond(wordle.get_scores(command), user_mentions=False)
else:
await event.message.respond(
"""**Wordle Help**
.wordle <number> # Gets the wordle for that number
.wordle # Gets the wordle for today
.wordle help (this help)"""
)
|
StarcoderdataPython
|
51357
|
import os
import itertools
import re
from typing import List, Optional, Tuple, Dict, Callable, Any, NamedTuple
from string import Template
from typing import List
from tokenizers import Tokenizer, Encoding
dirname = os.path.dirname(__file__)
css_filename = os.path.join(dirname, "visualizer-styles.css")
with open(css_filename) as f:
css = f.read()
class Annotation:
start: int
end: int
    label: str
def __init__(self, start: int, end: int, label: str):
self.start = start
self.end = end
self.label = label
AnnotationList = List[Annotation]
PartialIntList = List[Optional[int]]
class CharStateKey(NamedTuple):
token_ix: Optional[int]
anno_ix: Optional[int]
class CharState:
char_ix: Optional[int]
def __init__(self, char_ix):
self.char_ix = char_ix
self.anno_ix: Optional[int] = None
self.tokens: List[int] = []
@property
def token_ix(self):
return self.tokens[0] if len(self.tokens) > 0 else None
@property
def is_multitoken(self):
"""
BPE tokenizers can output more than one token for a char
"""
return len(self.tokens) > 1
def partition_key(self) -> CharStateKey:
return CharStateKey(
token_ix=self.token_ix,
anno_ix=self.anno_ix,
)
class Aligned:
pass
class EncodingVisualizer:
"""
Build an EncodingVisualizer
Args:
tokenizer (:class:`~tokenizers.Tokenizer`):
A tokenizer instance
default_to_notebook (:obj:`bool`):
Whether to render html output in a notebook by default
annotation_converter (:obj:`Callable`, `optional`):
An optional (lambda) function that takes an annotation in any format and returns
an Annotation object
"""
    # A raw string is used so that \b is a word boundary rather than a literal backspace character
    unk_token_regex = re.compile(r"(.{1}\b)?(unk|oov)(\b.{1})?", flags=re.IGNORECASE)
def __init__(
self,
tokenizer: Tokenizer,
default_to_notebook: bool = True,
annotation_converter: Optional[Callable[[Any], Annotation]] = None,
):
if default_to_notebook:
try:
from IPython.core.display import display, HTML
except ImportError as e:
raise Exception(
"""We couldn't import IPython utils for html display.
Are you running in a notebook?
You can also pass `default_to_notebook=False` to get back raw HTML
"""
)
self.tokenizer = tokenizer
self.default_to_notebook = default_to_notebook
        self.annotation_converter = annotation_converter
def __call__(
self,
text: str,
annotations: AnnotationList = [],
default_to_notebook: Optional[bool] = None,
) -> Optional[str]:
"""
Build a visualization of the given text
Args:
text (:obj:`str`):
The text to tokenize
annotations (:obj:`List[Annotation]`, `optional`):
                An optional list of annotations of the text. They can either be an annotation class
or anything else if you instantiated the visualizer with a converter function
default_to_notebook (:obj:`bool`, `optional`, defaults to `False`):
If True, will render the html in a notebook. Otherwise returns an html string.
Returns:
The HTML string if default_to_notebook is False, otherwise (default) returns None and
renders the HTML in the notebook
"""
final_default_to_notebook = self.default_to_notebook
if default_to_notebook is not None:
final_default_to_notebook = default_to_notebook
if final_default_to_notebook:
try:
from IPython.core.display import display, HTML
except ImportError as e:
raise Exception(
"""We couldn't import IPython utils for html display.
Are you running in a notebook?"""
)
        if self.annotation_converter is not None:
            annotations = list(map(self.annotation_converter, annotations))
encoding = self.tokenizer.encode(text)
html = EncodingVisualizer.__make_html(text, encoding, annotations)
if final_default_to_notebook:
display(HTML(html))
else:
return html
@staticmethod
def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]:
"""
Generates a color palette for all the labels in a given set of annotations
Args:
annotations (:obj:`Annotation`):
A list of annotations
Returns:
:obj:`dict`: A dictionary mapping labels to colors in HSL format
"""
if len(annotations) == 0:
return {}
labels = set(map(lambda x: x.label, annotations))
num_labels = len(labels)
h_step = int(255 / num_labels)
if h_step < 20:
h_step = 20
s = 32
l = 64
h = 10
colors = {}
for label in sorted(
labels
): # sort so we always get the same colors for a given set of labels
            colors[label] = f"hsl({h},{s}%,{l}%)"
h += h_step
return colors
@staticmethod
def consecutive_chars_to_html(
consecutive_chars_list: List[CharState],
text: str,
encoding: Encoding,
):
"""
Converts a list of "consecutive chars" into a single HTML element.
Chars are consecutive if they fall under the same word, token and annotation.
The CharState class is a named tuple with a "partition_key" method that makes it easy to
compare if two chars are consecutive.
Args:
consecutive_chars_list (:obj:`List[CharState]`):
A list of CharStates that have been grouped together
text (:obj:`str`):
The original text being processed
encoding (:class:`~tokenizers.Encoding`):
The encoding returned from the tokenizer
Returns:
:obj:`str`: The HTML span for a set of consecutive chars
"""
first = consecutive_chars_list[0]
if first.char_ix is None:
# its a special token
stoken = encoding.tokens[first.token_ix]
# special tokens are represented as empty spans. We use the data attribute and css
# magic to display it
return f'<span class="special-token" data-stoken={stoken}></span>'
# We're not in a special token so this group has a start and end.
last = consecutive_chars_list[-1]
start = first.char_ix
end = last.char_ix + 1
span_text = text[start:end]
css_classes = [] # What css classes will we apply on the resulting span
data_items = {} # What data attributes will we apply on the result span
if first.token_ix is not None:
# We can either be in a token or not (e.g. in white space)
css_classes.append("token")
if first.is_multitoken:
css_classes.append("multi-token")
if first.token_ix % 2:
# We use this to color alternating tokens.
# A token might be split by an annotation that ends in the middle of it, so this
# lets us visually indicate a consecutive token despite its possible splitting in
# the html markup
css_classes.append("odd-token")
else:
# Like above, but a different color so we can see the tokens alternate
css_classes.append("even-token")
if (
EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix])
is not None
):
# This is a special token that is in the text. probably UNK
css_classes.append("special-token")
# TODO is this the right name for the data attribute ?
data_items["stok"] = encoding.tokens[first.token_ix]
else:
# In this case we are looking at a group/single char that is not tokenized.
# e.g. white space
css_classes.append("non-token")
css = f'''class="{' '.join(css_classes)}"'''
data = ""
for key, val in data_items.items():
data += f' data-{key}="{val}"'
return f"<span {css} {data} >{span_text}</span>"
@staticmethod
def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str:
char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations)
current_consecutive_chars = [char_states[0]]
prev_anno_ix = char_states[0].anno_ix
spans = []
label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations)
cur_anno_ix = char_states[0].anno_ix
if cur_anno_ix is not None:
# If we started in an annotation make a span for it
anno = annotations[cur_anno_ix]
label = anno.label
color = label_colors_dict[label]
spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
for cs in char_states[1:]:
cur_anno_ix = cs.anno_ix
if cur_anno_ix != prev_anno_ix:
# If we've transitioned in or out of an annotation
spans.append(
# Create a span from the current consecutive characters
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
current_consecutive_chars = [cs]
if prev_anno_ix is not None:
# if we transitioned out of an annotation close it's span
spans.append("</span>")
if cur_anno_ix is not None:
# If we entered a new annotation make a span for it
anno = annotations[cur_anno_ix]
label = anno.label
color = label_colors_dict[label]
spans.append(
f'<span class="annotation" style="color:{color}" data-label="{label}">'
)
prev_anno_ix = cur_anno_ix
if cs.partition_key() == current_consecutive_chars[0].partition_key():
                # If the current character is in the same "group" as the previous one
current_consecutive_chars.append(cs)
else:
# Otherwise we make a span for the previous group
spans.append(
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
                # And reset the consecutive_char_list to form a new group
current_consecutive_chars = [cs]
# All that's left is to fill out the final span
# TODO I think there is an edge case here where an annotation's span might not close
spans.append(
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
res = HTMLBody(spans) # Send the list of spans to the body of our html
return res
@staticmethod
def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList:
"""
Args:
text (:obj:`str`):
The raw text we want to align to
annotations (:obj:`AnnotationList`):
A (possibly empty) list of annotations
Returns:
            A list of length len(text) whose entry at index i is None if there is no annotation on
            character i, or k, the index of the annotation that covers character i, where k is an
            index into the list of annotations
"""
annotation_map = [None] * len(text)
for anno_ix, a in enumerate(annotations):
for i in range(a.start, a.end):
annotation_map[i] = anno_ix
return annotation_map
@staticmethod
def __make_char_states(
text: str, encoding: Encoding, annotations: AnnotationList
) -> List[CharState]:
"""
        For each character in the original text, we emit a tuple representing its "state":
* which token_ix it corresponds to
* which word_ix it corresponds to
* which annotation_ix it corresponds to
Args:
text (:obj:`str`):
The raw text we want to align to
annotations (:obj:`List[Annotation]`):
A (possibly empty) list of annotations
encoding: (:class:`~tokenizers.Encoding`):
The encoding returned from the tokenizer
Returns:
:obj:`List[CharState]`: A list of CharStates, indicating for each char in the text what
            its state is
"""
annotation_map = EncodingVisualizer.__make_anno_map(text, annotations)
# Todo make this a dataclass or named tuple
char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))]
for token_ix, token in enumerate(encoding.tokens):
offsets = encoding.token_to_chars(token_ix)
if offsets is not None:
start, end = offsets
for i in range(start, end):
char_states[i].tokens.append(token_ix)
for char_ix, anno_ix in enumerate(annotation_map):
char_states[char_ix].anno_ix = anno_ix
return char_states
def HTMLBody(children: List[str], css_styles=css) -> str:
"""
Generates the full html with css from a list of html spans
Args:
children (:obj:`List[str]`):
A list of strings, assumed to be html elements
css_styles (:obj:`str`, `optional`):
Optional alternative implementation of the css
Returns:
:obj:`str`: An HTML string with style markup
"""
children_text = "".join(children)
return f"""
<html>
<head>
<style>
{css_styles}
</style>
</head>
<body>
<div class="tokenized-text" dir=auto>
{children_text}
</div>
</body>
</html>
"""
|
StarcoderdataPython
|
1725819
|
<reponame>cylondata/parsl
import pytest
import parsl
from parsl.app.errors import AppTimeout
@parsl.python_app
def my_app(walltime=1):
import time
time.sleep(1.2)
return True
def test_python_walltime():
f = my_app()
with pytest.raises(AppTimeout):
f.result()
def test_python_longer_walltime_at_invocation():
f = my_app(walltime=6)
f.result()
def test_python_bad_decorator_args():
with pytest.raises(TypeError):
@pytest.mark.local
@parsl.python_app(walltime=1)
def my_app_2():
import time
time.sleep(1.2)
return True
|
StarcoderdataPython
|
1853133
|
import argparse
import json
import logging
import os
from typing import Dict, List
import numpy as np
from deepdream import DeepDream
logger = logging.getLogger(__name__)
parser: argparse.ArgumentParser = argparse.ArgumentParser(description="Deep Dreams with Keras. Multiple experiments.")
parser.add_argument("base_image_path", metavar="base_image_path", type=str, help="Path to the image to transform.")
parser.add_argument("result_folder", metavar="result_folder", type=str, default="img", help="Results folder path.")
parser.add_argument("--random_n", type=int, default=0, help="Number of random iterations for layer weights.")
args: argparse.Namespace = parser.parse_args()
base_image_path: str = args.base_image_path
result_folder: str = args.result_folder
random_n: int = int(args.random_n)
if not os.path.exists(result_folder):
os.mkdir(result_folder)
if random_n == 0:
with open("experiment.json") as json_file:
config: Dict = json.load(json_file)
experiment_list: List = config["experiment"]
for experiment in experiment_list:
for experiment_name, experiment_dict in experiment.items():
logger.info(f"*** Initiating experiment {experiment_name} ***")
experiment_result_path: str = os.path.join(result_folder, experiment_name)
experiment_dict["base_image_path"] = base_image_path
experiment_dict["result_prefix"] = experiment_result_path
with open(f"{experiment_result_path}.json", "w") as json_file:
json.dump(experiment_dict, json_file)
dream: DeepDream = DeepDream.from_dict(experiment_dict)
dream.do_dream()
else:
for i in range(random_n):
logger.info(f"*** Initiating random experiment {i} ***")
random_weights: np.ndarray = np.random.dirichlet(np.ones(4)) # generate random weight with sum to 1
dream = DeepDream(base_image_path=base_image_path, result_prefix=f"{result_folder}/rexperiment{i}",
mixed2_weight=random_weights[0],
mixed3_weight=random_weights[1],
mixed4_weight=random_weights[2],
mixed5_weight=random_weights[3])
dream.do_dream()
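# Sketch of the expected experiment.json layout, inferred from the loop above
# (experiment names and weight values are illustrative; the exact keys accepted
# by DeepDream.from_dict depend on that class):
#
#     {
#         "experiment": [
#             {"experiment_1": {"mixed2_weight": 0.4, "mixed3_weight": 0.3,
#                               "mixed4_weight": 0.2, "mixed5_weight": 0.1}}
#         ]
#     }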
|
StarcoderdataPython
|