text (string, lengths 89-104k) | code_tokens (list) | avg_line_len (float64, 7.91-980) | score (float64, 0-630)
---|---|---|---|
def send_to_splunk(
session=None,
url=None,
data=None,
headers=None,
verify=False,
ssl_options=None,
timeout=10.0):
"""send_to_splunk
Send formatted msgs to Splunk. This will throw exceptions
for any errors. It is decoupled from the publishers to
make testing easier with mocks.
:param session: requests.Session
:param url: url for splunk logs
:param data: data to send
:param headers: headers for splunk
:param verify: verify certs
:param ssl_options: certs dictionary
:param timeout: timeout in seconds
"""
r = session.post(
url=url,
data=data,
headers=headers,
verify=verify,
timeout=timeout
)
r.raise_for_status() # Throws exception for 4xx/5xx status
return r
|
[
"def",
"send_to_splunk",
"(",
"session",
"=",
"None",
",",
"url",
"=",
"None",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"verify",
"=",
"False",
",",
"ssl_options",
"=",
"None",
",",
"timeout",
"=",
"10.0",
")",
":",
"r",
"=",
"session",
".",
"post",
"(",
"url",
"=",
"url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
",",
"verify",
"=",
"verify",
",",
"timeout",
"=",
"timeout",
")",
"r",
".",
"raise_for_status",
"(",
")",
"# Throws exception for 4xx/5xx status",
"return",
"r"
] | 25.709677 | 16.612903 |
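A minimal usage sketch for `send_to_splunk`, assuming a Splunk HTTP Event Collector endpoint; the URL and token below are placeholders, not real values:

```python
import json

import requests

# Placeholder HEC endpoint and token -- substitute your own values.
session = requests.Session()
response = send_to_splunk(
    session=session,
    url='https://splunk.example.com:8088/services/collector',
    data=json.dumps({'event': 'hello world', 'index': 'main'}),
    headers={'Authorization': 'Splunk 00000000-0000-0000-0000-000000000000'},
    verify=True,
    timeout=10.0)
print(response.status_code)  # raise_for_status() already ran, so this is 2xx
```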
def encipher(self,string):
"""Encipher string using Enigma M3 cipher according to initialised key. Punctuation and whitespace
are removed from the input.
Example::
ciphertext = Enigma(settings=('A','A','A'),rotors=(1,2,3),reflector='B',
ringstellung=('F','V','N'),steckers=[('P','O'),('M','L'),
('I','U'),('K','J'),('N','H'),('Y','T'),('G','B'),('V','F'),
('R','E'),('D','C')]).encipher(plaintext)
:param string: The string to encipher.
:returns: The enciphered string.
"""
string = self.remove_punctuation(string)
ret = ''
for c in string.upper():
if c.isalpha(): ret += self.encipher_char(c)
else: ret += c
return ret
|
[
"def",
"encipher",
"(",
"self",
",",
"string",
")",
":",
"string",
"=",
"self",
".",
"remove_punctuation",
"(",
"string",
")",
"ret",
"=",
"''",
"for",
"c",
"in",
"string",
".",
"upper",
"(",
")",
":",
"if",
"c",
".",
"isalpha",
"(",
")",
":",
"ret",
"+=",
"self",
".",
"encipher_char",
"(",
"c",
")",
"else",
":",
"ret",
"+=",
"c",
"return",
"ret"
] | 39.5 | 19.8 |
def unpack(self, gpsd_socket_response):
"""Sets new socket data as DataStream attributes in those initialised dictionaries
Arguments:
gpsd_socket_response (json object):
Provides:
self attributes, e.g., self.lat, self.gdop
Raises:
AttributeError: 'str' object has no attribute 'keys' when the device falls out of the system
ValueError, KeyError: most likely extra, or mangled JSON data, should not happen, but that
applies to a lot of things.
"""
try:
fresh_data = json.loads(gpsd_socket_response) # 'class' is popped for iterator lead
class_name = fresh_data.pop('class')
for key in self.packages[class_name]:
# Fudge around the namespace collision with GST data package lat/lon being standard deviations
if class_name == 'GST' and key in ('lat', 'lon'):  # note: `key == 'lat' or 'lon'` would always be truthy
setattr(self, 'sd' + key, fresh_data.get(key, 'n/a'))
setattr(self, key, fresh_data.get(key, 'n/a'))  # Updates and restores 'n/a' if attribute is absent in the data
except AttributeError: # 'str' object has no attribute 'keys'
sys.stderr.write('There is an unexpected exception unpacking JSON object')
return
except (ValueError, KeyError) as error:
sys.stderr.write(str(error)) # Extra data or aberrant data in stream.
return
|
[
"def",
"unpack",
"(",
"self",
",",
"gpsd_socket_response",
")",
":",
"try",
":",
"fresh_data",
"=",
"json",
".",
"loads",
"(",
"gpsd_socket_response",
")",
"# 'class' is popped for iterator lead",
"class_name",
"=",
"fresh_data",
".",
"pop",
"(",
"'class'",
")",
"for",
"key",
"in",
"self",
".",
"packages",
"[",
"class_name",
"]",
":",
"# Fudge around the namespace collision with GST data package lat/lon being standard deviations",
"if",
"class_name",
"==",
"'GST'",
"and",
"key",
"==",
"'lat'",
"or",
"'lon'",
":",
"setattr",
"(",
"self",
",",
"'sd'",
"+",
"key",
",",
"fresh_data",
".",
"get",
"(",
"key",
",",
"'n/a'",
")",
")",
"setattr",
"(",
"self",
",",
"key",
",",
"fresh_data",
".",
"get",
"(",
"key",
",",
"'n/a'",
")",
")",
"# Updates and restores 'n/a' if attribute is absent in the data",
"except",
"AttributeError",
":",
"# 'str' object has no attribute 'keys'",
"sys",
".",
"stderr",
".",
"write",
"(",
"'There is an unexpected exception unpacking JSON object'",
")",
"return",
"except",
"(",
"ValueError",
",",
"KeyError",
")",
"as",
"error",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"str",
"(",
"error",
")",
")",
"# Extra data or aberrant data in stream.",
"return"
] | 52.62963 | 28.740741 |
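The `key == 'lat' or 'lon'` condition corrected above is a classic precedence trap; a two-line check shows why the original test always passed:

```python
key = 'time'
# Python parses the old test as (key == 'lat') or ('lon');
# the non-empty string 'lon' is always truthy, so the branch always ran.
print(bool(key == 'lat' or 'lon'))  # True, even though key is 'time'
print(key in ('lat', 'lon'))        # False -- the intended membership test
```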
def _find_already_built_wheel(metadata_directory):
"""Check for a wheel already built during the get_wheel_metadata hook.
"""
if not metadata_directory:
return None
metadata_parent = os.path.dirname(metadata_directory)
if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)):
return None
whl_files = glob(os.path.join(metadata_parent, '*.whl'))
if not whl_files:
print('Found wheel built marker, but no .whl files')
return None
if len(whl_files) > 1:
print('Found multiple .whl files; unspecified behaviour. '
'Will call build_wheel.')
return None
# Exactly one .whl file
return whl_files[0]
|
[
"def",
"_find_already_built_wheel",
"(",
"metadata_directory",
")",
":",
"if",
"not",
"metadata_directory",
":",
"return",
"None",
"metadata_parent",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"metadata_directory",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"pjoin",
"(",
"metadata_parent",
",",
"WHEEL_BUILT_MARKER",
")",
")",
":",
"return",
"None",
"whl_files",
"=",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"metadata_parent",
",",
"'*.whl'",
")",
")",
"if",
"not",
"whl_files",
":",
"print",
"(",
"'Found wheel built marker, but no .whl files'",
")",
"return",
"None",
"if",
"len",
"(",
"whl_files",
")",
">",
"1",
":",
"print",
"(",
"'Found multiple .whl files; unspecified behaviour. '",
"'Will call build_wheel.'",
")",
"return",
"None",
"# Exactly one .whl file",
"return",
"whl_files",
"[",
"0",
"]"
] | 34.3 | 18.05 |
def existing_gene(store, panel_obj, hgnc_id):
"""Check if gene is already added to a panel."""
existing_genes = {gene['hgnc_id']: gene for gene in panel_obj['genes']}
return existing_genes.get(hgnc_id)
|
[
"def",
"existing_gene",
"(",
"store",
",",
"panel_obj",
",",
"hgnc_id",
")",
":",
"existing_genes",
"=",
"{",
"gene",
"[",
"'hgnc_id'",
"]",
":",
"gene",
"for",
"gene",
"in",
"panel_obj",
"[",
"'genes'",
"]",
"}",
"return",
"existing_genes",
".",
"get",
"(",
"hgnc_id",
")"
] | 52.5 | 10.5 |
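A small sketch of calling `existing_gene` with a hand-built `panel_obj` in the shape the function expects; `store` is unused by the body, so `None` suffices here:

```python
panel_obj = {'genes': [{'hgnc_id': 100, 'symbol': 'ADA'},
                       {'hgnc_id': 200, 'symbol': 'BRCA1'}]}

print(existing_gene(None, panel_obj, 200))  # {'hgnc_id': 200, 'symbol': 'BRCA1'}
print(existing_gene(None, panel_obj, 999))  # None -- gene not yet in the panel
```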
def create_job_queue(self, queue_name, priority, state, compute_env_order):
"""
Create a job queue
:param queue_name: Queue name
:type queue_name: str
:param priority: Queue priority
:type priority: int
:param state: Queue state
:type state: string
:param compute_env_order: Compute environment list
:type compute_env_order: list of dict
:return: Tuple of Name, ARN
:rtype: tuple of str
"""
for variable, var_name in ((queue_name, 'jobQueueName'), (priority, 'priority'), (state, 'state'), (compute_env_order, 'computeEnvironmentOrder')):
if variable is None:
raise ClientException('{0} must be provided'.format(var_name))
if state not in ('ENABLED', 'DISABLED'):
raise ClientException('state {0} must be one of ENABLED | DISABLED'.format(state))
if self.get_job_queue_by_name(queue_name) is not None:
raise ClientException('Job queue {0} already exists'.format(queue_name))
if len(compute_env_order) == 0:
raise ClientException('At least 1 compute environment must be provided')
try:
# orders and extracts computeEnvironment names
ordered_compute_environments = [item['computeEnvironment'] for item in sorted(compute_env_order, key=lambda x: x['order'])]
env_objects = []
# Check each ARN exists, then make a list of compute env's
for arn in ordered_compute_environments:
env = self.get_compute_environment_by_arn(arn)
if env is None:
raise ClientException('Compute environment {0} does not exist'.format(arn))
env_objects.append(env)
except Exception:
raise ClientException('computeEnvironmentOrder is malformed')
# Create new Job Queue
queue = JobQueue(queue_name, priority, state, env_objects, compute_env_order, self.region_name)
self._job_queues[queue.arn] = queue
return queue_name, queue.arn
|
[
"def",
"create_job_queue",
"(",
"self",
",",
"queue_name",
",",
"priority",
",",
"state",
",",
"compute_env_order",
")",
":",
"for",
"variable",
",",
"var_name",
"in",
"(",
"(",
"queue_name",
",",
"'jobQueueName'",
")",
",",
"(",
"priority",
",",
"'priority'",
")",
",",
"(",
"state",
",",
"'state'",
")",
",",
"(",
"compute_env_order",
",",
"'computeEnvironmentOrder'",
")",
")",
":",
"if",
"variable",
"is",
"None",
":",
"raise",
"ClientException",
"(",
"'{0} must be provided'",
".",
"format",
"(",
"var_name",
")",
")",
"if",
"state",
"not",
"in",
"(",
"'ENABLED'",
",",
"'DISABLED'",
")",
":",
"raise",
"ClientException",
"(",
"'state {0} must be one of ENABLED | DISABLED'",
".",
"format",
"(",
"state",
")",
")",
"if",
"self",
".",
"get_job_queue_by_name",
"(",
"queue_name",
")",
"is",
"not",
"None",
":",
"raise",
"ClientException",
"(",
"'Job queue {0} already exists'",
".",
"format",
"(",
"queue_name",
")",
")",
"if",
"len",
"(",
"compute_env_order",
")",
"==",
"0",
":",
"raise",
"ClientException",
"(",
"'At least 1 compute environment must be provided'",
")",
"try",
":",
"# orders and extracts computeEnvironment names",
"ordered_compute_environments",
"=",
"[",
"item",
"[",
"'computeEnvironment'",
"]",
"for",
"item",
"in",
"sorted",
"(",
"compute_env_order",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"'order'",
"]",
")",
"]",
"env_objects",
"=",
"[",
"]",
"# Check each ARN exists, then make a list of compute env's",
"for",
"arn",
"in",
"ordered_compute_environments",
":",
"env",
"=",
"self",
".",
"get_compute_environment_by_arn",
"(",
"arn",
")",
"if",
"env",
"is",
"None",
":",
"raise",
"ClientException",
"(",
"'Compute environment {0} does not exist'",
".",
"format",
"(",
"arn",
")",
")",
"env_objects",
".",
"append",
"(",
"env",
")",
"except",
"Exception",
":",
"raise",
"ClientException",
"(",
"'computeEnvironmentOrder is malformed'",
")",
"# Create new Job Queue",
"queue",
"=",
"JobQueue",
"(",
"queue_name",
",",
"priority",
",",
"state",
",",
"env_objects",
",",
"compute_env_order",
",",
"self",
".",
"region_name",
")",
"self",
".",
"_job_queues",
"[",
"queue",
".",
"arn",
"]",
"=",
"queue",
"return",
"queue_name",
",",
"queue",
".",
"arn"
] | 46.568182 | 24.568182 |
async def iter_all(
self,
direction: msg.StreamDirection = msg.StreamDirection.Forward,
from_position: Optional[Union[msg.Position, msg._PositionSentinel]] = None,
batch_size: int = 100,
resolve_links: bool = True,
require_master: bool = False,
correlation_id: Optional[uuid.UUID] = None,
):
"""
Read through all the events in the database.
Args:
direction (optional): Controls whether to read forward or backward
through the events. Defaults to StreamDirection.Forward
from_position (optional): The position to start reading from.
Defaults to photonpump.Beginning when direction is Forward,
photonpump.End when direction is Backward.
batch_size (optional): The maximum number of events to read at a time.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this
command.
Examples:
Print every event from the database.
>>> async with connect() as conn:
>>>     async for event in conn.iter_all():
>>>         print(event)
Print every event from the database in reverse order
>>> async with connect() as conn:
>>>     async for event in conn.iter_all(direction=StreamDirection.Backward):
>>>         print(event)
Start reading from a known commit position
>>> async with connect() as conn:
>>>     async for event in conn.iter_all(from_position=Position(12345)):
>>>         print(event)
"""
correlation_id = correlation_id
cmd = convo.IterAllEvents(
msg.Position.for_direction(direction, from_position),
batch_size,
resolve_links,
require_master,
direction,
self.credential,
correlation_id,
)
result = await self.dispatcher.start_conversation(cmd)
iterator = await result
async for event in iterator:
yield event
|
[
"async",
"def",
"iter_all",
"(",
"self",
",",
"direction",
":",
"msg",
".",
"StreamDirection",
"=",
"msg",
".",
"StreamDirection",
".",
"Forward",
",",
"from_position",
":",
"Optional",
"[",
"Union",
"[",
"msg",
".",
"Position",
",",
"msg",
".",
"_PositionSentinel",
"]",
"]",
"=",
"None",
",",
"batch_size",
":",
"int",
"=",
"100",
",",
"resolve_links",
":",
"bool",
"=",
"True",
",",
"require_master",
":",
"bool",
"=",
"False",
",",
"correlation_id",
":",
"Optional",
"[",
"uuid",
".",
"UUID",
"]",
"=",
"None",
",",
")",
":",
"correlation_id",
"=",
"correlation_id",
"cmd",
"=",
"convo",
".",
"IterAllEvents",
"(",
"msg",
".",
"Position",
".",
"for_direction",
"(",
"direction",
",",
"from_position",
")",
",",
"batch_size",
",",
"resolve_links",
",",
"require_master",
",",
"direction",
",",
"self",
".",
"credential",
",",
"correlation_id",
",",
")",
"result",
"=",
"await",
"self",
".",
"dispatcher",
".",
"start_conversation",
"(",
"cmd",
")",
"iterator",
"=",
"await",
"result",
"async",
"for",
"event",
"in",
"iterator",
":",
"yield",
"event"
] | 37.387097 | 21.967742 |
def connect(self, uuid_value, wait=None):
"""Connect to a specific device by its uuid
Attempt to connect to a device that we have previously scanned using its UUID.
If wait is not None, then it is used in the same way as scan(wait) to override
default wait times with an explicit value.
Args:
uuid_value (int): The unique id of the device that we would like to connect to.
wait (float): Optional amount of time to force the device adapter to wait before
attempting to connect.
"""
if self.connected:
raise HardwareError("Cannot connect when we are already connected")
if uuid_value not in self._scanned_devices:
self.scan(wait=wait)
with self._scan_lock:
if uuid_value not in self._scanned_devices:
raise HardwareError("Could not find device to connect to by UUID", uuid=uuid_value)
connstring = self._scanned_devices[uuid_value]['connection_string']
self.connect_direct(connstring)
|
[
"def",
"connect",
"(",
"self",
",",
"uuid_value",
",",
"wait",
"=",
"None",
")",
":",
"if",
"self",
".",
"connected",
":",
"raise",
"HardwareError",
"(",
"\"Cannot connect when we are already connected\"",
")",
"if",
"uuid_value",
"not",
"in",
"self",
".",
"_scanned_devices",
":",
"self",
".",
"scan",
"(",
"wait",
"=",
"wait",
")",
"with",
"self",
".",
"_scan_lock",
":",
"if",
"uuid_value",
"not",
"in",
"self",
".",
"_scanned_devices",
":",
"raise",
"HardwareError",
"(",
"\"Could not find device to connect to by UUID\"",
",",
"uuid",
"=",
"uuid_value",
")",
"connstring",
"=",
"self",
".",
"_scanned_devices",
"[",
"uuid_value",
"]",
"[",
"'connection_string'",
"]",
"self",
".",
"connect_direct",
"(",
"connstring",
")"
] | 40.269231 | 27.346154 |
def encipher(self,string):
"""Encipher string using Foursquare cipher according to initialised key. Punctuation and whitespace
are removed from the input. If the input plaintext is not an even number of characters, an 'X' will be appended.
Example::
ciphertext = Foursquare(key1='zgptfoihmuwdrcnykeqaxvsbl',key2='mfnbdcrhsaxyogvituewlqzkp').encipher(plaintext)
:param string: The string to encipher.
:returns: The enciphered string.
"""
string = self.remove_punctuation(string)
if len(string)%2 == 1: string = string + 'X'
ret = ''
for c in range(0,len(string.upper()),2):
a,b = self.encipher_pair(string[c],string[c+1])
ret += a + b
return ret
|
[
"def",
"encipher",
"(",
"self",
",",
"string",
")",
":",
"string",
"=",
"self",
".",
"remove_punctuation",
"(",
"string",
")",
"if",
"len",
"(",
"string",
")",
"%",
"2",
"==",
"1",
":",
"string",
"=",
"string",
"+",
"'X'",
"ret",
"=",
"''",
"for",
"c",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"string",
".",
"upper",
"(",
")",
")",
",",
"2",
")",
":",
"a",
",",
"b",
"=",
"self",
".",
"encipher_pair",
"(",
"string",
"[",
"c",
"]",
",",
"string",
"[",
"c",
"+",
"1",
"]",
")",
"ret",
"+=",
"a",
"+",
"b",
"return",
"ret"
] | 43.166667 | 24.5 |
def auth(self):
"""
Auth is used to call the AUTH API of CricketAPI.
An access token is required for every request call to CricketAPI.
The auth function will post the user's Cricket API app details to the server
and return the access token.
Return:
Access token
"""
if not self.store_handler.has_value('access_token'):
params = {}
params["access_key"] = self.access_key
params["secret_key"] = self.secret_key
params["app_id"] = self.app_id
params["device_id"] = self.device_id
auth_url = self.api_path + "auth/"
response = self.get_response(auth_url, params, "post")
if 'auth' in response:
self.store_handler.set_value("access_token", response['auth']['access_token'])
self.store_handler.set_value("expires", response['auth']['expires'])
logger.info('Getting new access token')
else:
msg = "Error getting access_token, " + \
"please verify your access_key, secret_key and app_id"
logger.error(msg)
raise Exception("Auth Failed, please check your access details")
|
[
"def",
"auth",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"store_handler",
".",
"has_value",
"(",
"'access_token'",
")",
":",
"params",
"=",
"{",
"}",
"params",
"[",
"\"access_key\"",
"]",
"=",
"self",
".",
"access_key",
"params",
"[",
"\"secret_key\"",
"]",
"=",
"self",
".",
"secret_key",
"params",
"[",
"\"app_id\"",
"]",
"=",
"self",
".",
"app_id",
"params",
"[",
"\"device_id\"",
"]",
"=",
"self",
".",
"device_id",
"auth_url",
"=",
"self",
".",
"api_path",
"+",
"\"auth/\"",
"response",
"=",
"self",
".",
"get_response",
"(",
"auth_url",
",",
"params",
",",
"\"post\"",
")",
"if",
"'auth'",
"in",
"response",
":",
"self",
".",
"store_handler",
".",
"set_value",
"(",
"\"access_token\"",
",",
"response",
"[",
"'auth'",
"]",
"[",
"'access_token'",
"]",
")",
"self",
".",
"store_handler",
".",
"set_value",
"(",
"\"expires\"",
",",
"response",
"[",
"'auth'",
"]",
"[",
"'expires'",
"]",
")",
"logger",
".",
"info",
"(",
"'Getting new access token'",
")",
"else",
":",
"msg",
"=",
"\"Error getting access_token, \"",
"+",
"\"please verify your access_key, secret_key and app_id\"",
"logger",
".",
"error",
"(",
"msg",
")",
"raise",
"Exception",
"(",
"\"Auth Failed, please check your access details\"",
")"
] | 42.482759 | 20.482759 |
def _get_co_from_dump(data):
"""Return the code objects from the dump."""
# Read py2exe header
current = struct.calcsize(b'iiii')
metadata = struct.unpack(b'iiii', data[:current])
# check py2exe magic number
# assert(metadata[0] == 0x78563412)
logging.info("Magic value: %x", metadata[0])
logging.info("Code bytes length: %d", metadata[3])
arcname = ''
while six.indexbytes(data, current) != 0:
arcname += chr(six.indexbytes(data, current))
current += 1
logging.info("Archive name: %s", arcname or '-')
code_bytes = data[current + 1:]
# verify code bytes count and metadata info
# assert(len(code_bytes) == metadata[3])
code_objects = marshal.loads(code_bytes)
return code_objects
|
[
"def",
"_get_co_from_dump",
"(",
"data",
")",
":",
"# Read py2exe header",
"current",
"=",
"struct",
".",
"calcsize",
"(",
"b'iiii'",
")",
"metadata",
"=",
"struct",
".",
"unpack",
"(",
"b'iiii'",
",",
"data",
"[",
":",
"current",
"]",
")",
"# check py2exe magic number",
"# assert(metadata[0] == 0x78563412)",
"logging",
".",
"info",
"(",
"\"Magic value: %x\"",
",",
"metadata",
"[",
"0",
"]",
")",
"logging",
".",
"info",
"(",
"\"Code bytes length: %d\"",
",",
"metadata",
"[",
"3",
"]",
")",
"arcname",
"=",
"''",
"while",
"six",
".",
"indexbytes",
"(",
"data",
",",
"current",
")",
"!=",
"0",
":",
"arcname",
"+=",
"chr",
"(",
"six",
".",
"indexbytes",
"(",
"data",
",",
"current",
")",
")",
"current",
"+=",
"1",
"logging",
".",
"info",
"(",
"\"Archive name: %s\"",
",",
"arcname",
"or",
"'-'",
")",
"code_bytes",
"=",
"data",
"[",
"current",
"+",
"1",
":",
"]",
"# verify code bytes count and metadata info",
"# assert(len(code_bytes) == metadata[3])",
"code_objects",
"=",
"marshal",
".",
"loads",
"(",
"code_bytes",
")",
"return",
"code_objects"
] | 32.26087 | 15.043478 |
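A self-contained round trip for `_get_co_from_dump`: build a blob with the same layout (four little-endian ints, a NUL-terminated archive name, then marshalled code) and parse it back. The magic value comes from the commented assertion; `six` and `logging` must be importable, as in the original module.

```python
import marshal
import struct

code_bytes = marshal.dumps(compile("print('hi')", '<dump>', 'exec'))
blob = (struct.pack(b'iiii', 0x78563412, 0, 0, len(code_bytes))
        + b'library.zip\x00'  # archive name, NUL-terminated
        + code_bytes)

recovered = _get_co_from_dump(blob)
exec(recovered)  # prints: hi
```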
def process(self, metric):
"""
Process a metric by converting metric name to MQTT topic name;
the payload is metric and timestamp.
"""
if not mosquitto:
return
line = str(metric)
topic, value, timestamp = line.split()
if len(self.prefix):
topic = "%s/%s" % (self.prefix, topic)
topic = topic.replace('.', '/')
topic = topic.replace('#', '&') # Topic must not contain wildcards
if self.timestamp == 0:
self.mqttc.publish(topic, "%s" % (value), self.qos)
else:
self.mqttc.publish(topic, "%s %s" % (value, timestamp), self.qos)
|
[
"def",
"process",
"(",
"self",
",",
"metric",
")",
":",
"if",
"not",
"mosquitto",
":",
"return",
"line",
"=",
"str",
"(",
"metric",
")",
"topic",
",",
"value",
",",
"timestamp",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"self",
".",
"prefix",
")",
":",
"topic",
"=",
"\"%s/%s\"",
"%",
"(",
"self",
".",
"prefix",
",",
"topic",
")",
"topic",
"=",
"topic",
".",
"replace",
"(",
"'.'",
",",
"'/'",
")",
"topic",
"=",
"topic",
".",
"replace",
"(",
"'#'",
",",
"'&'",
")",
"# Topic must not contain wildcards",
"if",
"self",
".",
"timestamp",
"==",
"0",
":",
"self",
".",
"mqttc",
".",
"publish",
"(",
"topic",
",",
"\"%s\"",
"%",
"(",
"value",
")",
",",
"self",
".",
"qos",
")",
"else",
":",
"self",
".",
"mqttc",
".",
"publish",
"(",
"topic",
",",
"\"%s %s\"",
"%",
"(",
"value",
",",
"timestamp",
")",
",",
"self",
".",
"qos",
")"
] | 32.8 | 19.1 |
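The metric-to-topic mapping in `process` is easy to exercise in isolation; a sketch of the same transformation with a hypothetical `collectd` prefix:

```python
line = 'servers.web01.cpu.total.user 42 1634000000'
topic, value, timestamp = line.split()

prefix = 'collectd'  # hypothetical prefix value
if len(prefix):
    topic = '%s/%s' % (prefix, topic)
topic = topic.replace('.', '/')
topic = topic.replace('#', '&')  # '#' is an MQTT wildcard and must not appear

print(topic)             # collectd/servers/web01/cpu/total/user
print(value, timestamp)  # 42 1634000000
```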
def load_lists(keys=[], values=[], name='NT'):
""" Map namedtuples given a pair of key, value lists. """
mapping = dict(zip(keys, values))
return mapper(mapping, _nt_name=name)
|
[
"def",
"load_lists",
"(",
"keys",
"=",
"[",
"]",
",",
"values",
"=",
"[",
"]",
",",
"name",
"=",
"'NT'",
")",
":",
"mapping",
"=",
"dict",
"(",
"zip",
"(",
"keys",
",",
"values",
")",
")",
"return",
"mapper",
"(",
"mapping",
",",
"_nt_name",
"=",
"name",
")"
] | 46.25 | 2.5 |
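`mapper` is defined elsewhere in the same library; assuming it builds a namedtuple from a mapping, the behaviour can be sketched with the standard library alone:

```python
from collections import namedtuple

def load_lists_demo(keys=(), values=(), name='NT'):
    # Stand-in for mapper(): build a namedtuple from zipped key/value lists.
    mapping = dict(zip(keys, values))
    return namedtuple(name, mapping.keys())(**mapping)

nt = load_lists_demo(['host', 'port'], ['localhost', 8080])
print(nt.host, nt.port)  # localhost 8080
```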
def pretty_table(rows, header=None):
"""
Returns a string with a simple pretty table representing the given rows.
Rows can be:
- Sequences such as lists or tuples
- Mappings such as dicts
- Any object with a __dict__ attribute (most plain python objects) which is
equivalent to passing the __dict__ directly.
If no header is given then either all or none of the rows must be sequences
to ensure the correct order. If there are no sequences then the header will be
derived from the keys of the mappings.
>>> print(pretty_table([['a', 'hello', 'c', 1], ['world', 'b', 'd', 2]]))
a     | hello | c | 1
world | b     | d | 2
>>> print(pretty_table([['a', 'hello', 'c', 1], ['world', 'b', 'd', 2]], header='col1 col2 col3 col4'))
col1  | col2  | col3 | col4
---------------------------
a     | hello | c    | 1
world | b     | d    | 2
>>> print(pretty_table([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]))
a | b
-----
1 | 2
3 | 4
>>> class C(object):
... def __init__(self, a, b):
... self.a = a
... self.b = b
...
>>> print(pretty_table([{'a': 1, 'b': 2}, C(3, 4), [5, 6]], header=['b', 'a']))
b | a
-----
2 | 1
4 | 3
5 | 6
>>> print(pretty_table([{'a': 1, 'b': 2}, C(3, 4), [5, 6]]))
Traceback (most recent call last):
...
ValueError: Cannot mix sequences and other types of rows without specifying a header
>>> print(pretty_table([[1, 2], [3, 4, 5]]))
Traceback (most recent call last):
...
ValueError: Mismatched lengths.
First row (len = 2):
[1, 2]
Current row (len = 3):
[3, 4, 5]
>>> print(pretty_table([{'a': 1, 'b': 2}], header='c d'))
Traceback (most recent call last):
...
KeyError: "Tried to access 'c', only keys are: ['a', 'b']"
"""
rows2 = []
if header:
header = ensure_list_if_string(header)
rows2.insert(0, header)
row_type = ['any']
else:
header = []
row_type = [None]
def require_type(t):
if row_type[0] not in (None, t, 'any'):
raise ValueError('Cannot mix sequences and other types of rows without specifying a header')
if row_type[0] is None:
row_type[0] = t
def handle_dict(d):
require_type('mapping')
if not header:
header[:] = sorted(d.keys())
rows2.insert(0, header)
return [helpful_error_dict_get(d, key) for key in header]
for row in rows:
if isinstance(row, Mapping):
row = handle_dict(row)
elif isinstance(row, Sequence):
require_type('sequence')
if rows2 and len(row) != len(rows2[0]):
raise ValueError('Mismatched lengths.\n'
'First row (len = %s):\n%s\n'
'Current row (len = %s):\n%s' %
(len(rows2[0]), rows2[0], len(row), row))
else:
row = handle_dict(row.__dict__)
rows2.append(row)
rows = [[str(cell) for cell in row] for row in rows2]
widths = [max(len(row[i]) for row in rows) for i in range(len(rows[0]))]
lines = [' | '.join(cell.ljust(width) for cell, width in zip(row, widths)).strip()
for row in rows]
if header:
lines.insert(1, '-' * len(lines[0]))
return '\n'.join(lines)
|
[
"def",
"pretty_table",
"(",
"rows",
",",
"header",
"=",
"None",
")",
":",
"rows2",
"=",
"[",
"]",
"if",
"header",
":",
"header",
"=",
"ensure_list_if_string",
"(",
"header",
")",
"rows2",
".",
"insert",
"(",
"0",
",",
"header",
")",
"row_type",
"=",
"[",
"'any'",
"]",
"else",
":",
"header",
"=",
"[",
"]",
"row_type",
"=",
"[",
"None",
"]",
"def",
"require_type",
"(",
"t",
")",
":",
"if",
"row_type",
"[",
"0",
"]",
"not",
"in",
"(",
"None",
",",
"t",
",",
"'any'",
")",
":",
"raise",
"ValueError",
"(",
"'Cannot mix sequences and other types of rows without specifying a header'",
")",
"if",
"row_type",
"[",
"0",
"]",
"is",
"None",
":",
"row_type",
"[",
"0",
"]",
"=",
"t",
"def",
"handle_dict",
"(",
"d",
")",
":",
"require_type",
"(",
"'mapping'",
")",
"if",
"not",
"header",
":",
"header",
"[",
":",
"]",
"=",
"sorted",
"(",
"d",
".",
"keys",
"(",
")",
")",
"rows2",
".",
"insert",
"(",
"0",
",",
"header",
")",
"return",
"[",
"helpful_error_dict_get",
"(",
"d",
",",
"key",
")",
"for",
"key",
"in",
"header",
"]",
"for",
"row",
"in",
"rows",
":",
"if",
"isinstance",
"(",
"row",
",",
"Mapping",
")",
":",
"row",
"=",
"handle_dict",
"(",
"row",
")",
"elif",
"isinstance",
"(",
"row",
",",
"Sequence",
")",
":",
"require_type",
"(",
"'sequence'",
")",
"if",
"rows2",
"and",
"len",
"(",
"row",
")",
"!=",
"len",
"(",
"rows2",
"[",
"0",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'Mismatched lengths.\\n'",
"'First row (len = %s):\\n%s\\n'",
"'Current row (len = %s):\\n%s'",
"%",
"(",
"len",
"(",
"rows2",
"[",
"0",
"]",
")",
",",
"rows2",
"[",
"0",
"]",
",",
"len",
"(",
"row",
")",
",",
"row",
")",
")",
"else",
":",
"row",
"=",
"handle_dict",
"(",
"row",
".",
"__dict__",
")",
"rows2",
".",
"append",
"(",
"row",
")",
"rows",
"=",
"[",
"[",
"str",
"(",
"cell",
")",
"for",
"cell",
"in",
"row",
"]",
"for",
"row",
"in",
"rows2",
"]",
"widths",
"=",
"[",
"max",
"(",
"len",
"(",
"row",
"[",
"i",
"]",
")",
"for",
"row",
"in",
"rows",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"rows",
"[",
"0",
"]",
")",
")",
"]",
"lines",
"=",
"[",
"' | '",
".",
"join",
"(",
"cell",
".",
"ljust",
"(",
"width",
")",
"for",
"cell",
",",
"width",
"in",
"zip",
"(",
"row",
",",
"widths",
")",
")",
".",
"strip",
"(",
")",
"for",
"row",
"in",
"rows",
"]",
"if",
"header",
":",
"lines",
".",
"insert",
"(",
"1",
",",
"'-'",
"*",
"len",
"(",
"lines",
"[",
"0",
"]",
")",
")",
"return",
"'\\n'",
".",
"join",
"(",
"lines",
")"
] | 34.708333 | 20.854167 |
def images(self, tag, images, step=None, rows=None, cols=None):
"""Saves (rows, cols) tiled images from onp.ndarray.
If either rows or cols aren't given, they are determined automatically
from the size of the image batch, if neither are given a long column
of images is produced. This truncates the image batch rather than padding
if it doesn't fill the final row.
Args:
tag: str: label for this data
images: ndarray: [N,H,W,1] or [N,H,W,3] to tile in 2d
step: int: training step
rows: int: number of rows in tile
cols: int: number of columns in tile
"""
images = onp.array(images)
if step is None:
step = self._step
else:
self._step = step
n_images = onp.shape(images)[0]
if rows is None and cols is None:
rows = 1
cols = n_images
elif rows is None:
rows = n_images // cols
elif cols is None:
cols = n_images // rows
tiled_images = _pack_images(images, rows, cols)
self.image(tag, tiled_images, step=step)
|
[
"def",
"images",
"(",
"self",
",",
"tag",
",",
"images",
",",
"step",
"=",
"None",
",",
"rows",
"=",
"None",
",",
"cols",
"=",
"None",
")",
":",
"images",
"=",
"onp",
".",
"array",
"(",
"images",
")",
"if",
"step",
"is",
"None",
":",
"step",
"=",
"self",
".",
"_step",
"else",
":",
"self",
".",
"_step",
"=",
"step",
"n_images",
"=",
"onp",
".",
"shape",
"(",
"images",
")",
"[",
"0",
"]",
"if",
"rows",
"is",
"None",
"and",
"cols",
"is",
"None",
":",
"rows",
"=",
"1",
"cols",
"=",
"n_images",
"elif",
"rows",
"is",
"None",
":",
"rows",
"=",
"n_images",
"//",
"cols",
"elif",
"cols",
"is",
"None",
":",
"cols",
"=",
"n_images",
"//",
"rows",
"tiled_images",
"=",
"_pack_images",
"(",
"images",
",",
"rows",
",",
"cols",
")",
"self",
".",
"image",
"(",
"tag",
",",
"tiled_images",
",",
"step",
"=",
"step",
")"
] | 33.633333 | 16.6 |
def sinusoidal_bidirectional(target,
num_points=1e2,
surface_tension='pore.surface_tension',
contact_angle='pore.contact_angle',
throat_diameter='throat.diameter',
throat_amplitude='throat.amplitude',
throat_length='throat.length',
pore_diameter='pore.diameter'):
r"""
Computes the throat capillary entry pressure assuming the throat has a
sinusoidal profile.
Makes use of the toroidal meniscus model with mode touch.
This model accounts for meniscus protrusion into adjacent pores and
touching solid features.
It is bidirectional because the connected pores generally have different
sizes and this determines how far the meniscus can protrude.
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties
num_points : float (Default 100)
The number of divisions to make along the profile length to assess the
meniscus properties in order to find the touch length.
surface_tension : dict key (string)
The dictionary key containing the surface tension values to be used.
If a pore property is given, it is interpolated to a throat list.
contact_angle : dict key (string)
The dictionary key containing the contact angle values to be used.
If a pore property is given, it is interpolated to a throat list.
throat_diameter : dict key (string)
The dictionary key containing the throat diameter values to be used.
throat_amplitude : dict key (string)
The dictionary key containing the amplitude of variation in the throat
diameter about the mean.
throat_length : dict key (string)
The dictionary key containing the throat length values to be used.
pore_diameter : dict key (string)
The dictionary key containing the pore diameter values to be used.
Notes
"""
network = target.project.network
conns = network['throat.conns']
values = {}
for p in range(2):
network['throat.temp_diameter'] = network[pore_diameter][conns[:, p]]
key = 'throat.touch_pore_'+str(p)
target.add_model(propname=key,
model=pm.meniscus.sinusoidal,
mode='touch',
num_points=num_points,
surface_tension=surface_tension,
contact_angle=contact_angle,
throat_diameter=throat_diameter,
throat_amplitude=throat_amplitude,
throat_length=throat_length,
touch_length='throat.temp_diameter')
values[p] = target[key]
target.remove_model(key)
del network['throat.temp_diameter']
return np.vstack((values[0], values[1])).T
|
[
"def",
"sinusoidal_bidirectional",
"(",
"target",
",",
"num_points",
"=",
"1e2",
",",
"surface_tension",
"=",
"'pore.surface_tension'",
",",
"contact_angle",
"=",
"'pore.contact_angle'",
",",
"throat_diameter",
"=",
"'throat.diameter'",
",",
"throat_amplitude",
"=",
"'throat.amplitude'",
",",
"throat_length",
"=",
"'throat.length'",
",",
"pore_diameter",
"=",
"'pore.diameter'",
")",
":",
"network",
"=",
"target",
".",
"project",
".",
"network",
"conns",
"=",
"network",
"[",
"'throat.conns'",
"]",
"values",
"=",
"{",
"}",
"for",
"p",
"in",
"range",
"(",
"2",
")",
":",
"network",
"[",
"'throat.temp_diameter'",
"]",
"=",
"network",
"[",
"pore_diameter",
"]",
"[",
"conns",
"[",
":",
",",
"p",
"]",
"]",
"key",
"=",
"'throat.touch_pore_'",
"+",
"str",
"(",
"p",
")",
"target",
".",
"add_model",
"(",
"propname",
"=",
"key",
",",
"model",
"=",
"pm",
".",
"meniscus",
".",
"sinusoidal",
",",
"mode",
"=",
"'touch'",
",",
"num_points",
"=",
"num_points",
",",
"surface_tension",
"=",
"surface_tension",
",",
"contact_angle",
"=",
"contact_angle",
",",
"throat_diameter",
"=",
"throat_diameter",
",",
"throat_amplitude",
"=",
"throat_amplitude",
",",
"throat_length",
"=",
"throat_length",
",",
"touch_length",
"=",
"'throat.temp_diameter'",
")",
"values",
"[",
"p",
"]",
"=",
"target",
"[",
"key",
"]",
"target",
".",
"remove_model",
"(",
"key",
")",
"del",
"network",
"[",
"'throat.temp_diameter'",
"]",
"return",
"np",
".",
"vstack",
"(",
"(",
"values",
"[",
"0",
"]",
",",
"values",
"[",
"1",
"]",
")",
")",
".",
"T"
] | 43.671429 | 20.2 |
def delete_project(self, project_name):
""" delete project
Unsuccessful opertaion will cause an LogException.
:type project_name: string
:param project_name: the Project name
:return: DeleteProjectResponse
:raise: LogException
"""
headers = {}
params = {}
resource = "/"
(resp, header) = self._send("DELETE", project_name, None, resource, params, headers)
return DeleteProjectResponse(header, resp)
|
[
"def",
"delete_project",
"(",
"self",
",",
"project_name",
")",
":",
"headers",
"=",
"{",
"}",
"params",
"=",
"{",
"}",
"resource",
"=",
"\"/\"",
"(",
"resp",
",",
"header",
")",
"=",
"self",
".",
"_send",
"(",
"\"DELETE\"",
",",
"project_name",
",",
"None",
",",
"resource",
",",
"params",
",",
"headers",
")",
"return",
"DeleteProjectResponse",
"(",
"header",
",",
"resp",
")"
] | 29.411765 | 18.647059 |
def get_most_relevant_words_for_topic(vocab, rel_mat, topic, n=None):
"""
Get words from `vocab` for `topic` ordered from most to least relevant (Sievert and Shirley 2014), using the relevance
matrix `rel_mat` obtained from `get_topic_word_relevance()`.
Optionally only return the `n` most relevant words.
"""
_check_relevant_words_for_topic_args(vocab, rel_mat, topic)
return _words_by_score(vocab, rel_mat[topic], least_to_most=False, n=n)
|
[
"def",
"get_most_relevant_words_for_topic",
"(",
"vocab",
",",
"rel_mat",
",",
"topic",
",",
"n",
"=",
"None",
")",
":",
"_check_relevant_words_for_topic_args",
"(",
"vocab",
",",
"rel_mat",
",",
"topic",
")",
"return",
"_words_by_score",
"(",
"vocab",
",",
"rel_mat",
"[",
"topic",
"]",
",",
"least_to_most",
"=",
"False",
",",
"n",
"=",
"n",
")"
] | 57.5 | 25.75 |
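`_words_by_score` is internal to the library; a minimal NumPy sketch of what "ordered from most to least relevant" means for one topic row, assuming higher scores are more relevant:

```python
import numpy as np

vocab = np.array(['alpha', 'beta', 'gamma', 'delta'])
rel_row = np.array([0.1, 0.9, 0.4, 0.7])  # relevance of each word for one topic

order = np.argsort(rel_row)[::-1]  # indices sorted most to least relevant
print(vocab[order][:3])            # ['beta' 'delta' 'gamma']
```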
def search_prod_type_tags(self, ins, type, tags, pipeline):
'''Returns the first match...'''
return StoredProduct(id=100, content='null.fits', tags={})
|
[
"def",
"search_prod_type_tags",
"(",
"self",
",",
"ins",
",",
"type",
",",
"tags",
",",
"pipeline",
")",
":",
"return",
"StoredProduct",
"(",
"id",
"=",
"100",
",",
"content",
"=",
"'null.fits'",
",",
"tags",
"=",
"{",
"}",
")"
] | 57 | 17 |
def gevent_spawn(self):
""" Spawn worker threads (using gevent) """
monkey.patch_all(thread=False)
joinall([spawn(self.gevent_worker) for x in range(self.queue_worker_amount)])
|
[
"def",
"gevent_spawn",
"(",
"self",
")",
":",
"monkey",
".",
"patch_all",
"(",
"thread",
"=",
"False",
")",
"joinall",
"(",
"[",
"spawn",
"(",
"self",
".",
"gevent_worker",
")",
"for",
"x",
"in",
"range",
"(",
"self",
".",
"queue_worker_amount",
")",
"]",
")"
] | 49.25 | 16 |
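A self-contained illustration of the spawn/joinall pattern used by `gevent_spawn`, with the worker count hard-coded to 4 for the demo:

```python
from gevent import joinall, sleep, spawn

def worker(n):
    sleep(0.01)  # cooperative yield, standing in for real queue work
    print('worker %d done' % n)

joinall([spawn(worker, x) for x in range(4)])
```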
def densenet121(num_classes=1000, pretrained='imagenet'):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model = models.densenet121(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['densenet121'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_densenets(model)
return model
|
[
"def",
"densenet121",
"(",
"num_classes",
"=",
"1000",
",",
"pretrained",
"=",
"'imagenet'",
")",
":",
"model",
"=",
"models",
".",
"densenet121",
"(",
"pretrained",
"=",
"False",
")",
"if",
"pretrained",
"is",
"not",
"None",
":",
"settings",
"=",
"pretrained_settings",
"[",
"'densenet121'",
"]",
"[",
"pretrained",
"]",
"model",
"=",
"load_pretrained",
"(",
"model",
",",
"num_classes",
",",
"settings",
")",
"model",
"=",
"modify_densenets",
"(",
"model",
")",
"return",
"model"
] | 43.7 | 15.7 |
def get_nfc_chars(self):
"""
Returns the set of IPA symbols that are precomposed (decomposable)
chars. These should not be decomposed during string normalisation,
because they will not be recognised otherwise.
In IPA 2015 there is only one precomposed character: ç, the voiceless
palatal fricative.
"""
ex = []
for char in self.ipa.keys():
if len(char) == 1:
decomp = unicodedata.normalize('NFD', char)
if len(decomp) == 2:
ex.append(char)
return set(ex)
|
[
"def",
"get_nfc_chars",
"(",
"self",
")",
":",
"ex",
"=",
"[",
"]",
"for",
"char",
"in",
"self",
".",
"ipa",
".",
"keys",
"(",
")",
":",
"if",
"len",
"(",
"char",
")",
"==",
"1",
":",
"decomp",
"=",
"unicodedata",
".",
"normalize",
"(",
"'NFD'",
",",
"char",
")",
"if",
"len",
"(",
"decomp",
")",
"==",
"2",
":",
"ex",
".",
"append",
"(",
"char",
")",
"return",
"set",
"(",
"ex",
")"
] | 26.444444 | 21 |
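The decomposition test above can be verified directly on the one character the docstring mentions:

```python
import unicodedata

c = '\u00e7'  # ç -- the voiceless palatal fricative
decomp = unicodedata.normalize('NFD', c)
print(len(c), len(decomp))  # 1 2 -> a single precomposed, decomposable char
print([unicodedata.name(x) for x in decomp])
# ['LATIN SMALL LETTER C', 'COMBINING CEDILLA']
```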
def cyl_to_spher(R,Z, phi):
"""
NAME:
cyl_to_spher
PURPOSE:
convert from cylindrical to spherical coordinates
INPUT:
R, Z, phi- cylindrical coordinates
OUTPUT:
r, theta, phi - spherical coordinates
HISTORY:
2016-05-16 - Written - Aladdin
"""
theta = nu.arctan2(R, Z)
r = (R**2 + Z**2)**.5
return (r,theta, phi)
|
[
"def",
"cyl_to_spher",
"(",
"R",
",",
"Z",
",",
"phi",
")",
":",
"theta",
"=",
"nu",
".",
"arctan2",
"(",
"R",
",",
"Z",
")",
"r",
"=",
"(",
"R",
"**",
"2",
"+",
"Z",
"**",
"2",
")",
"**",
".5",
"return",
"(",
"r",
",",
"theta",
",",
"phi",
")"
] | 14.230769 | 24.846154 |
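A quick numeric check of `cyl_to_spher` on a 3-4-5 triangle, with NumPy standing in for the module's `nu` alias:

```python
import numpy as nu

R, Z, phi = 3.0, 4.0, 0.5
r, theta, phi = cyl_to_spher(R, Z, phi)
print(r)                  # 5.0
print(nu.degrees(theta))  # ~36.87 -- polar angle measured from the Z axis
```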
def version(*names, **kwargs):
'''
Returns a string representing the package version or an empty string if not
installed. If more than one package name is specified, a dict of
name/version pairs is returned.
CLI Example:
.. code-block:: bash
salt '*' pkg.version <package name>
salt '*' pkg.version <package1> <package2> <package3> ...
'''
if len(names) == 1:
vers = __proxy__['dummy.package_status'](names[0])
return vers[names[0]]
else:
results = {}
for n in names:
vers = __proxy__['dummy.package_status'](n)
results.update(vers)
return results
|
[
"def",
"version",
"(",
"*",
"names",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"names",
")",
"==",
"1",
":",
"vers",
"=",
"__proxy__",
"[",
"'dummy.package_status'",
"]",
"(",
"names",
"[",
"0",
"]",
")",
"return",
"vers",
"[",
"names",
"[",
"0",
"]",
"]",
"else",
":",
"results",
"=",
"{",
"}",
"for",
"n",
"in",
"names",
":",
"vers",
"=",
"__proxy__",
"[",
"'dummy.package_status'",
"]",
"(",
"n",
")",
"results",
".",
"update",
"(",
"vers",
")",
"return",
"results"
] | 29.318182 | 22.318182 |
def duplicate_pvd(self):
# type: () -> None
'''
A method to add a duplicate PVD to the ISO. This is a mostly useless
feature allowed by Ecma-119 to have duplicate PVDs to avoid possible
corruption.
Parameters:
None.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
pvd = headervd.PrimaryOrSupplementaryVD(headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY)
pvd.copy(self.pvd)
self.pvds.append(pvd)
self._finish_add(self.pvd.logical_block_size(), 0)
|
[
"def",
"duplicate_pvd",
"(",
"self",
")",
":",
"# type: () -> None",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInvalidInput",
"(",
"'This object is not yet initialized; call either open() or new() to create an ISO'",
")",
"pvd",
"=",
"headervd",
".",
"PrimaryOrSupplementaryVD",
"(",
"headervd",
".",
"VOLUME_DESCRIPTOR_TYPE_PRIMARY",
")",
"pvd",
".",
"copy",
"(",
"self",
".",
"pvd",
")",
"self",
".",
"pvds",
".",
"append",
"(",
"pvd",
")",
"self",
".",
"_finish_add",
"(",
"self",
".",
"pvd",
".",
"logical_block_size",
"(",
")",
",",
"0",
")"
] | 34.1 | 29.6 |
def config_loader(app, **kwargs_config):
"""Configuration loader.
Adds support for loading templates from the Flask application's instance
folder (``<instance_folder>/templates``).
"""
# This is the only place to customize the Flask application right after
# it has been created, but before all extensions etc. are loaded.
local_templates_path = os.path.join(app.instance_path, 'templates')
if os.path.exists(local_templates_path):
# Let's customize the template loader to look into packages
# and application templates folders.
app.jinja_loader = ChoiceLoader([
FileSystemLoader(local_templates_path),
app.jinja_loader,
])
app.jinja_options = dict(
app.jinja_options,
cache_size=1000,
bytecode_cache=BytecodeCache(app)
)
invenio_config_loader(app, **kwargs_config)
|
[
"def",
"config_loader",
"(",
"app",
",",
"*",
"*",
"kwargs_config",
")",
":",
"# This is the only place customize the Flask application right after",
"# it has been created, but before all extensions etc are loaded.",
"local_templates_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"app",
".",
"instance_path",
",",
"'templates'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"local_templates_path",
")",
":",
"# Let's customize the template loader to look into packages",
"# and application templates folders.",
"app",
".",
"jinja_loader",
"=",
"ChoiceLoader",
"(",
"[",
"FileSystemLoader",
"(",
"local_templates_path",
")",
",",
"app",
".",
"jinja_loader",
",",
"]",
")",
"app",
".",
"jinja_options",
"=",
"dict",
"(",
"app",
".",
"jinja_options",
",",
"cache_size",
"=",
"1000",
",",
"bytecode_cache",
"=",
"BytecodeCache",
"(",
"app",
")",
")",
"invenio_config_loader",
"(",
"app",
",",
"*",
"*",
"kwargs_config",
")"
] | 36.041667 | 17.666667 |
async def read(self, amt=None):
"""Read at most amt bytes from the stream.
If the amt argument is omitted, read all data.
"""
# botocore to aiohttp mapping
chunk = await self.__wrapped__.read(amt if amt is not None else -1)
self._self_amount_read += len(chunk)
if amt is None or (not chunk and amt > 0):
# If the server sends empty contents or
# we ask to read all of the contents, then we know
# we need to verify the content length.
self._verify_content_length()
return chunk
|
[
"async",
"def",
"read",
"(",
"self",
",",
"amt",
"=",
"None",
")",
":",
"# botocore to aiohttp mapping",
"chunk",
"=",
"await",
"self",
".",
"__wrapped__",
".",
"read",
"(",
"amt",
"if",
"amt",
"is",
"not",
"None",
"else",
"-",
"1",
")",
"self",
".",
"_self_amount_read",
"+=",
"len",
"(",
"chunk",
")",
"if",
"amt",
"is",
"None",
"or",
"(",
"not",
"chunk",
"and",
"amt",
">",
"0",
")",
":",
"# If the server sends empty contents or",
"# we ask to read all of the contents, then we know",
"# we need to verify the content length.",
"self",
".",
"_verify_content_length",
"(",
")",
"return",
"chunk"
] | 41.214286 | 12.857143 |
def create_relationship(self, relationship_form=None):
"""Creates a new ``Relationship``.
arg: relationship_form (osid.relationship.RelationshipForm):
the form for this ``Relationship``
return: (osid.relationship.Relationship) - the new
``Relationship``
raise: IllegalState - ``relationship_form`` already used in a
create transaction
raise: InvalidArgument - one or more of the form elements is
invalid
raise: NullArgument - ``relationship_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``relationship_form`` did not originate
from ``get_relationship_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
if relationship_form is None:
raise NullArgument()
if not isinstance(relationship_form, abc_relationship_objects.RelationshipForm):
raise InvalidArgument('argument type is not a RelationshipForm')
if relationship_form.is_for_update():
raise InvalidArgument('form is for update only, not create')
try:
if self._forms[relationship_form.get_id().get_identifier()] == CREATED:
raise IllegalState('form already used in a create transaction')
except KeyError:
raise Unsupported('form did not originate from this session')
if not relationship_form.is_valid():
raise InvalidArgument('one or more of the form elements is invalid')
url_path = ('/handcar/services/relationship/families/' +
self._catalog_idstr + '/relationships')
try:
result = self._post_request(url_path, relationship_form._my_map)
except Exception:
raise # OperationFailed
self._forms[relationship_form.get_id().get_identifier()] = CREATED
return objects.Relationship(result)
|
[
"def",
"create_relationship",
"(",
"self",
",",
"relationship_form",
"=",
"None",
")",
":",
"if",
"relationship_form",
"is",
"None",
":",
"raise",
"NullArgument",
"(",
")",
"if",
"not",
"isinstance",
"(",
"relationship_form",
",",
"abc_relationship_objects",
".",
"RelationshipForm",
")",
":",
"raise",
"InvalidArgument",
"(",
"'argument type is not a RelationshipForm'",
")",
"if",
"relationship_form",
".",
"is_for_update",
"(",
")",
":",
"raise",
"InvalidArgument",
"(",
"'form is for update only, not create'",
")",
"try",
":",
"if",
"self",
".",
"_forms",
"[",
"relationship_form",
".",
"get_id",
"(",
")",
".",
"get_identifier",
"(",
")",
"]",
"==",
"CREATED",
":",
"raise",
"IllegalState",
"(",
"'form already used in a create transaction'",
")",
"except",
"KeyError",
":",
"raise",
"Unsupported",
"(",
"'form did not originate from this session'",
")",
"if",
"not",
"relationship_form",
".",
"is_valid",
"(",
")",
":",
"raise",
"InvalidArgument",
"(",
"'one or more of the form elements is invalid'",
")",
"url_path",
"=",
"(",
"'/handcar/services/relationship/families/'",
"+",
"self",
".",
"_catalog_idstr",
"+",
"'/relationships'",
")",
"try",
":",
"result",
"=",
"self",
".",
"_post_request",
"(",
"url_path",
",",
"relationship_form",
".",
"_my_map",
")",
"except",
"Exception",
":",
"raise",
"# OperationFailed",
"self",
".",
"_forms",
"[",
"relationship_form",
".",
"get_id",
"(",
")",
".",
"get_identifier",
"(",
")",
"]",
"=",
"CREATED",
"return",
"objects",
".",
"Relationship",
"(",
"result",
")"
] | 49.341463 | 22.341463 |
def com_google_fonts_check_metadata_match_weight_postscript(font_metadata):
"""METADATA.pb weight matches postScriptName."""
WEIGHTS = {
"Thin": 100,
"ThinItalic": 100,
"ExtraLight": 200,
"ExtraLightItalic": 200,
"Light": 300,
"LightItalic": 300,
"Regular": 400,
"Italic": 400,
"Medium": 500,
"MediumItalic": 500,
"SemiBold": 600,
"SemiBoldItalic": 600,
"Bold": 700,
"BoldItalic": 700,
"ExtraBold": 800,
"ExtraBoldItalic": 800,
"Black": 900,
"BlackItalic": 900
}
pair = []
for k, weight in WEIGHTS.items():
if weight == font_metadata.weight:
pair.append((k, weight))
if not pair:
yield FAIL, ("METADATA.pb: Font weight value ({})"
" is invalid.").format(font_metadata.weight)
elif not (font_metadata.post_script_name.endswith('-' + pair[0][0]) or
font_metadata.post_script_name.endswith('-' + pair[1][0])):
yield FAIL, ("METADATA.pb: Mismatch between postScriptName (\"{}\")"
" and weight value ({}). The name must be"
" ended with \"{}\" or \"{}\"."
"").format(font_metadata.post_script_name,
pair[0][1],
pair[0][0],
pair[1][0])
else:
yield PASS, "Weight value matches postScriptName."
|
[
"def",
"com_google_fonts_check_metadata_match_weight_postscript",
"(",
"font_metadata",
")",
":",
"WEIGHTS",
"=",
"{",
"\"Thin\"",
":",
"100",
",",
"\"ThinItalic\"",
":",
"100",
",",
"\"ExtraLight\"",
":",
"200",
",",
"\"ExtraLightItalic\"",
":",
"200",
",",
"\"Light\"",
":",
"300",
",",
"\"LightItalic\"",
":",
"300",
",",
"\"Regular\"",
":",
"400",
",",
"\"Italic\"",
":",
"400",
",",
"\"Medium\"",
":",
"500",
",",
"\"MediumItalic\"",
":",
"500",
",",
"\"SemiBold\"",
":",
"600",
",",
"\"SemiBoldItalic\"",
":",
"600",
",",
"\"Bold\"",
":",
"700",
",",
"\"BoldItalic\"",
":",
"700",
",",
"\"ExtraBold\"",
":",
"800",
",",
"\"ExtraBoldItalic\"",
":",
"800",
",",
"\"Black\"",
":",
"900",
",",
"\"BlackItalic\"",
":",
"900",
"}",
"pair",
"=",
"[",
"]",
"for",
"k",
",",
"weight",
"in",
"WEIGHTS",
".",
"items",
"(",
")",
":",
"if",
"weight",
"==",
"font_metadata",
".",
"weight",
":",
"pair",
".",
"append",
"(",
"(",
"k",
",",
"weight",
")",
")",
"if",
"not",
"pair",
":",
"yield",
"FAIL",
",",
"(",
"\"METADATA.pb: Font weight value ({})\"",
"\" is invalid.\"",
")",
".",
"format",
"(",
"font_metadata",
".",
"weight",
")",
"elif",
"not",
"(",
"font_metadata",
".",
"post_script_name",
".",
"endswith",
"(",
"'-'",
"+",
"pair",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"or",
"font_metadata",
".",
"post_script_name",
".",
"endswith",
"(",
"'-'",
"+",
"pair",
"[",
"1",
"]",
"[",
"0",
"]",
")",
")",
":",
"yield",
"FAIL",
",",
"(",
"\"METADATA.pb: Mismatch between postScriptName (\\\"{}\\\")\"",
"\" and weight value ({}). The name must be\"",
"\" ended with \\\"{}\\\" or \\\"{}\\\".\"",
"\"\"",
")",
".",
"format",
"(",
"font_metadata",
".",
"post_script_name",
",",
"pair",
"[",
"0",
"]",
"[",
"1",
"]",
",",
"pair",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"pair",
"[",
"1",
"]",
"[",
"0",
"]",
")",
"else",
":",
"yield",
"PASS",
",",
"\"Weight value matches postScriptName.\""
] | 32.219512 | 19 |
def OnTimeToClose(self, evt):
"""Event handler for the button click."""
print("See ya later!")
sys.stdout.flush()
self.cleanup_consoles(evt)
self.Close()
# Not sure why, but our IPython kernel seems to prevent normal WX
# shutdown, so an explicit exit() call is needed.
sys.exit()
|
[
"def",
"OnTimeToClose",
"(",
"self",
",",
"evt",
")",
":",
"print",
"(",
"\"See ya later!\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"self",
".",
"cleanup_consoles",
"(",
"evt",
")",
"self",
".",
"Close",
"(",
")",
"# Not sure why, but our IPython kernel seems to prevent normal WX",
"# shutdown, so an explicit exit() call is needed.",
"sys",
".",
"exit",
"(",
")"
] | 37.333333 | 14.777778 |
def parse_form_action_url(html, parser=None):
"""Parse <form action="(.+)"> url
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:return: url str: for example: /login.php?act=security_check&to=&hash=12346
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
forms = parser.find_all('form')
if not forms:
raise VkParseError('Action form is not found in the html \n%s' % html)
if len(forms) > 1:
raise VkParseError('Found more than one form to handle:\n%s' % forms)
form = forms[0]
return form.get('action')
|
[
"def",
"parse_form_action_url",
"(",
"html",
",",
"parser",
"=",
"None",
")",
":",
"if",
"parser",
"is",
"None",
":",
"parser",
"=",
"bs4",
".",
"BeautifulSoup",
"(",
"html",
",",
"'html.parser'",
")",
"forms",
"=",
"parser",
".",
"find_all",
"(",
"'form'",
")",
"if",
"not",
"forms",
":",
"raise",
"VkParseError",
"(",
"'Action form is not found in the html \\n%s'",
"%",
"html",
")",
"if",
"len",
"(",
"forms",
")",
">",
"1",
":",
"raise",
"VkParseError",
"(",
"'Find more than 1 forms to handle:\\n%s'",
",",
"forms",
")",
"form",
"=",
"forms",
"[",
"0",
"]",
"return",
"form",
".",
"get",
"(",
"'action'",
")"
] | 35.470588 | 18.882353 |
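A sketch exercising `parse_form_action_url` on a tiny login page; `bs4` (BeautifulSoup) is the parser the function already depends on:

```python
html = '''
<html><body>
<form method="post" action="/login.php?act=security_check&to=&hash=12346">
<input name="user"/>
</form>
</body></html>
'''
print(parse_form_action_url(html))
# /login.php?act=security_check&to=&hash=12346
```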
def filter(self, source_file, encoding): # noqa A001
"""Parse XML file."""
sources = []
for content, filename, enc in self.get_content(source_file):
self.additional_context = self.get_context(filename)
sources.extend(self._filter(content, source_file, enc))
return sources
|
[
"def",
"filter",
"(",
"self",
",",
"source_file",
",",
"encoding",
")",
":",
"# noqa A001",
"sources",
"=",
"[",
"]",
"for",
"content",
",",
"filename",
",",
"enc",
"in",
"self",
".",
"get_content",
"(",
"source_file",
")",
":",
"self",
".",
"additional_context",
"=",
"self",
".",
"get_context",
"(",
"filename",
")",
"sources",
".",
"extend",
"(",
"self",
".",
"_filter",
"(",
"content",
",",
"source_file",
",",
"enc",
")",
")",
"return",
"sources"
] | 40.375 | 21.25 |
def linkCustomerToVerifiedUser(sender, **kwargs):
"""
If a Registration is processed in which the associated Customer does not yet
have a User, then check to see if the Customer's email address has been
verified as belonging to a specific User, and if that User does not already have
an associated Customer. If such a User is found, then associate this Customer with that
User. This way, if a new User verifies their email account before they have
submitted any Registrations, their Customer account is seamlessly linked when
they do complete their first Registration.
"""
registration = kwargs.get('registration', None)
if not registration or (hasattr(registration.customer,'user') and registration.customer.user):
return
logger.debug('Checking for User for Customer with no associated registration.')
customer = registration.customer
try:
verified_email = EmailAddress.objects.get(
email=customer.email,
verified=True,
primary=True,
user__customer__isnull=True
)
logger.info("Found user %s to associate with customer %s.", verified_email.user.id, customer.id)
customer.user = verified_email.user
customer.save()
if not customer.user.first_name and not customer.user.last_name:
customer.user.first_name = customer.first_name
customer.user.last_name = customer.last_name
customer.user.save()
except ObjectDoesNotExist:
logger.info("No user found to associate with customer %s.", customer.id)
except MultipleObjectsReturned:
# This should never happen, as email should be unique in the db table account_emailaddress.
# If it does, something's broken in the database or Django.
errmsg = "Something's not right with the database: more than one entry found on the database for the email %s. \
This duplicate key value violates unique constraint \"account_emailaddress_email_key\". \
The email field should be unique for each account.\n"
logger.exception(errmsg, customer.email)
|
[
"def",
"linkCustomerToVerifiedUser",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"registration",
"=",
"kwargs",
".",
"get",
"(",
"'registration'",
",",
"None",
")",
"if",
"not",
"registration",
"or",
"(",
"hasattr",
"(",
"registration",
".",
"customer",
",",
"'user'",
")",
"and",
"registration",
".",
"customer",
".",
"user",
")",
":",
"return",
"logger",
".",
"debug",
"(",
"'Checking for User for Customer with no associated registration.'",
")",
"customer",
"=",
"registration",
".",
"customer",
"try",
":",
"verified_email",
"=",
"EmailAddress",
".",
"objects",
".",
"get",
"(",
"email",
"=",
"customer",
".",
"email",
",",
"verified",
"=",
"True",
",",
"primary",
"=",
"True",
",",
"user__customer__isnull",
"=",
"True",
")",
"logger",
".",
"info",
"(",
"\"Found user %s to associate with customer %s.\"",
",",
"verified_email",
".",
"user",
".",
"id",
",",
"customer",
".",
"id",
")",
"customer",
".",
"user",
"=",
"verified_email",
".",
"user",
"customer",
".",
"save",
"(",
")",
"if",
"not",
"customer",
".",
"user",
".",
"first_name",
"and",
"not",
"customer",
".",
"user",
".",
"last_name",
":",
"customer",
".",
"user",
".",
"first_name",
"=",
"customer",
".",
"first_name",
"customer",
".",
"user",
".",
"last_name",
"=",
"customer",
".",
"last_name",
"customer",
".",
"user",
".",
"save",
"(",
")",
"except",
"ObjectDoesNotExist",
":",
"logger",
".",
"info",
"(",
"\"No user found to associate with customer %s.\"",
",",
"customer",
".",
"id",
")",
"except",
"MultipleObjectsReturned",
":",
"# This should never happen, as email should be unique in the db table account_emailaddress.",
"# If it does, something's broken in the database or Django.",
"errmsg",
"=",
"\"Something's not right with the database: more than one entry found on the database for the email %s. \\\n This duplicate key value violates unique constraint \\\"account_emailaddress_email_key\\\". \\\n The email field should be unique for each account.\\n\"",
"logger",
".",
"exception",
"(",
"errmsg",
",",
"customer",
".",
"email",
")"
] | 46.466667 | 27.933333 |
def make_temp_path(path, new_ext=None):
"""
Arguments:
new_ext: the new file extension, including the leading dot.
Defaults to preserving the existing file extension.
"""
root, ext = os.path.splitext(path)
if new_ext is None:
new_ext = ext
temp_path = root + TEMP_EXTENSION + new_ext
return temp_path
|
[
"def",
"make_temp_path",
"(",
"path",
",",
"new_ext",
"=",
"None",
")",
":",
"root",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"if",
"new_ext",
"is",
"None",
":",
"new_ext",
"=",
"ext",
"temp_path",
"=",
"root",
"+",
"TEMP_EXTENSION",
"+",
"new_ext",
"return",
"temp_path"
] | 26.153846 | 16.615385 |
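Usage sketch for `make_temp_path`, assuming `TEMP_EXTENSION` is a module-level constant such as `'.temp'` (its real value lives elsewhere in the source module):

```python
TEMP_EXTENSION = '.temp'  # assumed value; defined elsewhere in the real module

print(make_temp_path('/data/report.csv'))           # /data/report.temp.csv
print(make_temp_path('/data/report.csv', '.json'))  # /data/report.temp.json
```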
def runQueryWithRetry(self, *args, **kw):
"""
Run a database query, like with dbpool.runQuery, but retry the query in
case of a temporary error (like connection lost).
This is needed to be robust against things like database connection
idle timeouts."""
def runQuery(txn, *args, **kw):
txn.execute(*args, **kw)
return txn.fetchall()
return self.runInteractionWithRetry(runQuery, *args, **kw)
|
[
"def",
"runQueryWithRetry",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"def",
"runQuery",
"(",
"txn",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"txn",
".",
"execute",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"return",
"txn",
".",
"fetchall",
"(",
")",
"return",
"self",
".",
"runInteractionWithRetry",
"(",
"runQuery",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")"
] | 35.538462 | 19.230769 |
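
runInteractionWithRetry is not shown in this row; a self-contained sketch of the same retry-on-transient-error idea, using sqlite3 in place of Twisted's dbpool:

import sqlite3

def run_query_with_retry(db_path, sql, params=(), retries=2):
    # Retry the whole query on transient errors such as "database is locked".
    last_error = None
    for _ in range(retries + 1):
        try:
            with sqlite3.connect(db_path) as conn:
                return conn.execute(sql, params).fetchall()
        except sqlite3.OperationalError as err:
            last_error = err
    raise last_error
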
def copy(self, tagname, **kwargs):
"""
Returns a new instance of `TagWrap` using the given *tagname* that has
all the same attributes as this instance. If *kwargs* is given they
will override the attributes of the created instance.
"""
new_kwargs = {
'replacement': self.replacement,
'whitelist': self.whitelist,
'safe_mode': self.safe_mode,
'log_rejects': self.log_rejects,
'ending_slash': self.ending_slash
}
new_kwargs.update(**kwargs)
return TagWrap(tagname, **new_kwargs)
|
[
"def",
"copy",
"(",
"self",
",",
"tagname",
",",
"*",
"*",
"kwargs",
")",
":",
"new_kwargs",
"=",
"{",
"'replacement'",
":",
"self",
".",
"replacement",
",",
"'whitelist'",
":",
"self",
".",
"whitelist",
",",
"'safe_mode'",
":",
"self",
".",
"safe_mode",
",",
"'log_rejects'",
":",
"self",
".",
"log_rejects",
",",
"'ending_slash'",
":",
"self",
".",
"ending_slash",
"}",
"new_kwargs",
".",
"update",
"(",
"*",
"*",
"kwargs",
")",
"return",
"TagWrap",
"(",
"tagname",
",",
"*",
"*",
"new_kwargs",
")"
] | 39.666667 | 11.533333 |
def sort_index(self, ascending=True):
"""Sort the index of the DataFrame.
Currently MultiIndex is not supported since Weld is missing multiple-column sort.
Note this is an expensive operation (brings all data to Weld).
Parameters
----------
ascending : bool, optional
Returns
-------
DataFrame
DataFrame sorted according to the index.
"""
if isinstance(self.index, MultiIndex):
raise NotImplementedError('Weld does not yet support sorting on multiple columns')
return self.sort_values(self.index._gather_names(), ascending)
|
[
"def",
"sort_index",
"(",
"self",
",",
"ascending",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"index",
",",
"MultiIndex",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Weld does not yet support sorting on multiple columns'",
")",
"return",
"self",
".",
"sort_values",
"(",
"self",
".",
"index",
".",
"_gather_names",
"(",
")",
",",
"ascending",
")"
] | 29.952381 | 26.047619 |
def get_url(request, application, roles, label=None):
""" Retrieve a link that will work for the current user. """
args = []
if label is not None:
args.append(label)
# don't use secret_token unless we have to
if 'is_admin' in roles:
# Administrators can access anything without secrets
require_secret = False
elif 'is_applicant' not in roles:
# we never give secrets to anybody but the applicant
require_secret = False
elif not request.user.is_authenticated:
# If applicant is not logged in, we redirect them to secret URL
require_secret = True
elif request.user != application.applicant:
# If logged in as different person, we redirect them to secret
# URL. This could happen if the application was open with a different
# email address, and the applicant is logged in when accessing it.
require_secret = True
else:
# otherwise redirect them to URL that requires correct login.
require_secret = False
# return required url
if not require_secret:
url = reverse(
'kg_application_detail',
args=[application.pk, application.state] + args)
else:
url = reverse(
'kg_application_unauthenticated',
args=[application.secret_token, application.state] + args)
return url
|
[
"def",
"get_url",
"(",
"request",
",",
"application",
",",
"roles",
",",
"label",
"=",
"None",
")",
":",
"args",
"=",
"[",
"]",
"if",
"label",
"is",
"not",
"None",
":",
"args",
".",
"append",
"(",
"label",
")",
"# don't use secret_token unless we have to",
"if",
"'is_admin'",
"in",
"roles",
":",
"# Administrators can access anything without secrets",
"require_secret",
"=",
"False",
"elif",
"'is_applicant'",
"not",
"in",
"roles",
":",
"# we never give secrets to anybody but the applicant",
"require_secret",
"=",
"False",
"elif",
"not",
"request",
".",
"user",
".",
"is_authenticated",
":",
"# If applicant is not logged in, we redirect them to secret URL",
"require_secret",
"=",
"True",
"elif",
"request",
".",
"user",
"!=",
"application",
".",
"applicant",
":",
"# If logged in as different person, we redirect them to secret",
"# URL. This could happen if the application was open with a different",
"# email address, and the applicant is logged in when accessing it.",
"require_secret",
"=",
"True",
"else",
":",
"# otherwise redirect them to URL that requires correct login.",
"require_secret",
"=",
"False",
"# return required url",
"if",
"not",
"require_secret",
":",
"url",
"=",
"reverse",
"(",
"'kg_application_detail'",
",",
"args",
"=",
"[",
"application",
".",
"pk",
",",
"application",
".",
"state",
"]",
"+",
"args",
")",
"else",
":",
"url",
"=",
"reverse",
"(",
"'kg_application_unauthenticated'",
",",
"args",
"=",
"[",
"application",
".",
"secret_token",
",",
"application",
".",
"state",
"]",
"+",
"args",
")",
"return",
"url"
] | 38.514286 | 18.457143 |
def feed(self, data):
"""Consume some data and advances the state as necessary.
:param str data: a blob of data to feed from.
"""
send = self._send_to_parser
draw = self.listener.draw
match_text = self._text_pattern.match
taking_plain_text = self._taking_plain_text
length = len(data)
offset = 0
while offset < length:
if taking_plain_text:
match = match_text(data, offset)
if match:
start, offset = match.span()
draw(data[start:offset])
else:
taking_plain_text = False
else:
taking_plain_text = send(data[offset:offset + 1])
offset += 1
self._taking_plain_text = taking_plain_text
|
[
"def",
"feed",
"(",
"self",
",",
"data",
")",
":",
"send",
"=",
"self",
".",
"_send_to_parser",
"draw",
"=",
"self",
".",
"listener",
".",
"draw",
"match_text",
"=",
"self",
".",
"_text_pattern",
".",
"match",
"taking_plain_text",
"=",
"self",
".",
"_taking_plain_text",
"length",
"=",
"len",
"(",
"data",
")",
"offset",
"=",
"0",
"while",
"offset",
"<",
"length",
":",
"if",
"taking_plain_text",
":",
"match",
"=",
"match_text",
"(",
"data",
",",
"offset",
")",
"if",
"match",
":",
"start",
",",
"offset",
"=",
"match",
".",
"span",
"(",
")",
"draw",
"(",
"data",
"[",
"start",
":",
"offset",
"]",
")",
"else",
":",
"taking_plain_text",
"=",
"False",
"else",
":",
"taking_plain_text",
"=",
"send",
"(",
"data",
"[",
"offset",
":",
"offset",
"+",
"1",
"]",
")",
"offset",
"+=",
"1",
"self",
".",
"_taking_plain_text",
"=",
"taking_plain_text"
] | 32.48 | 14.56 |
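
The loop above alternates between a fast regex path for plain text and a byte-by-byte escape parser. A toy, self-contained version of the same two-mode loop (with a stand-in control handler; the real parser behind _send_to_parser is far richer):

import re

_TEXT = re.compile(r"[^\x1b]+")  # plain text: anything but ESC

def feed(data, draw, handle_control):
    offset, taking_plain_text = 0, True
    while offset < len(data):
        if taking_plain_text:
            match = _TEXT.match(data, offset)
            if match:
                start, offset = match.span()
                draw(data[start:offset])
            else:
                taking_plain_text = False
        else:
            # handle_control returns True once the escape sequence is finished
            taking_plain_text = handle_control(data[offset])
            offset += 1

feed("abc\x1bXdef", print, lambda c: c != "\x1b")  # prints 'abc' then 'def'
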
def has_cjk(self):
"""Checks if the word of the chunk contains CJK characters.
This is using unicode codepoint ranges from
https://github.com/nltk/nltk/blob/develop/nltk/tokenize/util.py#L149
Returns:
bool: True if the chunk has any CJK character.
"""
cjk_codepoint_ranges = [
(4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),
(63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)]
for char in self.word:
if any([start <= ord(char) <= end
for start, end in cjk_codepoint_ranges]):
return True
return False
|
[
"def",
"has_cjk",
"(",
"self",
")",
":",
"cjk_codepoint_ranges",
"=",
"[",
"(",
"4352",
",",
"4607",
")",
",",
"(",
"11904",
",",
"42191",
")",
",",
"(",
"43072",
",",
"43135",
")",
",",
"(",
"44032",
",",
"55215",
")",
",",
"(",
"63744",
",",
"64255",
")",
",",
"(",
"65072",
",",
"65103",
")",
",",
"(",
"65381",
",",
"65500",
")",
",",
"(",
"131072",
",",
"196607",
")",
"]",
"for",
"char",
"in",
"self",
".",
"word",
":",
"if",
"any",
"(",
"[",
"start",
"<=",
"ord",
"(",
"char",
")",
"<=",
"end",
"for",
"start",
",",
"end",
"in",
"cjk_codepoint_ranges",
"]",
")",
":",
"return",
"True",
"return",
"False"
] | 35.058824 | 19.411765 |
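
The same codepoint test works as a free function; a minimal standalone version:

CJK_RANGES = [
    (4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215),
    (63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)]

def has_cjk(word):
    return any(start <= ord(char) <= end
               for char in word for start, end in CJK_RANGES)

assert has_cjk("日本語")       # CJK Unified Ideographs sit inside (11904, 42191)
assert not has_cjk("latin")
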
def supports_coordinate_type(self, coordinate_type=None):
"""Tests if the given coordinate type is supported.
arg: coordinate_type (osid.type.Type): a coordinate Type
return: (boolean) - ``true`` if the type is supported, ``false``
otherwise
raise: IllegalState - syntax is not a ``COORDINATE``
raise: NullArgument - ``coordinate_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.Metadata.supports_coordinate_type
from .osid_errors import IllegalState, NullArgument
if not coordinate_type:
raise NullArgument('no input Type provided')
if self._kwargs['syntax'] not in ['``COORDINATE``']:
        raise IllegalState('put more meaningful message here')
return coordinate_type in self.get_coordinate_types
|
[
"def",
"supports_coordinate_type",
"(",
"self",
",",
"coordinate_type",
"=",
"None",
")",
":",
"# Implemented from template for osid.Metadata.supports_coordinate_type",
"from",
".",
"osid_errors",
"import",
"IllegalState",
",",
"NullArgument",
"if",
"not",
"coordinate_type",
":",
"raise",
"NullArgument",
"(",
"'no input Type provided'",
")",
"if",
"self",
".",
"_kwargs",
"[",
"'syntax'",
"]",
"not",
"in",
"[",
"'``COORDINATE``'",
"]",
":",
"raise",
"IllegalState",
"(",
"'put more meaninful message here'",
")",
"return",
"coordinate_type",
"in",
"self",
".",
"get_coordinate_types"
] | 49.388889 | 21.5 |
def send_output(self, value, stdout):
"""Write the output or value of the expression back to user.
>>> 5
5
>>> print('cash rules everything around me')
cash rules everything around me
"""
writer = self.writer
if value is not None:
writer.write('{!r}\n'.format(value).encode('utf8'))
if stdout:
writer.write(stdout.encode('utf8'))
yield from writer.drain()
|
[
"def",
"send_output",
"(",
"self",
",",
"value",
",",
"stdout",
")",
":",
"writer",
"=",
"self",
".",
"writer",
"if",
"value",
"is",
"not",
"None",
":",
"writer",
".",
"write",
"(",
"'{!r}\\n'",
".",
"format",
"(",
"value",
")",
".",
"encode",
"(",
"'utf8'",
")",
")",
"if",
"stdout",
":",
"writer",
".",
"write",
"(",
"stdout",
".",
"encode",
"(",
"'utf8'",
")",
")",
"yield",
"from",
"writer",
".",
"drain",
"(",
")"
] | 24.833333 | 19.777778 |
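
send_output is written in the pre-async/await coroutine style. A sketch of the equivalent modern form, assuming writer is an asyncio.StreamWriter:

import asyncio

async def send_output(writer: asyncio.StreamWriter, value, stdout):
    if value is not None:
        writer.write("{!r}\n".format(value).encode("utf8"))
    if stdout:
        writer.write(stdout.encode("utf8"))
    await writer.drain()  # replaces `yield from writer.drain()`
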
def find_projects(self, file_identifier=".project"):
""" Search all directory recursively for subdirs
with `file_identifier' in it.
:type file_identifier: str
:param file_identifier: File identier, .project by default.
:rtype: list
:return: The list of subdirs with a `file_identifier' in it.
"""
projects = []
for d in self.subdirs():
project_file = os.path.join(self.directory, d, file_identifier)
if os.path.isfile(project_file):
projects.append(d)
return projects
|
[
"def",
"find_projects",
"(",
"self",
",",
"file_identifier",
"=",
"\".project\"",
")",
":",
"projects",
"=",
"[",
"]",
"for",
"d",
"in",
"self",
".",
"subdirs",
"(",
")",
":",
"project_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"directory",
",",
"d",
",",
"file_identifier",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"project_file",
")",
":",
"projects",
".",
"append",
"(",
"d",
")",
"return",
"projects"
] | 33.764706 | 17.941176 |
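
A standalone sketch of the same scan (one directory level, for brevity; the subdirs helper used above is not part of this row):

import os

def find_projects(directory, file_identifier=".project"):
    # Keep only subdirectories that contain the marker file.
    return [d for d in sorted(os.listdir(directory))
            if os.path.isfile(os.path.join(directory, d, file_identifier))]
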
def private_config_content(self, private_config):
"""
Update the private config
:param private_config: content of the private configuration file
"""
try:
private_config_path = os.path.join(self.working_dir, "private-config.cfg")
if private_config is None:
private_config = ''
# We disallow erasing the private config file
if len(private_config) == 0 and os.path.exists(private_config_path):
return
with open(private_config_path, 'w+', encoding='utf-8') as f:
if len(private_config) == 0:
f.write('')
else:
private_config = private_config.replace("%h", self._name)
f.write(private_config)
except OSError as e:
raise IOUError("Can't write private-config file '{}': {}".format(private_config_path, e))
|
[
"def",
"private_config_content",
"(",
"self",
",",
"private_config",
")",
":",
"try",
":",
"private_config_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"working_dir",
",",
"\"private-config.cfg\"",
")",
"if",
"private_config",
"is",
"None",
":",
"private_config",
"=",
"''",
"# We disallow erasing the private config file",
"if",
"len",
"(",
"private_config",
")",
"==",
"0",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"private_config_path",
")",
":",
"return",
"with",
"open",
"(",
"private_config_path",
",",
"'w+'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"if",
"len",
"(",
"private_config",
")",
"==",
"0",
":",
"f",
".",
"write",
"(",
"''",
")",
"else",
":",
"private_config",
"=",
"private_config",
".",
"replace",
"(",
"\"%h\"",
",",
"self",
".",
"_name",
")",
"f",
".",
"write",
"(",
"private_config",
")",
"except",
"OSError",
"as",
"e",
":",
"raise",
"IOUError",
"(",
"\"Can't write private-config file '{}': {}\"",
".",
"format",
"(",
"private_config_path",
",",
"e",
")",
")"
] | 36.92 | 23.24 |
def run(self, grid=None, num_of_paths=2000, seed=0, num_of_workers=CPU_COUNT, profiling=False):
"""
    Implements the simulation.
:param list(date) grid: list of Monte Carlo grid dates
:param int num_of_paths: number of Monte Carlo paths
:param hashable seed: seed used for rnds initialisation (additional adjustment in place)
:param int or None num_of_workers: number of parallel workers (default: cpu_count()),
if None no parallel processing is used
:param bool profiling: signal whether to use profiling, True means used, else not
:return object: final consumer state
    It returns a list of lists: one inner list per path, each produced by the consumer at the observation dates.
"""
self.grid = sorted(set(grid))
self.num_of_paths = num_of_paths
self.num_of_workers = num_of_workers
self.seed = seed
# pre processing
self.producer.initialize(self.grid, self.num_of_paths, self.seed)
self.consumer.initialize(self.grid, self.num_of_paths, self.seed)
if num_of_workers:
# processing
workers = list()
queue = Queue()
path_per_worker = int(num_of_paths // num_of_workers)
start_path, stop_path = 0, path_per_worker
for i in range(num_of_workers):
if i == num_of_workers - 1:
stop_path = num_of_paths # ensure exact num of path as required
name = 'worker-%d' % i
if profiling:
# display profile with `snakeviz worker-0.prof`
# if not installed `pip install snakeviz`
workers.append(Process(target=self._run_parallel_process_with_profiling,
name=name,
args=(start_path, stop_path, queue, name + '.prof')))
else:
workers.append(Process(target=self._run_parallel_process,
name=name,
args=(start_path, stop_path, queue)))
start_path, stop_path = stop_path, stop_path + path_per_worker
for worker in workers:
worker.start()
# post processing
for _ in range(num_of_workers):
self.consumer.get(queue.get())
for worker in workers:
worker.join()
else:
self._run_process(0, num_of_paths)
self.consumer.finalize()
return self.consumer.result
|
[
"def",
"run",
"(",
"self",
",",
"grid",
"=",
"None",
",",
"num_of_paths",
"=",
"2000",
",",
"seed",
"=",
"0",
",",
"num_of_workers",
"=",
"CPU_COUNT",
",",
"profiling",
"=",
"False",
")",
":",
"self",
".",
"grid",
"=",
"sorted",
"(",
"set",
"(",
"grid",
")",
")",
"self",
".",
"num_of_paths",
"=",
"num_of_paths",
"self",
".",
"num_of_workers",
"=",
"num_of_workers",
"self",
".",
"seed",
"=",
"seed",
"# pre processing",
"self",
".",
"producer",
".",
"initialize",
"(",
"self",
".",
"grid",
",",
"self",
".",
"num_of_paths",
",",
"self",
".",
"seed",
")",
"self",
".",
"consumer",
".",
"initialize",
"(",
"self",
".",
"grid",
",",
"self",
".",
"num_of_paths",
",",
"self",
".",
"seed",
")",
"if",
"num_of_workers",
":",
"# processing",
"workers",
"=",
"list",
"(",
")",
"queue",
"=",
"Queue",
"(",
")",
"path_per_worker",
"=",
"int",
"(",
"num_of_paths",
"//",
"num_of_workers",
")",
"start_path",
",",
"stop_path",
"=",
"0",
",",
"path_per_worker",
"for",
"i",
"in",
"range",
"(",
"num_of_workers",
")",
":",
"if",
"i",
"==",
"num_of_workers",
"-",
"1",
":",
"stop_path",
"=",
"num_of_paths",
"# ensure exact num of path as required",
"name",
"=",
"'worker-%d'",
"%",
"i",
"if",
"profiling",
":",
"# display profile with `snakeviz worker-0.prof`",
"# if not installed `pip install snakeviz`",
"workers",
".",
"append",
"(",
"Process",
"(",
"target",
"=",
"self",
".",
"_run_parallel_process_with_profiling",
",",
"name",
"=",
"name",
",",
"args",
"=",
"(",
"start_path",
",",
"stop_path",
",",
"queue",
",",
"name",
"+",
"'.prof'",
")",
")",
")",
"else",
":",
"workers",
".",
"append",
"(",
"Process",
"(",
"target",
"=",
"self",
".",
"_run_parallel_process",
",",
"name",
"=",
"name",
",",
"args",
"=",
"(",
"start_path",
",",
"stop_path",
",",
"queue",
")",
")",
")",
"start_path",
",",
"stop_path",
"=",
"stop_path",
",",
"stop_path",
"+",
"path_per_worker",
"for",
"worker",
"in",
"workers",
":",
"worker",
".",
"start",
"(",
")",
"# post processing",
"for",
"_",
"in",
"range",
"(",
"num_of_workers",
")",
":",
"self",
".",
"consumer",
".",
"get",
"(",
"queue",
".",
"get",
"(",
")",
")",
"for",
"worker",
"in",
"workers",
":",
"worker",
".",
"join",
"(",
")",
"else",
":",
"self",
".",
"_run_process",
"(",
"0",
",",
"num_of_paths",
")",
"self",
".",
"consumer",
".",
"finalize",
"(",
")",
"return",
"self",
".",
"consumer",
".",
"result"
] | 44.534483 | 21.465517 |
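
The worker partitioning gives each worker num_of_paths // num_of_workers paths and lets the last worker absorb the remainder. That arithmetic in isolation:

num_of_paths, num_of_workers = 2000, 6
per_worker = num_of_paths // num_of_workers           # 333
slices, start, stop = [], 0, per_worker
for i in range(num_of_workers):
    if i == num_of_workers - 1:
        stop = num_of_paths                           # last worker gets 335 paths
    slices.append((start, stop))
    start, stop = stop, stop + per_worker
assert slices[-1] == (1665, 2000)
assert sum(b - a for a, b in slices) == num_of_paths  # no path lost or duplicated
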
def static(self, uri, file_or_directory, *args, **kwargs):
"""Create a blueprint static route from a decorated function.
:param uri: endpoint at which the route will be accessible.
:param file_or_directory: Static asset.
"""
static = FutureStatic(uri, file_or_directory, args, kwargs)
self.statics.append(static)
|
[
"def",
"static",
"(",
"self",
",",
"uri",
",",
"file_or_directory",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"static",
"=",
"FutureStatic",
"(",
"uri",
",",
"file_or_directory",
",",
"args",
",",
"kwargs",
")",
"self",
".",
"statics",
".",
"append",
"(",
"static",
")"
] | 44.25 | 15.5 |
def build_parameters(self):
"""
Build the CLI command line from the parameter values.
:return: list of CLI strings -- not escaped!
:rtype: list[str]
"""
param_bits = []
for name in self.parameters:
param_bits.extend(self.build_parameter_by_name(name) or [])
return param_bits
|
[
"def",
"build_parameters",
"(",
"self",
")",
":",
"param_bits",
"=",
"[",
"]",
"for",
"name",
"in",
"self",
".",
"parameters",
":",
"param_bits",
".",
"extend",
"(",
"self",
".",
"build_parameter_by_name",
"(",
"name",
")",
"or",
"[",
"]",
")",
"return",
"param_bits"
] | 31.090909 | 15.272727 |
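
build_parameter_by_name is not shown in this row; a minimal sketch of the surrounding pattern with a hypothetical flag-style builder (the names below are illustrative only):

class Command:
    parameters = {"verbose": True, "output": "out.txt", "dry_run": False}

    def build_parameter_by_name(self, name):
        value = self.parameters[name]
        flag = "--" + name.replace("_", "-")
        if value is True:
            return [flag]
        if value in (False, None):
            return None              # falsy results are skipped by `or []`
        return [flag, str(value)]

    def build_parameters(self):
        param_bits = []
        for name in self.parameters:
            param_bits.extend(self.build_parameter_by_name(name) or [])
        return param_bits

print(Command().build_parameters())  # ['--verbose', '--output', 'out.txt']
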
def copy(self):
"""
Return a new :class:`~pywbem.CIMParameter` object that is a copy
of this CIM parameter.
This is a middle-deep copy; any mutable types in attributes except the
following are copied, so besides these exceptions, modifications of the
original object will not affect the returned copy, and vice versa. The
following mutable types are not copied and are therefore shared between
original and copy:
* The :class:`~pywbem.CIMQualifier` objects in the
:attr:`~pywbem.CIMParameter.qualifiers` dictionary (but not the
dictionary object itself)
Note that the Python functions :func:`py:copy.copy` and
:func:`py:copy.deepcopy` can be used to create completely shallow or
completely deep copies of objects of this class.
"""
return CIMParameter(
self.name,
self.type,
reference_class=self.reference_class,
is_array=self.is_array,
array_size=self.array_size,
value=self.value,
embedded_object=self.embedded_object,
qualifiers=self.qualifiers)
|
[
"def",
"copy",
"(",
"self",
")",
":",
"return",
"CIMParameter",
"(",
"self",
".",
"name",
",",
"self",
".",
"type",
",",
"reference_class",
"=",
"self",
".",
"reference_class",
",",
"is_array",
"=",
"self",
".",
"is_array",
",",
"array_size",
"=",
"self",
".",
"array_size",
",",
"value",
"=",
"self",
".",
"value",
",",
"embedded_object",
"=",
"self",
".",
"embedded_object",
",",
"qualifiers",
"=",
"self",
".",
"qualifiers",
")"
] | 41.142857 | 20.357143 |
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force = -dphi/dR
HISTORY:
2015-02-13 - Written - Trick (MPIA)
"""
l,n = bovy_coords.Rz_to_lambdanu (R,z,ac=self._ac,Delta=self._Delta)
jac = bovy_coords.Rz_to_lambdanu_jac(R,z, Delta=self._Delta)
dldR = jac[0,0]
dndR = jac[1,0]
return - (dldR * self._lderiv(l,n) + \
dndR * self._nderiv(l,n))
|
[
"def",
"_Rforce",
"(",
"self",
",",
"R",
",",
"z",
",",
"phi",
"=",
"0.",
",",
"t",
"=",
"0.",
")",
":",
"l",
",",
"n",
"=",
"bovy_coords",
".",
"Rz_to_lambdanu",
"(",
"R",
",",
"z",
",",
"ac",
"=",
"self",
".",
"_ac",
",",
"Delta",
"=",
"self",
".",
"_Delta",
")",
"jac",
"=",
"bovy_coords",
".",
"Rz_to_lambdanu_jac",
"(",
"R",
",",
"z",
",",
"Delta",
"=",
"self",
".",
"_Delta",
")",
"dldR",
"=",
"jac",
"[",
"0",
",",
"0",
"]",
"dndR",
"=",
"jac",
"[",
"1",
",",
"0",
"]",
"return",
"-",
"(",
"dldR",
"*",
"self",
".",
"_lderiv",
"(",
"l",
",",
"n",
")",
"+",
"dndR",
"*",
"self",
".",
"_nderiv",
"(",
"l",
",",
"n",
")",
")"
] | 32.227273 | 15.954545 |
def delete_key(self, key_name, headers=None,
version_id=None, mfa_token=None, callback=None):
"""
Deletes a key from the bucket. If a version_id is provided,
only that version of the key will be deleted.
:type key_name: string
:param key_name: The key name to delete
:type version_id: string
:param version_id: The version ID (optional)
:type mfa_token: tuple or list of strings
:param mfa_token: A tuple or list consisting of the serial number
from the MFA device and the current value of
the six-digit token associated with the device.
This value is required anytime you are
deleting versioned objects from a bucket
that has the MFADelete option on the bucket.
"""
provider = self.connection.provider
if version_id:
query_args = 'versionId=%s' % version_id
else:
query_args = None
if mfa_token:
if not headers:
headers = {}
headers[provider.mfa_header] = ' '.join(mfa_token)
def key_deleted(response):
body = response.read()
if response.status != 204:
raise provider.storage_response_error(response.status,
response.reason, body)
if callable(callback):
callback(True)
self.connection.make_request('DELETE', self.name, key_name,
headers=headers,
query_args=query_args, callback=key_deleted)
|
[
"def",
"delete_key",
"(",
"self",
",",
"key_name",
",",
"headers",
"=",
"None",
",",
"version_id",
"=",
"None",
",",
"mfa_token",
"=",
"None",
",",
"callback",
"=",
"None",
")",
":",
"provider",
"=",
"self",
".",
"connection",
".",
"provider",
"if",
"version_id",
":",
"query_args",
"=",
"'versionId=%s'",
"%",
"version_id",
"else",
":",
"query_args",
"=",
"None",
"if",
"mfa_token",
":",
"if",
"not",
"headers",
":",
"headers",
"=",
"{",
"}",
"headers",
"[",
"provider",
".",
"mfa_header",
"]",
"=",
"' '",
".",
"join",
"(",
"mfa_token",
")",
"def",
"key_deleted",
"(",
"response",
")",
":",
"body",
"=",
"response",
".",
"read",
"(",
")",
"if",
"response",
".",
"status",
"!=",
"204",
":",
"raise",
"provider",
".",
"storage_response_error",
"(",
"response",
".",
"status",
",",
"response",
".",
"reason",
",",
"body",
")",
"if",
"callable",
"(",
"callback",
")",
":",
"callback",
"(",
"True",
")",
"self",
".",
"connection",
".",
"make_request",
"(",
"'DELETE'",
",",
"self",
".",
"name",
",",
"key_name",
",",
"headers",
"=",
"headers",
",",
"query_args",
"=",
"query_args",
",",
"callback",
"=",
"key_deleted",
")"
] | 43.846154 | 18.25641 |
def get(self, key, func=None, args=(), kwargs=None, **opts):
"""Manually retrieve a value from the cache, calculating as needed.
Params:
key -> string to store/retrieve value from.
func -> callable to generate value if it does not exist, or has
expired.
args -> positional arguments to call the function with.
kwargs -> keyword arguments to call the function with.
Keyword Params (options):
These will be combined with region values (as selected by the
"region" keyword argument, and then selected by "parent" values
of those regions all the way up the chain to the "default" region).
namespace -> string prefix to apply to the key before get/set.
lock -> lock constructor. See README.
expiry -> float unix expiration time.
max_age -> float number of seconds until the value expires. Only
provide expiry OR max_age, not both.
"""
kwargs = kwargs or {}
key, store = self._expand_opts(key, opts)
# Resolve the etag.
opts['etag'] = call_or_pass(opts.get('etag') or opts.get('etagger'), args, kwargs)
if not isinstance(key, str):
raise TypeError('non-string key of type %s' % type(key))
data = store.get(key)
if data is not None:
if not self._has_expired(data, opts):
return data[VALUE_INDEX]
if func is None:
return None
# Prioritize passed options over a store's native lock.
lock_func = opts.get('lock') or getattr(store, 'lock', None)
lock = lock_func and lock_func(key)
locked = lock and lock.acquire(opts.get('timeout', DEFAULT_TIMEOUT))
try:
value = func(*args, **kwargs)
finally:
if locked:
lock.release()
creation = time()
expiry = call_or_pass(opts.get('expiry'), args, kwargs)
max_age = call_or_pass(opts.get('max_age'), args, kwargs)
if max_age is not None:
expiry = min(x for x in (expiry, creation + max_age) if x is not None)
# Need to be careful as this is the only place where we do not use the
# lovely index constants.
store[key] = (CURRENT_PROTOCOL_VERSION, creation, expiry, opts.get('etag'), value)
return value
|
[
"def",
"get",
"(",
"self",
",",
"key",
",",
"func",
"=",
"None",
",",
"args",
"=",
"(",
")",
",",
"kwargs",
"=",
"None",
",",
"*",
"*",
"opts",
")",
":",
"kwargs",
"=",
"kwargs",
"or",
"{",
"}",
"key",
",",
"store",
"=",
"self",
".",
"_expand_opts",
"(",
"key",
",",
"opts",
")",
"# Resolve the etag.",
"opts",
"[",
"'etag'",
"]",
"=",
"call_or_pass",
"(",
"opts",
".",
"get",
"(",
"'etag'",
")",
"or",
"opts",
".",
"get",
"(",
"'etagger'",
")",
",",
"args",
",",
"kwargs",
")",
"if",
"not",
"isinstance",
"(",
"key",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'non-string key of type %s'",
"%",
"type",
"(",
"key",
")",
")",
"data",
"=",
"store",
".",
"get",
"(",
"key",
")",
"if",
"data",
"is",
"not",
"None",
":",
"if",
"not",
"self",
".",
"_has_expired",
"(",
"data",
",",
"opts",
")",
":",
"return",
"data",
"[",
"VALUE_INDEX",
"]",
"if",
"func",
"is",
"None",
":",
"return",
"None",
"# Prioritize passed options over a store's native lock.",
"lock_func",
"=",
"opts",
".",
"get",
"(",
"'lock'",
")",
"or",
"getattr",
"(",
"store",
",",
"'lock'",
",",
"None",
")",
"lock",
"=",
"lock_func",
"and",
"lock_func",
"(",
"key",
")",
"locked",
"=",
"lock",
"and",
"lock",
".",
"acquire",
"(",
"opts",
".",
"get",
"(",
"'timeout'",
",",
"DEFAULT_TIMEOUT",
")",
")",
"try",
":",
"value",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"finally",
":",
"if",
"locked",
":",
"lock",
".",
"release",
"(",
")",
"creation",
"=",
"time",
"(",
")",
"expiry",
"=",
"call_or_pass",
"(",
"opts",
".",
"get",
"(",
"'expiry'",
")",
",",
"args",
",",
"kwargs",
")",
"max_age",
"=",
"call_or_pass",
"(",
"opts",
".",
"get",
"(",
"'max_age'",
")",
",",
"args",
",",
"kwargs",
")",
"if",
"max_age",
"is",
"not",
"None",
":",
"expiry",
"=",
"min",
"(",
"x",
"for",
"x",
"in",
"(",
"expiry",
",",
"creation",
"+",
"max_age",
")",
"if",
"x",
"is",
"not",
"None",
")",
"# Need to be careful as this is the only place where we do not use the",
"# lovely index constants.",
"store",
"[",
"key",
"]",
"=",
"(",
"CURRENT_PROTOCOL_VERSION",
",",
"creation",
",",
"expiry",
",",
"opts",
".",
"get",
"(",
"'etag'",
")",
",",
"value",
")",
"return",
"value"
] | 38.655738 | 24.229508 |
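
The expiry bookkeeping is the subtle part: when both expiry and max_age are supplied, the earlier deadline wins. That merge in isolation:

from time import time

creation = time()
expiry, max_age = None, 60.0
if max_age is not None:
    expiry = min(x for x in (expiry, creation + max_age) if x is not None)
assert abs(expiry - (creation + 60.0)) < 1e-6   # only max_age was set
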
def cnst_AT(self, Y):
r"""Compute :math:`A^T \mathbf{y}`. In this case
:math:`A^T \mathbf{y} = (I \;\; \Gamma_0^T \;\; \Gamma_1^T \;\;
\ldots) \mathbf{y}`.
"""
return self.cnst_A0T(self.block_sep0(Y)) + \
np.sum(self.cnst_A1T(self.block_sep1(Y)), axis=-1)
|
[
"def",
"cnst_AT",
"(",
"self",
",",
"Y",
")",
":",
"return",
"self",
".",
"cnst_A0T",
"(",
"self",
".",
"block_sep0",
"(",
"Y",
")",
")",
"+",
"np",
".",
"sum",
"(",
"self",
".",
"cnst_A1T",
"(",
"self",
".",
"block_sep1",
"(",
"Y",
")",
")",
",",
"axis",
"=",
"-",
"1",
")"
] | 37.625 | 17 |
def add_adjust(self, data, prehashed=False):
"""Add a new leaf, and adjust the tree, without rebuilding the whole thing.
"""
subtrees = self._get_whole_subtrees()
new_node = Node(data, prehashed=prehashed)
self.leaves.append(new_node)
for node in reversed(subtrees):
new_parent = Node(node.val + new_node.val)
node.p, new_node.p = new_parent, new_parent
new_parent.l, new_parent.r = node, new_node
node.sib, new_node.sib = new_node, node
node.side, new_node.side = 'L', 'R'
new_node = new_node.p
self.root = new_node
|
[
"def",
"add_adjust",
"(",
"self",
",",
"data",
",",
"prehashed",
"=",
"False",
")",
":",
"subtrees",
"=",
"self",
".",
"_get_whole_subtrees",
"(",
")",
"new_node",
"=",
"Node",
"(",
"data",
",",
"prehashed",
"=",
"prehashed",
")",
"self",
".",
"leaves",
".",
"append",
"(",
"new_node",
")",
"for",
"node",
"in",
"reversed",
"(",
"subtrees",
")",
":",
"new_parent",
"=",
"Node",
"(",
"node",
".",
"val",
"+",
"new_node",
".",
"val",
")",
"node",
".",
"p",
",",
"new_node",
".",
"p",
"=",
"new_parent",
",",
"new_parent",
"new_parent",
".",
"l",
",",
"new_parent",
".",
"r",
"=",
"node",
",",
"new_node",
"node",
".",
"sib",
",",
"new_node",
".",
"sib",
"=",
"new_node",
",",
"node",
"node",
".",
"side",
",",
"new_node",
".",
"side",
"=",
"'L'",
",",
"'R'",
"new_node",
"=",
"new_node",
".",
"p",
"self",
".",
"root",
"=",
"new_node"
] | 45.071429 | 7.5 |
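
The append is cheap because a tree over 2^a + 2^b + ... leaves (a > b > ...) decomposes into whole subtrees, and folding the new leaf in from the smallest subtree upward touches only O(log n) nodes. A value-only sketch, with string concatenation standing in for the hash combine:

def fold_in(subtree_roots, new_val):
    for val in reversed(subtree_roots):   # smallest whole subtree first
        new_val = val + new_val
    return new_val                        # value of the new root

# Whole subtrees of a 3-leaf tree: the 2-leaf root "ab" and the lone leaf "c".
assert fold_in(["ab", "c"], "d") == "abcd"
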
def roc_values(fg_vals, bg_vals):
"""
Return fpr (x) and tpr (y) of the ROC curve.
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
Returns
-------
fpr : array
False positive rate.
tpr : array
True positive rate.
"""
if len(fg_vals) == 0:
return 0
y_true, y_score = values_to_labels(fg_vals, bg_vals)
fpr, tpr, _thresholds = roc_curve(y_true, y_score)
return fpr, tpr
|
[
"def",
"roc_values",
"(",
"fg_vals",
",",
"bg_vals",
")",
":",
"if",
"len",
"(",
"fg_vals",
")",
"==",
"0",
":",
"return",
"0",
"y_true",
",",
"y_score",
"=",
"values_to_labels",
"(",
"fg_vals",
",",
"bg_vals",
")",
"fpr",
",",
"tpr",
",",
"_thresholds",
"=",
"roc_curve",
"(",
"y_true",
",",
"y_score",
")",
"return",
"fpr",
",",
"tpr"
] | 20.62963 | 20.925926 |
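
values_to_labels is not shown here; it presumably concatenates the two score lists and labels them 1/0. The equivalent call straight through scikit-learn:

import numpy as np
from sklearn.metrics import roc_curve

fg_vals = [0.9, 0.8, 0.7]   # positive-set scores
bg_vals = [0.6, 0.4, 0.2]   # negative-set scores
y_true = np.r_[np.ones(len(fg_vals)), np.zeros(len(bg_vals))]
y_score = np.r_[fg_vals, bg_vals]
fpr, tpr, _ = roc_curve(y_true, y_score)
print(fpr, tpr)             # perfect separation: tpr reaches 1.0 while fpr is 0.0
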
def load_from_file(swag_path, swag_type='yml', root_path=None):
"""
Load specs from YAML file
"""
if swag_type not in ('yaml', 'yml'):
raise AttributeError("Currently only yaml or yml supported")
# TODO: support JSON
try:
enc = detect_by_bom(swag_path)
with codecs.open(swag_path, encoding=enc) as yaml_file:
return yaml_file.read()
except IOError:
# not in the same dir, add dirname
swag_path = os.path.join(
root_path or os.path.dirname(__file__), swag_path
)
try:
enc = detect_by_bom(swag_path)
with codecs.open(swag_path, encoding=enc) as yaml_file:
return yaml_file.read()
except IOError: # pragma: no cover
# if package dir
# see https://github.com/rochacbruno/flasgger/pull/104
# Still not able to reproduce this case
# test are in examples/package_example
# need more detail on how to reproduce IOError here
swag_path = swag_path.replace("/", os.sep).replace("\\", os.sep)
path = swag_path.replace(
(root_path or os.path.dirname(__file__)), ''
).split(os.sep)[1:]
site_package = imp.find_module(path[0])[1]
swag_path = os.path.join(site_package, os.sep.join(path[1:]))
with open(swag_path) as yaml_file:
return yaml_file.read()
|
[
"def",
"load_from_file",
"(",
"swag_path",
",",
"swag_type",
"=",
"'yml'",
",",
"root_path",
"=",
"None",
")",
":",
"if",
"swag_type",
"not",
"in",
"(",
"'yaml'",
",",
"'yml'",
")",
":",
"raise",
"AttributeError",
"(",
"\"Currently only yaml or yml supported\"",
")",
"# TODO: support JSON",
"try",
":",
"enc",
"=",
"detect_by_bom",
"(",
"swag_path",
")",
"with",
"codecs",
".",
"open",
"(",
"swag_path",
",",
"encoding",
"=",
"enc",
")",
"as",
"yaml_file",
":",
"return",
"yaml_file",
".",
"read",
"(",
")",
"except",
"IOError",
":",
"# not in the same dir, add dirname",
"swag_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_path",
"or",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"swag_path",
")",
"try",
":",
"enc",
"=",
"detect_by_bom",
"(",
"swag_path",
")",
"with",
"codecs",
".",
"open",
"(",
"swag_path",
",",
"encoding",
"=",
"enc",
")",
"as",
"yaml_file",
":",
"return",
"yaml_file",
".",
"read",
"(",
")",
"except",
"IOError",
":",
"# pragma: no cover",
"# if package dir",
"# see https://github.com/rochacbruno/flasgger/pull/104",
"# Still not able to reproduce this case",
"# test are in examples/package_example",
"# need more detail on how to reproduce IOError here",
"swag_path",
"=",
"swag_path",
".",
"replace",
"(",
"\"/\"",
",",
"os",
".",
"sep",
")",
".",
"replace",
"(",
"\"\\\\\"",
",",
"os",
".",
"sep",
")",
"path",
"=",
"swag_path",
".",
"replace",
"(",
"(",
"root_path",
"or",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
",",
"''",
")",
".",
"split",
"(",
"os",
".",
"sep",
")",
"[",
"1",
":",
"]",
"site_package",
"=",
"imp",
".",
"find_module",
"(",
"path",
"[",
"0",
"]",
")",
"[",
"1",
"]",
"swag_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"site_package",
",",
"os",
".",
"sep",
".",
"join",
"(",
"path",
"[",
"1",
":",
"]",
")",
")",
"with",
"open",
"(",
"swag_path",
")",
"as",
"yaml_file",
":",
"return",
"yaml_file",
".",
"read",
"(",
")"
] | 40.771429 | 14.942857 |
def _from_dict(cls, _dict):
"""Initialize a UtteranceAnalyses object from a json dictionary."""
args = {}
if 'utterances_tone' in _dict:
args['utterances_tone'] = [
UtteranceAnalysis._from_dict(x)
for x in (_dict.get('utterances_tone'))
]
else:
raise ValueError(
'Required property \'utterances_tone\' not present in UtteranceAnalyses JSON'
)
if 'warning' in _dict:
args['warning'] = _dict.get('warning')
return cls(**args)
|
[
"def",
"_from_dict",
"(",
"cls",
",",
"_dict",
")",
":",
"args",
"=",
"{",
"}",
"if",
"'utterances_tone'",
"in",
"_dict",
":",
"args",
"[",
"'utterances_tone'",
"]",
"=",
"[",
"UtteranceAnalysis",
".",
"_from_dict",
"(",
"x",
")",
"for",
"x",
"in",
"(",
"_dict",
".",
"get",
"(",
"'utterances_tone'",
")",
")",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Required property \\'utterances_tone\\' not present in UtteranceAnalyses JSON'",
")",
"if",
"'warning'",
"in",
"_dict",
":",
"args",
"[",
"'warning'",
"]",
"=",
"_dict",
".",
"get",
"(",
"'warning'",
")",
"return",
"cls",
"(",
"*",
"*",
"args",
")"
] | 37.666667 | 16 |
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return Setting(key)
if key not in Setting._member_map_:
extend_enum(Setting, key, default)
return Setting[key]
|
[
"def",
"get",
"(",
"key",
",",
"default",
"=",
"-",
"1",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"int",
")",
":",
"return",
"Setting",
"(",
"key",
")",
"if",
"key",
"not",
"in",
"Setting",
".",
"_member_map_",
":",
"extend_enum",
"(",
"Setting",
",",
"key",
",",
"default",
")",
"return",
"Setting",
"[",
"key",
"]"
] | 36.285714 | 7.714286 |
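
extend_enum here matches the helper from the aenum package; the backport adds unknown string keys on the fly instead of raising KeyError. A self-contained illustration (requires `pip install aenum`):

from aenum import IntEnum, extend_enum

class Setting(IntEnum):
    HEADER_TABLE_SIZE = 1

def get(key, default=-1):
    if isinstance(key, int):
        return Setting(key)
    if key not in Setting._member_map_:
        extend_enum(Setting, key, default)
    return Setting[key]

print(get(1))                  # Setting.HEADER_TABLE_SIZE
print(get("SOME_NEW_SETTING")) # member created on first lookup, value -1
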
def auto_load_app_modules(self, modules):
"""Auto load app modules"""
for app in apps.get_app_configs():
for module in modules:
try:
import_module('{}.{}'.format(app.module.__package__, module))
except ImportError:
pass
|
[
"def",
"auto_load_app_modules",
"(",
"self",
",",
"modules",
")",
":",
"for",
"app",
"in",
"apps",
".",
"get_app_configs",
"(",
")",
":",
"for",
"module",
"in",
"modules",
":",
"try",
":",
"import_module",
"(",
"'{}.{}'",
".",
"format",
"(",
"app",
".",
"module",
".",
"__package__",
",",
"module",
")",
")",
"except",
"ImportError",
":",
"pass"
] | 39 | 11.375 |
def isheader(self, line):
"""Determine whether a given line is a legal header.
This method should return the header name, suitably canonicalized.
You may override this method in order to use Message parsing on tagged
data in RFC 2822-like formats with special header formats.
"""
i = line.find(':')
if i > -1:
return line[:i].lower()
return None
|
[
"def",
"isheader",
"(",
"self",
",",
"line",
")",
":",
"i",
"=",
"line",
".",
"find",
"(",
"':'",
")",
"if",
"i",
">",
"-",
"1",
":",
"return",
"line",
"[",
":",
"i",
"]",
".",
"lower",
"(",
")",
"return",
"None"
] | 37.454545 | 19.545455 |
def check_class(obj, target_class, allow_none=False):
""" Checks that the obj is a (sub)type of target_class.
Raises a TypeError if this is not the case.
:param obj: object whos type is to be checked
:type obj: any type
:param target_class: target type/class
:type target_class: any class or type
:param allow_none: if true obj may be None
:type allow_none: boolean
"""
if not isinstance(obj, target_class):
if not (allow_none and obj is None):
raise TypeError("obj must be a of type {}, got: {}"
.format(target_class, type(obj)))
|
[
"def",
"check_class",
"(",
"obj",
",",
"target_class",
",",
"allow_none",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"target_class",
")",
":",
"if",
"not",
"(",
"allow_none",
"and",
"obj",
"is",
"None",
")",
":",
"raise",
"TypeError",
"(",
"\"obj must be a of type {}, got: {}\"",
".",
"format",
"(",
"target_class",
",",
"type",
"(",
"obj",
")",
")",
")"
] | 42.4 | 11.266667 |
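
Usage is straightforward; allow_none is the only twist (assuming check_class from the row above is in scope):

check_class(3, int)                        # passes silently
check_class(None, dict, allow_none=True)   # None tolerated on request
try:
    check_class("3", int)
except TypeError as err:
    print(err)  # obj must be of type <class 'int'>, got: <class 'str'>
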
def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p():
"""Gets to 2.92 in just under 4 days on 8 p100s."""
hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l()
hparams.num_decoder_layers = 14
hparams.batch_size = 8
hparams.layer_prepostprocess_dropout = 0.2
return hparams
|
[
"def",
"imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p",
"(",
")",
":",
"hparams",
"=",
"imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l",
"(",
")",
"hparams",
".",
"num_decoder_layers",
"=",
"14",
"hparams",
".",
"batch_size",
"=",
"8",
"hparams",
".",
"layer_prepostprocess_dropout",
"=",
"0.2",
"return",
"hparams"
] | 42.142857 | 13.714286 |
def grow(self, amount):
"""Spawn new worker threads (not above self.max)."""
if self.max > 0:
budget = max(self.max - len(self._threads), 0)
else:
# self.max <= 0 indicates no maximum
budget = float('inf')
n_new = min(amount, budget)
workers = [self._spawn_worker() for i in range(n_new)]
while not all(worker.ready for worker in workers):
time.sleep(.1)
self._threads.extend(workers)
|
[
"def",
"grow",
"(",
"self",
",",
"amount",
")",
":",
"if",
"self",
".",
"max",
">",
"0",
":",
"budget",
"=",
"max",
"(",
"self",
".",
"max",
"-",
"len",
"(",
"self",
".",
"_threads",
")",
",",
"0",
")",
"else",
":",
"# self.max <= 0 indicates no maximum",
"budget",
"=",
"float",
"(",
"'inf'",
")",
"n_new",
"=",
"min",
"(",
"amount",
",",
"budget",
")",
"workers",
"=",
"[",
"self",
".",
"_spawn_worker",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"n_new",
")",
"]",
"while",
"not",
"all",
"(",
"worker",
".",
"ready",
"for",
"worker",
"in",
"workers",
")",
":",
"time",
".",
"sleep",
"(",
".1",
")",
"self",
".",
"_threads",
".",
"extend",
"(",
"workers",
")"
] | 34.071429 | 16.785714 |
def get_sec_project_activity(self):
"""
Generate the "project activity" section of the report.
"""
logger.debug("Calculating Project Activity metrics.")
data_path = os.path.join(self.data_dir, "activity")
if not os.path.exists(data_path):
os.makedirs(data_path)
for ds in self.data_sources:
metric_file = self.ds2class[ds]
metric_index = self.get_metric_index(ds)
project_activity = metric_file.project_activity(metric_index, self.start_date,
self.end_date)
headers = []
data_frames = []
title_names = []
file_name = ""
for metric in project_activity['metrics']:
file_name += metric.DS_NAME + "_" + metric.id + "_"
title_names.append(metric.name)
headers.append(metric.id)
data_frames.append(metric.timeseries(dataframe=True))
file_name = file_name[:-1] # remove trailing underscore
file_path = os.path.join(data_path, file_name)
title_name = " & ".join(title_names) + ' per ' + self.interval
self.create_csv_fig_from_df(data_frames, file_path, headers,
fig_type="bar", title=title_name)
|
[
"def",
"get_sec_project_activity",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"Calculating Project Activity metrics.\"",
")",
"data_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"data_dir",
",",
"\"activity\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"data_path",
")",
":",
"os",
".",
"makedirs",
"(",
"data_path",
")",
"for",
"ds",
"in",
"self",
".",
"data_sources",
":",
"metric_file",
"=",
"self",
".",
"ds2class",
"[",
"ds",
"]",
"metric_index",
"=",
"self",
".",
"get_metric_index",
"(",
"ds",
")",
"project_activity",
"=",
"metric_file",
".",
"project_activity",
"(",
"metric_index",
",",
"self",
".",
"start_date",
",",
"self",
".",
"end_date",
")",
"headers",
"=",
"[",
"]",
"data_frames",
"=",
"[",
"]",
"title_names",
"=",
"[",
"]",
"file_name",
"=",
"\"\"",
"for",
"metric",
"in",
"project_activity",
"[",
"'metrics'",
"]",
":",
"file_name",
"+=",
"metric",
".",
"DS_NAME",
"+",
"\"_\"",
"+",
"metric",
".",
"id",
"+",
"\"_\"",
"title_names",
".",
"append",
"(",
"metric",
".",
"name",
")",
"headers",
".",
"append",
"(",
"metric",
".",
"id",
")",
"data_frames",
".",
"append",
"(",
"metric",
".",
"timeseries",
"(",
"dataframe",
"=",
"True",
")",
")",
"file_name",
"=",
"file_name",
"[",
":",
"-",
"1",
"]",
"# remove trailing underscore",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_path",
",",
"file_name",
")",
"title_name",
"=",
"\" & \"",
".",
"join",
"(",
"title_names",
")",
"+",
"' per '",
"+",
"self",
".",
"interval",
"self",
".",
"create_csv_fig_from_df",
"(",
"data_frames",
",",
"file_path",
",",
"headers",
",",
"fig_type",
"=",
"\"bar\"",
",",
"title",
"=",
"title_name",
")"
] | 43.16129 | 19.806452 |
def bind_expression_to_resources(expr, resources):
"""
Bind a Blaze expression to resources.
Parameters
----------
expr : bz.Expr
The expression to which we want to bind resources.
resources : dict[bz.Symbol -> any]
Mapping from the loadable terms of ``expr`` to actual data resources.
Returns
-------
bound_expr : bz.Expr
``expr`` with bound resources.
"""
# bind the resources into the expression
if resources is None:
resources = {}
# _subs stands for substitute. It's not actually private, blaze just
# prefixes symbol-manipulation methods with underscores to prevent
# collisions with data column names.
return expr._subs({
k: bz.data(v, dshape=k.dshape) for k, v in iteritems(resources)
})
|
[
"def",
"bind_expression_to_resources",
"(",
"expr",
",",
"resources",
")",
":",
"# bind the resources into the expression",
"if",
"resources",
"is",
"None",
":",
"resources",
"=",
"{",
"}",
"# _subs stands for substitute. It's not actually private, blaze just",
"# prefixes symbol-manipulation methods with underscores to prevent",
"# collisions with data column names.",
"return",
"expr",
".",
"_subs",
"(",
"{",
"k",
":",
"bz",
".",
"data",
"(",
"v",
",",
"dshape",
"=",
"k",
".",
"dshape",
")",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"resources",
")",
"}",
")"
] | 30.076923 | 20 |
def inflate_to_one_hot(tensor, classes):
"""
Converts a tensor with index form to a one hot tensor.
:param tensor: A tensor of shape [batch, h, w, 1]
:param classes: The number of classes that exist. (length of one hot encoding)
:return: A tensor of shape [batch, h, w, classes].
"""
one_hot = tf.one_hot(tensor, classes)
shape = one_hot.get_shape().as_list()
return tf.reshape(one_hot, shape=[-1, shape[1], shape[2], shape[4]])
|
[
"def",
"inflate_to_one_hot",
"(",
"tensor",
",",
"classes",
")",
":",
"one_hot",
"=",
"tf",
".",
"one_hot",
"(",
"tensor",
",",
"classes",
")",
"shape",
"=",
"one_hot",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"return",
"tf",
".",
"reshape",
"(",
"one_hot",
",",
"shape",
"=",
"[",
"-",
"1",
",",
"shape",
"[",
"1",
"]",
",",
"shape",
"[",
"2",
"]",
",",
"shape",
"[",
"4",
"]",
"]",
")"
] | 45.5 | 12.1 |
def encode(self, word):
"""Return the MRA personal numeric identifier (PNI) for a word.
Parameters
----------
word : str
The word to transform
Returns
-------
str
The MRA PNI
Examples
--------
>>> pe = MRA()
>>> pe.encode('Christopher')
'CHRPHR'
>>> pe.encode('Niall')
'NL'
>>> pe.encode('Smith')
'SMTH'
>>> pe.encode('Schmidt')
'SCHMDT'
"""
if not word:
return word
word = word.upper()
word = word.replace('ß', 'SS')
word = word[0] + ''.join(
c for c in word[1:] if c not in self._uc_v_set
)
word = self._delete_consecutive_repeats(word)
if len(word) > 6:
word = word[:3] + word[-3:]
return word
|
[
"def",
"encode",
"(",
"self",
",",
"word",
")",
":",
"if",
"not",
"word",
":",
"return",
"word",
"word",
"=",
"word",
".",
"upper",
"(",
")",
"word",
"=",
"word",
".",
"replace",
"(",
"'ß',",
" ",
"SS')",
"",
"word",
"=",
"word",
"[",
"0",
"]",
"+",
"''",
".",
"join",
"(",
"c",
"for",
"c",
"in",
"word",
"[",
"1",
":",
"]",
"if",
"c",
"not",
"in",
"self",
".",
"_uc_v_set",
")",
"word",
"=",
"self",
".",
"_delete_consecutive_repeats",
"(",
"word",
")",
"if",
"len",
"(",
"word",
")",
">",
"6",
":",
"word",
"=",
"word",
"[",
":",
"3",
"]",
"+",
"word",
"[",
"-",
"3",
":",
"]",
"return",
"word"
] | 22.702703 | 19.027027 |
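
The MRA PNI reduces to: uppercase, drop non-initial vowels, collapse repeated letters, and keep at most the first three plus last three characters. A standalone sketch, with itertools.groupby standing in for the class's repeat-collapsing helper:

from itertools import groupby

UC_VOWELS = set("AEIOU")

def mra_pni(word):
    if not word:
        return word
    word = word.upper().replace("ß", "SS")
    word = word[0] + "".join(c for c in word[1:] if c not in UC_VOWELS)
    word = "".join(c for c, _ in groupby(word))  # collapse runs of repeats
    return word[:3] + word[-3:] if len(word) > 6 else word

assert mra_pni("Christopher") == "CHRPHR"
assert mra_pni("Niall") == "NL"
assert mra_pni("Schmidt") == "SCHMDT"
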
def main():
"""The main function of the script"""
desc = 'Benchmark the files generated by generate.py'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'--src',
dest='src_dir',
default='generated',
help='The directory containing the sources to benchmark'
)
parser.add_argument(
'--out',
dest='out_dir',
default='../../doc',
help='The output directory'
)
parser.add_argument(
'--include',
dest='include',
default='include',
        help='The directory containing the headers for the benchmark'
)
parser.add_argument(
'--boost_headers',
dest='boost_headers',
default='../../../..',
help='The directory containing the Boost headers (the boost directory)'
)
parser.add_argument(
'--compiler',
dest='compiler',
default='g++',
help='The compiler to do the benchmark with'
)
parser.add_argument(
'--repeat_count',
dest='repeat_count',
type=int,
default=5,
help='How many times a measurement should be repeated.'
)
args = parser.parse_args()
compiler = compiler_info(args.compiler)
results = benchmark(
args.src_dir,
args.compiler,
[args.include, args.boost_headers],
args.repeat_count
)
plot_diagrams(results, configs_in(args.src_dir), compiler, args.out_dir)
|
[
"def",
"main",
"(",
")",
":",
"desc",
"=",
"'Benchmark the files generated by generate.py'",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"desc",
")",
"parser",
".",
"add_argument",
"(",
"'--src'",
",",
"dest",
"=",
"'src_dir'",
",",
"default",
"=",
"'generated'",
",",
"help",
"=",
"'The directory containing the sources to benchmark'",
")",
"parser",
".",
"add_argument",
"(",
"'--out'",
",",
"dest",
"=",
"'out_dir'",
",",
"default",
"=",
"'../../doc'",
",",
"help",
"=",
"'The output directory'",
")",
"parser",
".",
"add_argument",
"(",
"'--include'",
",",
"dest",
"=",
"'include'",
",",
"default",
"=",
"'include'",
",",
"help",
"=",
"'The directory containing the headeres for the benchmark'",
")",
"parser",
".",
"add_argument",
"(",
"'--boost_headers'",
",",
"dest",
"=",
"'boost_headers'",
",",
"default",
"=",
"'../../../..'",
",",
"help",
"=",
"'The directory containing the Boost headers (the boost directory)'",
")",
"parser",
".",
"add_argument",
"(",
"'--compiler'",
",",
"dest",
"=",
"'compiler'",
",",
"default",
"=",
"'g++'",
",",
"help",
"=",
"'The compiler to do the benchmark with'",
")",
"parser",
".",
"add_argument",
"(",
"'--repeat_count'",
",",
"dest",
"=",
"'repeat_count'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"5",
",",
"help",
"=",
"'How many times a measurement should be repeated.'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"compiler",
"=",
"compiler_info",
"(",
"args",
".",
"compiler",
")",
"results",
"=",
"benchmark",
"(",
"args",
".",
"src_dir",
",",
"args",
".",
"compiler",
",",
"[",
"args",
".",
"include",
",",
"args",
".",
"boost_headers",
"]",
",",
"args",
".",
"repeat_count",
")",
"plot_diagrams",
"(",
"results",
",",
"configs_in",
"(",
"args",
".",
"src_dir",
")",
",",
"compiler",
",",
"args",
".",
"out_dir",
")"
] | 27.018868 | 20.584906 |
def GetValueLength(rd, pos):
"""Get value length for a key in rd.
For a key at position pos in the Report Descriptor rd, return the length
of the associated value. This supports both short and long format
values.
Args:
rd: Report Descriptor
pos: The position of the key in rd.
Returns:
(key_size, data_len) where key_size is the number of bytes occupied by
the key and data_len is the length of the value associated by the key.
"""
rd = bytearray(rd)
key = rd[pos]
if key == LONG_ITEM_ENCODING:
# If the key is tagged as a long item (0xfe), then the format is
# [key (1 byte)] [data len (1 byte)] [item tag (1 byte)] [data (n # bytes)].
# Thus, the entire key record is 3 bytes long.
if pos + 1 < len(rd):
return (3, rd[pos + 1])
else:
raise errors.HidError('Malformed report descriptor')
else:
# If the key is tagged as a short item, then the item tag and data len are
# packed into one byte. The format is thus:
# [tag (high 4 bits)] [type (2 bits)] [size code (2 bits)] [data (n bytes)].
# The size code specifies 1,2, or 4 bytes (0x03 means 4 bytes).
code = key & 0x03
if code <= 0x02:
return (1, code)
elif code == 0x03:
return (1, 4)
raise errors.HidError('Cannot happen')
|
[
"def",
"GetValueLength",
"(",
"rd",
",",
"pos",
")",
":",
"rd",
"=",
"bytearray",
"(",
"rd",
")",
"key",
"=",
"rd",
"[",
"pos",
"]",
"if",
"key",
"==",
"LONG_ITEM_ENCODING",
":",
"# If the key is tagged as a long item (0xfe), then the format is",
"# [key (1 byte)] [data len (1 byte)] [item tag (1 byte)] [data (n # bytes)].",
"# Thus, the entire key record is 3 bytes long.",
"if",
"pos",
"+",
"1",
"<",
"len",
"(",
"rd",
")",
":",
"return",
"(",
"3",
",",
"rd",
"[",
"pos",
"+",
"1",
"]",
")",
"else",
":",
"raise",
"errors",
".",
"HidError",
"(",
"'Malformed report descriptor'",
")",
"else",
":",
"# If the key is tagged as a short item, then the item tag and data len are",
"# packed into one byte. The format is thus:",
"# [tag (high 4 bits)] [type (2 bits)] [size code (2 bits)] [data (n bytes)].",
"# The size code specifies 1,2, or 4 bytes (0x03 means 4 bytes).",
"code",
"=",
"key",
"&",
"0x03",
"if",
"code",
"<=",
"0x02",
":",
"return",
"(",
"1",
",",
"code",
")",
"elif",
"code",
"==",
"0x03",
":",
"return",
"(",
"1",
",",
"4",
")",
"raise",
"errors",
".",
"HidError",
"(",
"'Cannot happen'",
")"
] | 33.157895 | 23.710526 |
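
A quick check against hand-built descriptor bytes, assuming the function above is importable and LONG_ITEM_ENCODING == 0xfe as its comment states:

# Short item 0x85: size code 0x01 -> one data byte after a one-byte key.
assert GetValueLength(b"\x85\x01", 0) == (1, 1)
# Short item with size code 0x03 -> four data bytes.
assert GetValueLength(b"\x87\x01\x02\x03\x04", 0) == (1, 4)
# Long item: 0xfe, then the data-length byte (here 5).
assert GetValueLength(b"\xfe\x05\x00", 0) == (3, 5)
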
def _run_systemdrun_decide(self):
"""
    Internal method.
    Decide whether the --wait option can be passed to systemd-run;
    for example, RHEL7 does not support the --wait option.
:return: bool
"""
if self.systemd_wait_support is None:
self.systemd_wait_support = "--wait" in run_cmd(
["systemd-run", "--help"], return_output=True)
return self.systemd_wait_support
|
[
"def",
"_run_systemdrun_decide",
"(",
"self",
")",
":",
"if",
"self",
".",
"systemd_wait_support",
"is",
"None",
":",
"self",
".",
"systemd_wait_support",
"=",
"\"--wait\"",
"in",
"run_cmd",
"(",
"[",
"\"systemd-run\"",
",",
"\"--help\"",
"]",
",",
"return_output",
"=",
"True",
")",
"return",
"self",
".",
"systemd_wait_support"
] | 35.5 | 14.166667 |
def add_color_stop_rgba(self, offset, red, green, blue, alpha=1):
"""Adds a translucent color stop to a gradient pattern.
The offset specifies the location along the gradient's control vector.
For example,
a linear gradient's control vector is from (x0,y0) to (x1,y1)
while a radial gradient's control vector is
from any point on the start circle
to the corresponding point on the end circle.
If two (or more) stops are specified with identical offset values,
they will be sorted
according to the order in which the stops are added
(stops added earlier before stops added later).
This can be useful for reliably making sharp color transitions
instead of the typical blend.
The color components and offset are in the range 0 to 1.
If the values passed in are outside that range, they will be clamped.
:param offset: Location along the gradient's control vector
:param red: Red component of the color.
:param green: Green component of the color.
:param blue: Blue component of the color.
:param alpha:
Alpha component of the color.
1 (the default) is opaque, 0 fully transparent.
:type offset: float
:type red: float
:type green: float
:type blue: float
:type alpha: float
"""
cairo.cairo_pattern_add_color_stop_rgba(
self._pointer, offset, red, green, blue, alpha)
self._check_status()
|
[
"def",
"add_color_stop_rgba",
"(",
"self",
",",
"offset",
",",
"red",
",",
"green",
",",
"blue",
",",
"alpha",
"=",
"1",
")",
":",
"cairo",
".",
"cairo_pattern_add_color_stop_rgba",
"(",
"self",
".",
"_pointer",
",",
"offset",
",",
"red",
",",
"green",
",",
"blue",
",",
"alpha",
")",
"self",
".",
"_check_status",
"(",
")"
] | 40.891892 | 19.378378 |
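
This appears to be cairocffi's Gradient method; a short gradient setup under that assumption:

import cairocffi as cairo

gradient = cairo.LinearGradient(0, 0, 0, 100)    # control vector (0,0) -> (0,100)
gradient.add_color_stop_rgba(0.0, 1, 0, 0)       # opaque red at the start
gradient.add_color_stop_rgba(1.0, 0, 0, 1, 0.5)  # half-transparent blue at the end
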
def _TempRootPath():
"""Returns a default root path for storing temporary files."""
# `FLAGS.test_tmpdir` is defined only in test environment, so we can't expect
# for it to be always defined.
test_tmpdir = (
compatibility.Environ("TEST_TMPDIR", default=None) or
FLAGS.get_flag_value("test_tmpdir", default=None))
if not os.path.exists(test_tmpdir):
    # TODO: We add a try-catch block to avoid a rare race condition.
# In Python 3 the exception being thrown is way more specific
# (`FileExistsError`) but in Python 2 `OSError` is the best we can do. Once
# support for Python 2 is dropped we can switch to catching that and remove
# the conditional (EAFP).
try:
os.makedirs(test_tmpdir)
except OSError as err:
logging.error(err)
# TODO(hanuszczak): Investigate whether this check still makes sense.
if platform.system() == "Windows":
return None
return test_tmpdir
|
[
"def",
"_TempRootPath",
"(",
")",
":",
"# `FLAGS.test_tmpdir` is defined only in test environment, so we can't expect",
"# for it to be always defined.",
"test_tmpdir",
"=",
"(",
"compatibility",
".",
"Environ",
"(",
"\"TEST_TMPDIR\"",
",",
"default",
"=",
"None",
")",
"or",
"FLAGS",
".",
"get_flag_value",
"(",
"\"test_tmpdir\"",
",",
"default",
"=",
"None",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"test_tmpdir",
")",
":",
"# TODO: We add a try-catch block to avoid rare race condition.",
"# In Python 3 the exception being thrown is way more specific",
"# (`FileExistsError`) but in Python 2 `OSError` is the best we can do. Once",
"# support for Python 2 is dropped we can switch to catching that and remove",
"# the conditional (EAFP).",
"try",
":",
"os",
".",
"makedirs",
"(",
"test_tmpdir",
")",
"except",
"OSError",
"as",
"err",
":",
"logging",
".",
"error",
"(",
"err",
")",
"# TODO(hanuszczak): Investigate whether this check still makes sense.",
"if",
"platform",
".",
"system",
"(",
")",
"==",
"\"Windows\"",
":",
"return",
"None",
"return",
"test_tmpdir"
] | 38 | 22.5 |
def create_swag_from_ctx(ctx):
"""Creates SWAG client from the current context."""
swag_opts = {}
if ctx.type == 'file':
swag_opts = {
'swag.type': 'file',
'swag.data_dir': ctx.data_dir,
'swag.data_file': ctx.data_file
}
elif ctx.type == 's3':
swag_opts = {
'swag.type': 's3',
'swag.bucket_name': ctx.bucket_name,
'swag.data_file': ctx.data_file,
'swag.region': ctx.region
}
elif ctx.type == 'dynamodb':
swag_opts = {
'swag.type': 'dynamodb',
'swag.region': ctx.region
}
return SWAGManager(**parse_swag_config_options(swag_opts))
|
[
"def",
"create_swag_from_ctx",
"(",
"ctx",
")",
":",
"swag_opts",
"=",
"{",
"}",
"if",
"ctx",
".",
"type",
"==",
"'file'",
":",
"swag_opts",
"=",
"{",
"'swag.type'",
":",
"'file'",
",",
"'swag.data_dir'",
":",
"ctx",
".",
"data_dir",
",",
"'swag.data_file'",
":",
"ctx",
".",
"data_file",
"}",
"elif",
"ctx",
".",
"type",
"==",
"'s3'",
":",
"swag_opts",
"=",
"{",
"'swag.type'",
":",
"'s3'",
",",
"'swag.bucket_name'",
":",
"ctx",
".",
"bucket_name",
",",
"'swag.data_file'",
":",
"ctx",
".",
"data_file",
",",
"'swag.region'",
":",
"ctx",
".",
"region",
"}",
"elif",
"ctx",
".",
"type",
"==",
"'dynamodb'",
":",
"swag_opts",
"=",
"{",
"'swag.type'",
":",
"'dynamodb'",
",",
"'swag.region'",
":",
"ctx",
".",
"region",
"}",
"return",
"SWAGManager",
"(",
"*",
"*",
"parse_swag_config_options",
"(",
"swag_opts",
")",
")"
] | 31.272727 | 12.954545 |
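
A hypothetical context object feeding the factory above, assuming the function and its swag_client dependencies are importable (SimpleNamespace is a stand-in; the real ctx comes from the CLI framework):

from types import SimpleNamespace

ctx = SimpleNamespace(type="file", data_dir="/tmp/swag", data_file="accounts.json")
swag = create_swag_from_ctx(ctx)  # SWAGManager backed by the file backend
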
def parse_bowtie_stats(self, stats_file):
"""
Parses Bowtie2 stats file, returns series with values.
:param str stats_file: Bowtie2 output file with alignment statistics.
"""
import pandas as pd
stats = pd.Series(index=["readCount", "unpaired", "unaligned", "unique", "multiple", "alignmentRate"])
try:
with open(stats_file) as handle:
content = handle.readlines() # list of strings per line
except:
return stats
# total reads
try:
line = [i for i in range(len(content)) if " reads; of these:" in content[i]][0]
stats["readCount"] = re.sub("\D.*", "", content[line])
if 7 > len(content) > 2:
line = [i for i in range(len(content)) if "were unpaired; of these:" in content[i]][0]
stats["unpaired"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
else:
line = [i for i in range(len(content)) if "were paired; of these:" in content[i]][0]
stats["unpaired"] = stats["readCount"] - int(re.sub("\D", "", re.sub("\(.*", "", content[line])))
line = [i for i in range(len(content)) if "aligned 0 times" in content[i]][0]
stats["unaligned"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
line = [i for i in range(len(content)) if "aligned exactly 1 time" in content[i]][0]
stats["unique"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
line = [i for i in range(len(content)) if "aligned >1 times" in content[i]][0]
stats["multiple"] = re.sub("\D", "", re.sub("\(.*", "", content[line]))
line = [i for i in range(len(content)) if "overall alignment rate" in content[i]][0]
stats["alignmentRate"] = re.sub("\%.*", "", content[line]).strip()
except IndexError:
pass
return stats
|
[
"def",
"parse_bowtie_stats",
"(",
"self",
",",
"stats_file",
")",
":",
"import",
"pandas",
"as",
"pd",
"stats",
"=",
"pd",
".",
"Series",
"(",
"index",
"=",
"[",
"\"readCount\"",
",",
"\"unpaired\"",
",",
"\"unaligned\"",
",",
"\"unique\"",
",",
"\"multiple\"",
",",
"\"alignmentRate\"",
"]",
")",
"try",
":",
"with",
"open",
"(",
"stats_file",
")",
"as",
"handle",
":",
"content",
"=",
"handle",
".",
"readlines",
"(",
")",
"# list of strings per line",
"except",
":",
"return",
"stats",
"# total reads",
"try",
":",
"line",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"content",
")",
")",
"if",
"\" reads; of these:\"",
"in",
"content",
"[",
"i",
"]",
"]",
"[",
"0",
"]",
"stats",
"[",
"\"readCount\"",
"]",
"=",
"re",
".",
"sub",
"(",
"\"\\D.*\"",
",",
"\"\"",
",",
"content",
"[",
"line",
"]",
")",
"if",
"7",
">",
"len",
"(",
"content",
")",
">",
"2",
":",
"line",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"content",
")",
")",
"if",
"\"were unpaired; of these:\"",
"in",
"content",
"[",
"i",
"]",
"]",
"[",
"0",
"]",
"stats",
"[",
"\"unpaired\"",
"]",
"=",
"re",
".",
"sub",
"(",
"\"\\D\"",
",",
"\"\"",
",",
"re",
".",
"sub",
"(",
"\"\\(.*\"",
",",
"\"\"",
",",
"content",
"[",
"line",
"]",
")",
")",
"else",
":",
"line",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"content",
")",
")",
"if",
"\"were paired; of these:\"",
"in",
"content",
"[",
"i",
"]",
"]",
"[",
"0",
"]",
"stats",
"[",
"\"unpaired\"",
"]",
"=",
"stats",
"[",
"\"readCount\"",
"]",
"-",
"int",
"(",
"re",
".",
"sub",
"(",
"\"\\D\"",
",",
"\"\"",
",",
"re",
".",
"sub",
"(",
"\"\\(.*\"",
",",
"\"\"",
",",
"content",
"[",
"line",
"]",
")",
")",
")",
"line",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"content",
")",
")",
"if",
"\"aligned 0 times\"",
"in",
"content",
"[",
"i",
"]",
"]",
"[",
"0",
"]",
"stats",
"[",
"\"unaligned\"",
"]",
"=",
"re",
".",
"sub",
"(",
"\"\\D\"",
",",
"\"\"",
",",
"re",
".",
"sub",
"(",
"\"\\(.*\"",
",",
"\"\"",
",",
"content",
"[",
"line",
"]",
")",
")",
"line",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"content",
")",
")",
"if",
"\"aligned exactly 1 time\"",
"in",
"content",
"[",
"i",
"]",
"]",
"[",
"0",
"]",
"stats",
"[",
"\"unique\"",
"]",
"=",
"re",
".",
"sub",
"(",
"\"\\D\"",
",",
"\"\"",
",",
"re",
".",
"sub",
"(",
"\"\\(.*\"",
",",
"\"\"",
",",
"content",
"[",
"line",
"]",
")",
")",
"line",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"content",
")",
")",
"if",
"\"aligned >1 times\"",
"in",
"content",
"[",
"i",
"]",
"]",
"[",
"0",
"]",
"stats",
"[",
"\"multiple\"",
"]",
"=",
"re",
".",
"sub",
"(",
"\"\\D\"",
",",
"\"\"",
",",
"re",
".",
"sub",
"(",
"\"\\(.*\"",
",",
"\"\"",
",",
"content",
"[",
"line",
"]",
")",
")",
"line",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"content",
")",
")",
"if",
"\"overall alignment rate\"",
"in",
"content",
"[",
"i",
"]",
"]",
"[",
"0",
"]",
"stats",
"[",
"\"alignmentRate\"",
"]",
"=",
"re",
".",
"sub",
"(",
"\"\\%.*\"",
",",
"\"\"",
",",
"content",
"[",
"line",
"]",
")",
".",
"strip",
"(",
")",
"except",
"IndexError",
":",
"pass",
"return",
"stats"
] | 57.909091 | 32.636364 |
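The nested re.sub calls above first drop the parenthesised percentage, then strip all non-digits, so the percentage's digits never leak into the count. A runnable demo on a made-up Bowtie2 summary line:

import re

line = "    1234 (12.34%) aligned exactly 1 time"
count = re.sub(r"\D", "", re.sub(r"\(.*", "", line))
print(count)  # '1234' -- the inner sub removed '(12.34%) ...' first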
def has_dynamic_getattr(self, context=None):
"""Check if the class has a custom __getattr__ or __getattribute__.
If any such method is found and it is not from
builtins, nor from an extension module, then the function
will return True.
:returns: True if the class has a custom
__getattr__ or __getattribute__, False otherwise.
:rtype: bool
"""
def _valid_getattr(node):
root = node.root()
return root.name != BUILTINS and getattr(root, "pure_python", None)
try:
return _valid_getattr(self.getattr("__getattr__", context)[0])
except exceptions.AttributeInferenceError:
# if self.newstyle: XXX cause an infinite recursion error
try:
getattribute = self.getattr("__getattribute__", context)[0]
return _valid_getattr(getattribute)
except exceptions.AttributeInferenceError:
pass
return False
|
[
"def",
"has_dynamic_getattr",
"(",
"self",
",",
"context",
"=",
"None",
")",
":",
"def",
"_valid_getattr",
"(",
"node",
")",
":",
"root",
"=",
"node",
".",
"root",
"(",
")",
"return",
"root",
".",
"name",
"!=",
"BUILTINS",
"and",
"getattr",
"(",
"root",
",",
"\"pure_python\"",
",",
"None",
")",
"try",
":",
"return",
"_valid_getattr",
"(",
"self",
".",
"getattr",
"(",
"\"__getattr__\"",
",",
"context",
")",
"[",
"0",
"]",
")",
"except",
"exceptions",
".",
"AttributeInferenceError",
":",
"# if self.newstyle: XXX cause an infinite recursion error",
"try",
":",
"getattribute",
"=",
"self",
".",
"getattr",
"(",
"\"__getattribute__\"",
",",
"context",
")",
"[",
"0",
"]",
"return",
"_valid_getattr",
"(",
"getattribute",
")",
"except",
"exceptions",
".",
"AttributeInferenceError",
":",
"pass",
"return",
"False"
] | 37.923077 | 21.076923 |
def download_pojo(self, path="", get_genmodel_jar=False, genmodel_name=""):
"""
Download the POJO for this model to the directory specified by path.
If path is an empty string, then dump the output to screen.
:param path: An absolute path to the directory where POJO should be saved.
:param get_genmodel_jar: if True, then also download h2o-genmodel.jar and store it in folder ``path``.
    :param genmodel_name: Custom name of the genmodel jar
:returns: name of the POJO file written.
"""
assert_is_type(path, str)
assert_is_type(get_genmodel_jar, bool)
path = path.rstrip("/")
return h2o.download_pojo(self, path, get_jar=get_genmodel_jar, jar_name=genmodel_name)
|
[
"def",
"download_pojo",
"(",
"self",
",",
"path",
"=",
"\"\"",
",",
"get_genmodel_jar",
"=",
"False",
",",
"genmodel_name",
"=",
"\"\"",
")",
":",
"assert_is_type",
"(",
"path",
",",
"str",
")",
"assert_is_type",
"(",
"get_genmodel_jar",
",",
"bool",
")",
"path",
"=",
"path",
".",
"rstrip",
"(",
"\"/\"",
")",
"return",
"h2o",
".",
"download_pojo",
"(",
"self",
",",
"path",
",",
"get_jar",
"=",
"get_genmodel_jar",
",",
"jar_name",
"=",
"genmodel_name",
")"
] | 49.4 | 26.066667 |
def get_dev_vlans(devid, auth, url):
"""Function takes input of devID to issue RESTUL call to HP IMC
:param devid: requires devId as the only input parameter
:return: dictionary of existing vlans on the devices. Device must be supported in HP IMC platform VLAN manager module
"""
# checks to see if the imc credentials are already available
get_dev_vlans_url = "/imcrs/vlan?devId=" + str(devid) + "&start=0&size=5000&total=false"
f_url = url + get_dev_vlans_url
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=HEADERS)
# r.status_code
try:
if r.status_code == 200:
dev_vlans = (json.loads(r.text))
return dev_vlans['vlan']
elif r.status_code == 409:
return {'vlan': 'no vlans'}
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + ' get_dev_vlans: An Error has occured'
|
[
"def",
"get_dev_vlans",
"(",
"devid",
",",
"auth",
",",
"url",
")",
":",
"# checks to see if the imc credentials are already available",
"get_dev_vlans_url",
"=",
"\"/imcrs/vlan?devId=\"",
"+",
"str",
"(",
"devid",
")",
"+",
"\"&start=0&size=5000&total=false\"",
"f_url",
"=",
"url",
"+",
"get_dev_vlans_url",
"# creates the URL using the payload variable as the contents",
"r",
"=",
"requests",
".",
"get",
"(",
"f_url",
",",
"auth",
"=",
"auth",
",",
"headers",
"=",
"HEADERS",
")",
"# r.status_code",
"try",
":",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"dev_vlans",
"=",
"(",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
")",
"return",
"dev_vlans",
"[",
"'vlan'",
"]",
"elif",
"r",
".",
"status_code",
"==",
"409",
":",
"return",
"{",
"'vlan'",
":",
"'no vlans'",
"}",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"e",
":",
"return",
"\"Error:\\n\"",
"+",
"str",
"(",
"e",
")",
"+",
"' get_dev_vlans: An Error has occured'"
] | 45 | 20.619048 |
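The request URL is assembled by plain string concatenation. A runnable sketch of just that step, with a hypothetical device ID and IMC base URL:

devid = 42  # hypothetical device ID
url = "https://imc.example.com:8443"  # hypothetical HP IMC base URL
f_url = url + "/imcrs/vlan?devId=" + str(devid) + "&start=0&size=5000&total=false"
print(f_url)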
async def _handle_metrics(self, request: Request) -> Response:
"""Handler for metrics."""
if self._update_handler:
await self._update_handler(self.registry.get_metrics())
response = Response(body=self.registry.generate_metrics())
response.content_type = CONTENT_TYPE_LATEST
return response
|
[
"async",
"def",
"_handle_metrics",
"(",
"self",
",",
"request",
":",
"Request",
")",
"->",
"Response",
":",
"if",
"self",
".",
"_update_handler",
":",
"await",
"self",
".",
"_update_handler",
"(",
"self",
".",
"registry",
".",
"get_metrics",
"(",
")",
")",
"response",
"=",
"Response",
"(",
"body",
"=",
"self",
".",
"registry",
".",
"generate_metrics",
"(",
")",
")",
"response",
".",
"content_type",
"=",
"CONTENT_TYPE_LATEST",
"return",
"response"
] | 47.857143 | 15.857143 |
def match_many(self, models, results, relation):
"""
        Match the eagerly loaded results to their parents.
:param models: The parents
:type models: list
:param results: The results collection
:type results: Collection
:param relation: The relation
:type relation: str
:rtype: list
"""
return self._match_one_or_many(models, results, relation, 'many')
|
[
"def",
"match_many",
"(",
"self",
",",
"models",
",",
"results",
",",
"relation",
")",
":",
"return",
"self",
".",
"_match_one_or_many",
"(",
"models",
",",
"results",
",",
"relation",
",",
"'many'",
")"
] | 26.9375 | 18.4375 |
def has_command(self, command):
"""Returns True if any of the plugins have the given command."""
for pbt in self._plugins.values():
if pbt.command == command:
return True
return False
|
[
"def",
"has_command",
"(",
"self",
",",
"command",
")",
":",
"for",
"pbt",
"in",
"self",
".",
"_plugins",
".",
"values",
"(",
")",
":",
"if",
"pbt",
".",
"command",
"==",
"command",
":",
"return",
"True",
"return",
"False"
] | 38.333333 | 7.666667 |
def get_tunnel_info_output_tunnel_has_conflicts(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_tunnel_info = ET.Element("get_tunnel_info")
config = get_tunnel_info
output = ET.SubElement(get_tunnel_info, "output")
tunnel = ET.SubElement(output, "tunnel")
has_conflicts = ET.SubElement(tunnel, "has-conflicts")
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
[
"def",
"get_tunnel_info_output_tunnel_has_conflicts",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"get_tunnel_info",
"=",
"ET",
".",
"Element",
"(",
"\"get_tunnel_info\"",
")",
"config",
"=",
"get_tunnel_info",
"output",
"=",
"ET",
".",
"SubElement",
"(",
"get_tunnel_info",
",",
"\"output\"",
")",
"tunnel",
"=",
"ET",
".",
"SubElement",
"(",
"output",
",",
"\"tunnel\"",
")",
"has_conflicts",
"=",
"ET",
".",
"SubElement",
"(",
"tunnel",
",",
"\"has-conflicts\"",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 40.333333 | 13.583333 |
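The function builds its request body element by element with xml.etree.ElementTree; a runnable demo of the same construction, printed as XML:

import xml.etree.ElementTree as ET

get_tunnel_info = ET.Element("get_tunnel_info")
output = ET.SubElement(get_tunnel_info, "output")
tunnel = ET.SubElement(output, "tunnel")
ET.SubElement(tunnel, "has-conflicts")
print(ET.tostring(get_tunnel_info).decode())
# <get_tunnel_info><output><tunnel><has-conflicts /></tunnel></output></get_tunnel_info>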
def nextPlot(self):
"""Moves the displayed plot to the next one"""
if self.stacker.currentIndex() < self.stacker.count():
self.stacker.setCurrentIndex(self.stacker.currentIndex()+1)
|
[
"def",
"nextPlot",
"(",
"self",
")",
":",
"if",
"self",
".",
"stacker",
".",
"currentIndex",
"(",
")",
"<",
"self",
".",
"stacker",
".",
"count",
"(",
")",
":",
"self",
".",
"stacker",
".",
"setCurrentIndex",
"(",
"self",
".",
"stacker",
".",
"currentIndex",
"(",
")",
"+",
"1",
")"
] | 51.5 | 18.5 |
def check_sas_base_dir(root=None):
''' Check for the SAS_BASE_DIR environment variable
Will set the SAS_BASE_DIR in your local environment
    or prompt you to define one if it is undefined
Parameters:
root (str):
Optional override of the SAS_BASE_DIR envvar
'''
sasbasedir = root or os.getenv("SAS_BASE_DIR")
if not sasbasedir:
sasbasedir = input('Enter a path for SAS_BASE_DIR: ')
os.environ['SAS_BASE_DIR'] = sasbasedir
|
[
"def",
"check_sas_base_dir",
"(",
"root",
"=",
"None",
")",
":",
"sasbasedir",
"=",
"root",
"or",
"os",
".",
"getenv",
"(",
"\"SAS_BASE_DIR\"",
")",
"if",
"not",
"sasbasedir",
":",
"sasbasedir",
"=",
"input",
"(",
"'Enter a path for SAS_BASE_DIR: '",
")",
"os",
".",
"environ",
"[",
"'SAS_BASE_DIR'",
"]",
"=",
"sasbasedir"
] | 30.933333 | 20.666667 |
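A non-interactive variant of the same envvar check, falling back to a default instead of prompting; the default path is hypothetical:

import os

os.environ.setdefault('SAS_BASE_DIR', '/tmp/sas')  # set only if undefined
print(os.getenv('SAS_BASE_DIR'))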
def _update_structure_lines(self):
'''ATOM and HETATM lines may be altered by function calls. When this happens, this function should be called to keep self.structure_lines up to date.'''
structure_lines = []
atom_chain_order = []
chain_atoms = {}
for line in self.lines:
linetype = line[0:6]
if linetype == 'ATOM ' or linetype == 'HETATM' or linetype == 'TER ':
chain_id = line[21]
self.residue_types.add(line[17:20].strip())
if missing_chain_ids.get(self.pdb_id):
chain_id = missing_chain_ids[self.pdb_id]
structure_lines.append(line)
if (chain_id not in atom_chain_order) and (chain_id != ' '):
atom_chain_order.append(chain_id)
if linetype == 'ATOM ':
atom_type = line[12:16].strip()
if atom_type:
chain_atoms[chain_id] = chain_atoms.get(chain_id, set())
chain_atoms[chain_id].add(atom_type)
if linetype == 'ENDMDL':
colortext.warning("ENDMDL detected: Breaking out early. We do not currently handle NMR structures properly.")
break
self.structure_lines = structure_lines
self.atom_chain_order = atom_chain_order
self.chain_atoms = chain_atoms
|
[
"def",
"_update_structure_lines",
"(",
"self",
")",
":",
"structure_lines",
"=",
"[",
"]",
"atom_chain_order",
"=",
"[",
"]",
"chain_atoms",
"=",
"{",
"}",
"for",
"line",
"in",
"self",
".",
"lines",
":",
"linetype",
"=",
"line",
"[",
"0",
":",
"6",
"]",
"if",
"linetype",
"==",
"'ATOM '",
"or",
"linetype",
"==",
"'HETATM'",
"or",
"linetype",
"==",
"'TER '",
":",
"chain_id",
"=",
"line",
"[",
"21",
"]",
"self",
".",
"residue_types",
".",
"add",
"(",
"line",
"[",
"17",
":",
"20",
"]",
".",
"strip",
"(",
")",
")",
"if",
"missing_chain_ids",
".",
"get",
"(",
"self",
".",
"pdb_id",
")",
":",
"chain_id",
"=",
"missing_chain_ids",
"[",
"self",
".",
"pdb_id",
"]",
"structure_lines",
".",
"append",
"(",
"line",
")",
"if",
"(",
"chain_id",
"not",
"in",
"atom_chain_order",
")",
"and",
"(",
"chain_id",
"!=",
"' '",
")",
":",
"atom_chain_order",
".",
"append",
"(",
"chain_id",
")",
"if",
"linetype",
"==",
"'ATOM '",
":",
"atom_type",
"=",
"line",
"[",
"12",
":",
"16",
"]",
".",
"strip",
"(",
")",
"if",
"atom_type",
":",
"chain_atoms",
"[",
"chain_id",
"]",
"=",
"chain_atoms",
".",
"get",
"(",
"chain_id",
",",
"set",
"(",
")",
")",
"chain_atoms",
"[",
"chain_id",
"]",
".",
"add",
"(",
"atom_type",
")",
"if",
"linetype",
"==",
"'ENDMDL'",
":",
"colortext",
".",
"warning",
"(",
"\"ENDMDL detected: Breaking out early. We do not currently handle NMR structures properly.\"",
")",
"break",
"self",
".",
"structure_lines",
"=",
"structure_lines",
"self",
".",
"atom_chain_order",
"=",
"atom_chain_order",
"self",
".",
"chain_atoms",
"=",
"chain_atoms"
] | 49.357143 | 22.142857 |
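The parsing above relies on the fixed-column PDB format: record type in columns 1-6, atom name in 13-16, residue name in 18-20, chain ID in column 22. A runnable demo on an illustrative ATOM record:

line = "ATOM      1  N   MET A   1      11.104  13.207  10.567  1.00 20.00           N"
print(line[0:6])            # 'ATOM  ' -- record type
print(line[21])             # 'A'      -- chain ID
print(line[17:20].strip())  # 'MET'    -- residue type
print(line[12:16].strip())  # 'N'      -- atom type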
def build(self, recursive=True):
"""
Building an assembly buffers the :meth:`components` and :meth:`constraints`.
Running ``build()`` is optional, it's automatically run when requesting
:meth:`components` or :meth:`constraints`.
Mostly it's used to test that there aren't any critical runtime
issues with its construction, but doing anything like *displaying* or
*exporting* will ultimately run a build anyway.
:param recursive: if set, iterates through child components and builds
those as well.
:type recursive: :class:`bool`
"""
# initialize values
self._components = {}
self._constraints = []
def genwrap(obj, name, iter_type=None):
# Force obj to act like a generator.
# this wrapper will always yield at least once.
if isinstance(obj, GeneratorType):
for i in obj:
if (iter_type is not None) and (not isinstance(i, iter_type)):
raise TypeError("%s must yield a %r" % (name, iter_type))
yield i
else:
if (iter_type is not None) and (not isinstance(obj, iter_type)):
raise TypeError("%s must return a %r" % (name, iter_type))
yield obj
# Make Components
components_iter = genwrap(self.make_components(), "make_components", dict)
new_components = next(components_iter)
self.verify_components(new_components)
self._components.update(new_components)
# Make Constraints
        constraints_iter = genwrap(self.make_constraints(), "make_constraints", list)
new_constraints = next(constraints_iter)
self.verify_constraints(new_constraints)
self._constraints += new_constraints
# Run solver : sets components' world coordinates
self.solve()
# Make Alterations
alterations_iter = genwrap(self.make_alterations(), "make_alterations")
next(alterations_iter) # return value is ignored
while True:
(s1, s2, s3) = (True, True, True) # stages
# Make Components
new_components = None
try:
new_components = next(components_iter)
self.verify_components(new_components)
self._components.update(new_components)
except StopIteration:
s1 = False
# Make Constraints
new_constraints = None
try:
new_constraints = next(constraints_iter)
self.verify_constraints(new_constraints)
self._constraints += new_constraints
except StopIteration:
s2 = False
# Run solver : sets components' world coordinates
if new_components or new_constraints:
self.solve()
# Make Alterations
try:
next(alterations_iter) # return value is ignored
except StopIteration:
s3 = False
# end loop when all iters are finished
if not any((s1, s2, s3)):
break
if recursive:
for (name, component) in self._components.items():
component.build(recursive=recursive)
|
[
"def",
"build",
"(",
"self",
",",
"recursive",
"=",
"True",
")",
":",
"# initialize values",
"self",
".",
"_components",
"=",
"{",
"}",
"self",
".",
"_constraints",
"=",
"[",
"]",
"def",
"genwrap",
"(",
"obj",
",",
"name",
",",
"iter_type",
"=",
"None",
")",
":",
"# Force obj to act like a generator.",
"# this wrapper will always yield at least once.",
"if",
"isinstance",
"(",
"obj",
",",
"GeneratorType",
")",
":",
"for",
"i",
"in",
"obj",
":",
"if",
"(",
"iter_type",
"is",
"not",
"None",
")",
"and",
"(",
"not",
"isinstance",
"(",
"i",
",",
"iter_type",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"%s must yield a %r\"",
"%",
"(",
"name",
",",
"iter_type",
")",
")",
"yield",
"i",
"else",
":",
"if",
"(",
"iter_type",
"is",
"not",
"None",
")",
"and",
"(",
"not",
"isinstance",
"(",
"obj",
",",
"iter_type",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"%s must return a %r\"",
"%",
"(",
"name",
",",
"iter_type",
")",
")",
"yield",
"obj",
"# Make Components",
"components_iter",
"=",
"genwrap",
"(",
"self",
".",
"make_components",
"(",
")",
",",
"\"make_components\"",
",",
"dict",
")",
"new_components",
"=",
"next",
"(",
"components_iter",
")",
"self",
".",
"verify_components",
"(",
"new_components",
")",
"self",
".",
"_components",
".",
"update",
"(",
"new_components",
")",
"# Make Constraints",
"constraints_iter",
"=",
"genwrap",
"(",
"self",
".",
"make_constraints",
"(",
")",
",",
"\"make_components\"",
",",
"list",
")",
"new_constraints",
"=",
"next",
"(",
"constraints_iter",
")",
"self",
".",
"verify_constraints",
"(",
"new_constraints",
")",
"self",
".",
"_constraints",
"+=",
"new_constraints",
"# Run solver : sets components' world coordinates",
"self",
".",
"solve",
"(",
")",
"# Make Alterations",
"alterations_iter",
"=",
"genwrap",
"(",
"self",
".",
"make_alterations",
"(",
")",
",",
"\"make_alterations\"",
")",
"next",
"(",
"alterations_iter",
")",
"# return value is ignored",
"while",
"True",
":",
"(",
"s1",
",",
"s2",
",",
"s3",
")",
"=",
"(",
"True",
",",
"True",
",",
"True",
")",
"# stages",
"# Make Components",
"new_components",
"=",
"None",
"try",
":",
"new_components",
"=",
"next",
"(",
"components_iter",
")",
"self",
".",
"verify_components",
"(",
"new_components",
")",
"self",
".",
"_components",
".",
"update",
"(",
"new_components",
")",
"except",
"StopIteration",
":",
"s1",
"=",
"False",
"# Make Constraints",
"new_constraints",
"=",
"None",
"try",
":",
"new_constraints",
"=",
"next",
"(",
"constraints_iter",
")",
"self",
".",
"verify_constraints",
"(",
"new_constraints",
")",
"self",
".",
"_constraints",
"+=",
"new_constraints",
"except",
"StopIteration",
":",
"s2",
"=",
"False",
"# Run solver : sets components' world coordinates",
"if",
"new_components",
"or",
"new_constraints",
":",
"self",
".",
"solve",
"(",
")",
"# Make Alterations",
"try",
":",
"next",
"(",
"alterations_iter",
")",
"# return value is ignored",
"except",
"StopIteration",
":",
"s3",
"=",
"False",
"# end loop when all iters are finished",
"if",
"not",
"any",
"(",
"(",
"s1",
",",
"s2",
",",
"s3",
")",
")",
":",
"break",
"if",
"recursive",
":",
"for",
"(",
"name",
",",
"component",
")",
"in",
"self",
".",
"_components",
".",
"items",
"(",
")",
":",
"component",
".",
"build",
"(",
"recursive",
"=",
"recursive",
")"
] | 36.6 | 20.844444 |
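The genwrap helper above normalises make_* return values so a plain value and a generator can be consumed the same way. A simplified, runnable copy without the type checks:

from types import GeneratorType

def genwrap(obj):
    # A generator is passed through; any other value is yielded exactly once.
    if isinstance(obj, GeneratorType):
        for i in obj:
            yield i
    else:
        yield obj

print(list(genwrap({'a': 1})))                         # [{'a': 1}]
print(list(genwrap(d for d in [{'a': 1}, {'b': 2}])))  # [{'a': 1}, {'b': 2}]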
def removeCodedValue(self, name):
"""removes a codedValue by name"""
for i in self._codedValues:
if i['name'] == name:
self._codedValues.remove(i)
return True
return False
|
[
"def",
"removeCodedValue",
"(",
"self",
",",
"name",
")",
":",
"for",
"i",
"in",
"self",
".",
"_codedValues",
":",
"if",
"i",
"[",
"'name'",
"]",
"==",
"name",
":",
"self",
".",
"_codedValues",
".",
"remove",
"(",
"i",
")",
"return",
"True",
"return",
"False"
] | 33.285714 | 7.857143 |
def client_info(self, client):
"""
Get client info. Uses GET to /clients/<client> interface.
:Args:
* *client*: (str) Client's ID
:Returns: (dict) Client dictionary
"""
client = self._client_id(client)
response = self._get(url.clients_id.format(id=client))
self._check_response(response, 200)
return self._create_response(response)
|
[
"def",
"client_info",
"(",
"self",
",",
"client",
")",
":",
"client",
"=",
"self",
".",
"_client_id",
"(",
"client",
")",
"response",
"=",
"self",
".",
"_get",
"(",
"url",
".",
"clients_id",
".",
"format",
"(",
"id",
"=",
"client",
")",
")",
"self",
".",
"_check_response",
"(",
"response",
",",
"200",
")",
"return",
"self",
".",
"_create_response",
"(",
"response",
")"
] | 31.230769 | 13.538462 |
def get_sensor_data(**kwargs):
'''
Get sensor readings
Iterates sensor reading objects
:param kwargs:
- api_host=127.0.0.1
- api_user=admin
- api_pass=example
- api_port=623
- api_kg=None
CLI Example:
.. code-block:: bash
salt-call ipmi.get_sensor_data api_host=127.0.0.1 api_user=admin api_pass=pass
'''
import ast
with _IpmiCommand(**kwargs) as s:
data = {}
for reading in s.get_sensor_data():
if reading:
r = ast.literal_eval(repr(reading))
data[r.pop('name')] = r
return data
|
[
"def",
"get_sensor_data",
"(",
"*",
"*",
"kwargs",
")",
":",
"import",
"ast",
"with",
"_IpmiCommand",
"(",
"*",
"*",
"kwargs",
")",
"as",
"s",
":",
"data",
"=",
"{",
"}",
"for",
"reading",
"in",
"s",
".",
"get_sensor_data",
"(",
")",
":",
"if",
"reading",
":",
"r",
"=",
"ast",
".",
"literal_eval",
"(",
"repr",
"(",
"reading",
")",
")",
"data",
"[",
"r",
".",
"pop",
"(",
"'name'",
")",
"]",
"=",
"r",
"return",
"data"
] | 22.444444 | 22 |
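The helper assumes each pyghmi sensor reading repr()s into a dict literal that ast.literal_eval can parse back; a runnable demo with a hand-written reading string:

import ast

reading = "{'name': 'CPU Temp', 'value': 41.0, 'units': 'C'}"  # illustrative repr
r = ast.literal_eval(reading)
name = r.pop('name')
print(name, r)  # CPU Temp {'value': 41.0, 'units': 'C'}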
def get_patch_op(self, keypath, value, op='replace'):
"""
        Return an object that describes a configuration change on the given stage.
        The setting will be applied to all available HTTP methods.
"""
if isinstance(value, bool):
value = str(value).lower()
return {'op': op, 'path': '/*/*/{}'.format(keypath), 'value': value}
|
[
"def",
"get_patch_op",
"(",
"self",
",",
"keypath",
",",
"value",
",",
"op",
"=",
"'replace'",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"value",
"=",
"str",
"(",
"value",
")",
".",
"lower",
"(",
")",
"return",
"{",
"'op'",
":",
"op",
",",
"'path'",
":",
"'/*/*/{}'",
".",
"format",
"(",
"keypath",
")",
",",
"'value'",
":",
"value",
"}"
] | 46.625 | 15.625 |
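A standalone, runnable copy of the method for illustration; the keypath is a hypothetical API Gateway setting key:

def get_patch_op(keypath, value, op='replace'):
    # Booleans are lowercased to match the API's string representation.
    if isinstance(value, bool):
        value = str(value).lower()
    return {'op': op, 'path': '/*/*/{}'.format(keypath), 'value': value}

print(get_patch_op('logging/loglevel', True))
# {'op': 'replace', 'path': '/*/*/logging/loglevel', 'value': 'true'}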
def next(self):
"""Return the next match; raises Exception if no next match available"""
# Check the state and find the next match as a side-effect if necessary.
if not self.has_next():
raise StopIteration("No next match")
# Don't retain that memory any longer than necessary.
result = self._last_match
self._last_match = None
self._state = PhoneNumberMatcher._NOT_READY
return result
|
[
"def",
"next",
"(",
"self",
")",
":",
"# Check the state and find the next match as a side-effect if necessary.",
"if",
"not",
"self",
".",
"has_next",
"(",
")",
":",
"raise",
"StopIteration",
"(",
"\"No next match\"",
")",
"# Don't retain that memory any longer than necessary.",
"result",
"=",
"self",
".",
"_last_match",
"self",
".",
"_last_match",
"=",
"None",
"self",
".",
"_state",
"=",
"PhoneNumberMatcher",
".",
"_NOT_READY",
"return",
"result"
] | 45.1 | 14.9 |
async def profile(self, ctx, platform, name):
'''Fetch a profile.'''
player = await self.client.get_player(platform, name)
solos = await player.get_solos()
await ctx.send("# of kills in solos for {}: {}".format(name,solos.kills.value))
|
[
"async",
"def",
"profile",
"(",
"self",
",",
"ctx",
",",
"platform",
",",
"name",
")",
":",
"player",
"=",
"await",
"self",
".",
"client",
".",
"get_player",
"(",
"platform",
",",
"name",
")",
"solos",
"=",
"await",
"player",
".",
"get_solos",
"(",
")",
"await",
"ctx",
".",
"send",
"(",
"\"# of kills in solos for {}: {}\"",
".",
"format",
"(",
"name",
",",
"solos",
".",
"kills",
".",
"value",
")",
")"
] | 37.571429 | 23.285714 |
def __obj2choices(self, values):
"""
- json list of key, value pairs:
Example: [["A", "Option 1 Label"], ["B", "Option 2 Label"]]
- space separated string with a list of options:
Example: "Option1 Opt2 Opt3"
will be converted to a key, value pair of the following form:
(("Option1", "Option1), ("Opt2", "Opt2"), ("Opt3", "Opt3"))
- an iterable of key, value pairs
"""
choices = values
# choices from string
if type(values) == type(''):
obj = None
# try json string
try:
obj = json.loads(values)
except ValueError:
obj = values
if type(obj) == type([]):
choices = obj
else:
choices = obj.split()
return choices
|
[
"def",
"__obj2choices",
"(",
"self",
",",
"values",
")",
":",
"choices",
"=",
"values",
"# choices from string",
"if",
"type",
"(",
"values",
")",
"==",
"type",
"(",
"''",
")",
":",
"obj",
"=",
"None",
"# try json string",
"try",
":",
"obj",
"=",
"json",
".",
"loads",
"(",
"values",
")",
"except",
"ValueError",
":",
"obj",
"=",
"values",
"if",
"type",
"(",
"obj",
")",
"==",
"type",
"(",
"[",
"]",
")",
":",
"choices",
"=",
"obj",
"else",
":",
"choices",
"=",
"obj",
".",
"split",
"(",
")",
"return",
"choices"
] | 35.083333 | 11.833333 |
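A runnable demo of the two string forms the coercion accepts, using the docstring's own examples:

import json

print(json.loads('[["A", "Option 1 Label"], ["B", "Option 2 Label"]]'))
# [['A', 'Option 1 Label'], ['B', 'Option 2 Label']]
print("Option1 Opt2 Opt3".split())
# ['Option1', 'Opt2', 'Opt3']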
def estimate_gaussian(X):
"""
    Returns the mean and the variance of a data set X, assuming that
    the points are drawn from a Gaussian distribution.
"""
mean = np.mean(X,0)
variance = np.var(X,0)
return Gaussian(mean,variance)
|
[
"def",
"estimate_gaussian",
"(",
"X",
")",
":",
"mean",
"=",
"np",
".",
"mean",
"(",
"X",
",",
"0",
")",
"variance",
"=",
"np",
".",
"var",
"(",
"X",
",",
"0",
")",
"return",
"Gaussian",
"(",
"mean",
",",
"variance",
")"
] | 31.375 | 12.625 |
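The Gaussian return type is presumably a simple container (e.g., a namedtuple) in the source; the numpy calls themselves are easy to check in isolation:

import numpy as np

X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
print(np.mean(X, 0))  # [3. 4.] -- per-column means
print(np.var(X, 0))   # [2.66666667 2.66666667] -- per-column variances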
def filter(self, predicates):
"""Summary
Args:
grouping_column_name (TYPE): Description
Returns:
TYPE: Description
"""
tys = []
for col_name, raw_column in self.raw_columns.items():
dtype = str(raw_column.dtype)
if dtype == 'object' or dtype == '|S64':
weld_type = WeldVec(WeldChar())
else:
weld_type = grizzly_impl.numpy_to_weld_type_mapping[dtype]
tys.append(weld_type)
if len(tys) == 1:
weld_type = tys[0]
else:
weld_type = WeldStruct(tys)
if isinstance(predicates, SeriesWeld):
predicates = predicates.expr
return DataFrameWeldExpr(
grizzly_impl.filter(
grizzly_impl.zip_columns(
self.raw_columns.values(),
),
predicates
),
self.raw_columns.keys(),
weld_type
)
|
[
"def",
"filter",
"(",
"self",
",",
"predicates",
")",
":",
"tys",
"=",
"[",
"]",
"for",
"col_name",
",",
"raw_column",
"in",
"self",
".",
"raw_columns",
".",
"items",
"(",
")",
":",
"dtype",
"=",
"str",
"(",
"raw_column",
".",
"dtype",
")",
"if",
"dtype",
"==",
"'object'",
"or",
"dtype",
"==",
"'|S64'",
":",
"weld_type",
"=",
"WeldVec",
"(",
"WeldChar",
"(",
")",
")",
"else",
":",
"weld_type",
"=",
"grizzly_impl",
".",
"numpy_to_weld_type_mapping",
"[",
"dtype",
"]",
"tys",
".",
"append",
"(",
"weld_type",
")",
"if",
"len",
"(",
"tys",
")",
"==",
"1",
":",
"weld_type",
"=",
"tys",
"[",
"0",
"]",
"else",
":",
"weld_type",
"=",
"WeldStruct",
"(",
"tys",
")",
"if",
"isinstance",
"(",
"predicates",
",",
"SeriesWeld",
")",
":",
"predicates",
"=",
"predicates",
".",
"expr",
"return",
"DataFrameWeldExpr",
"(",
"grizzly_impl",
".",
"filter",
"(",
"grizzly_impl",
".",
"zip_columns",
"(",
"self",
".",
"raw_columns",
".",
"values",
"(",
")",
",",
")",
",",
"predicates",
")",
",",
"self",
".",
"raw_columns",
".",
"keys",
"(",
")",
",",
"weld_type",
")"
] | 27.166667 | 16.972222 |
def add_fast(self, filepath, hashfn=None, force=False):
"""
Bespoke function to add filepaths but set shortcircuit to True, which
means only the first calculable hash will be stored. In this way only
one "fast" hashing function need be called for each filepath.
"""
if hashfn is None:
hashfn = fast_hashes
self.add(filepath, hashfn, force, shortcircuit=True)
|
[
"def",
"add_fast",
"(",
"self",
",",
"filepath",
",",
"hashfn",
"=",
"None",
",",
"force",
"=",
"False",
")",
":",
"if",
"hashfn",
"is",
"None",
":",
"hashfn",
"=",
"fast_hashes",
"self",
".",
"add",
"(",
"filepath",
",",
"hashfn",
",",
"force",
",",
"shortcircuit",
"=",
"True",
")"
] | 46.444444 | 17.777778 |
async def heater_control(self, device_id, fan_status=None,
power_status=None):
"""Set heater temps."""
heater = self.heaters.get(device_id)
if heater is None:
_LOGGER.error("No such device")
return
if fan_status is None:
fan_status = heater.fan_status
if power_status is None:
power_status = heater.power_status
operation = 0 if fan_status == heater.fan_status else 4
payload = {"subDomain": heater.sub_domain,
"deviceId": device_id,
"testStatus": 1,
"operation": operation,
"status": power_status,
"windStatus": fan_status,
"holdTemp": heater.set_temp,
"tempType": 0,
"powerLevel": 0}
await self.request("deviceControl", payload)
|
[
"async",
"def",
"heater_control",
"(",
"self",
",",
"device_id",
",",
"fan_status",
"=",
"None",
",",
"power_status",
"=",
"None",
")",
":",
"heater",
"=",
"self",
".",
"heaters",
".",
"get",
"(",
"device_id",
")",
"if",
"heater",
"is",
"None",
":",
"_LOGGER",
".",
"error",
"(",
"\"No such device\"",
")",
"return",
"if",
"fan_status",
"is",
"None",
":",
"fan_status",
"=",
"heater",
".",
"fan_status",
"if",
"power_status",
"is",
"None",
":",
"power_status",
"=",
"heater",
".",
"power_status",
"operation",
"=",
"0",
"if",
"fan_status",
"==",
"heater",
".",
"fan_status",
"else",
"4",
"payload",
"=",
"{",
"\"subDomain\"",
":",
"heater",
".",
"sub_domain",
",",
"\"deviceId\"",
":",
"device_id",
",",
"\"testStatus\"",
":",
"1",
",",
"\"operation\"",
":",
"operation",
",",
"\"status\"",
":",
"power_status",
",",
"\"windStatus\"",
":",
"fan_status",
",",
"\"holdTemp\"",
":",
"heater",
".",
"set_temp",
",",
"\"tempType\"",
":",
"0",
",",
"\"powerLevel\"",
":",
"0",
"}",
"await",
"self",
".",
"request",
"(",
"\"deviceControl\"",
",",
"payload",
")"
] | 41 | 7.863636 |
def _get_metadata(network_id, user_id):
"""
Get all the metadata in a network, across all scenarios
returns a dictionary of dict objects, keyed on dataset ID
"""
log.info("Getting Metadata")
dataset_qry = db.DBSession.query(
Dataset
).outerjoin(DatasetOwner, and_(DatasetOwner.dataset_id==Dataset.id, DatasetOwner.user_id==user_id)).filter(
or_(Dataset.hidden=='N', DatasetOwner.user_id != None),
Scenario.id==ResourceScenario.scenario_id,
Scenario.network_id==network_id,
Dataset.id==ResourceScenario.dataset_id).distinct().subquery()
rs_qry = db.DBSession.query(
Metadata
).join(dataset_qry, Metadata.dataset_id==dataset_qry.c.id)
x = time.time()
logging.info("Getting all matadata")
all_metadata = db.DBSession.execute(rs_qry.statement).fetchall()
log.info("%s metadata jointly retrieved in %s",len(all_metadata), time.time()-x)
logging.info("metadata retrieved. Processing results...")
x = time.time()
metadata_dict = dict()
for m in all_metadata:
if metadata_dict.get(m.dataset_id):
metadata_dict[m.dataset_id][m.key] = six.text_type(m.value)
else:
metadata_dict[m.dataset_id] = {m.key : six.text_type(m.value)}
logging.info("metadata processed in %s", time.time()-x)
return metadata_dict
|
[
"def",
"_get_metadata",
"(",
"network_id",
",",
"user_id",
")",
":",
"log",
".",
"info",
"(",
"\"Getting Metadata\"",
")",
"dataset_qry",
"=",
"db",
".",
"DBSession",
".",
"query",
"(",
"Dataset",
")",
".",
"outerjoin",
"(",
"DatasetOwner",
",",
"and_",
"(",
"DatasetOwner",
".",
"dataset_id",
"==",
"Dataset",
".",
"id",
",",
"DatasetOwner",
".",
"user_id",
"==",
"user_id",
")",
")",
".",
"filter",
"(",
"or_",
"(",
"Dataset",
".",
"hidden",
"==",
"'N'",
",",
"DatasetOwner",
".",
"user_id",
"!=",
"None",
")",
",",
"Scenario",
".",
"id",
"==",
"ResourceScenario",
".",
"scenario_id",
",",
"Scenario",
".",
"network_id",
"==",
"network_id",
",",
"Dataset",
".",
"id",
"==",
"ResourceScenario",
".",
"dataset_id",
")",
".",
"distinct",
"(",
")",
".",
"subquery",
"(",
")",
"rs_qry",
"=",
"db",
".",
"DBSession",
".",
"query",
"(",
"Metadata",
")",
".",
"join",
"(",
"dataset_qry",
",",
"Metadata",
".",
"dataset_id",
"==",
"dataset_qry",
".",
"c",
".",
"id",
")",
"x",
"=",
"time",
".",
"time",
"(",
")",
"logging",
".",
"info",
"(",
"\"Getting all matadata\"",
")",
"all_metadata",
"=",
"db",
".",
"DBSession",
".",
"execute",
"(",
"rs_qry",
".",
"statement",
")",
".",
"fetchall",
"(",
")",
"log",
".",
"info",
"(",
"\"%s metadata jointly retrieved in %s\"",
",",
"len",
"(",
"all_metadata",
")",
",",
"time",
".",
"time",
"(",
")",
"-",
"x",
")",
"logging",
".",
"info",
"(",
"\"metadata retrieved. Processing results...\"",
")",
"x",
"=",
"time",
".",
"time",
"(",
")",
"metadata_dict",
"=",
"dict",
"(",
")",
"for",
"m",
"in",
"all_metadata",
":",
"if",
"metadata_dict",
".",
"get",
"(",
"m",
".",
"dataset_id",
")",
":",
"metadata_dict",
"[",
"m",
".",
"dataset_id",
"]",
"[",
"m",
".",
"key",
"]",
"=",
"six",
".",
"text_type",
"(",
"m",
".",
"value",
")",
"else",
":",
"metadata_dict",
"[",
"m",
".",
"dataset_id",
"]",
"=",
"{",
"m",
".",
"key",
":",
"six",
".",
"text_type",
"(",
"m",
".",
"value",
")",
"}",
"logging",
".",
"info",
"(",
"\"metadata processed in %s\"",
",",
"time",
".",
"time",
"(",
")",
"-",
"x",
")",
"return",
"metadata_dict"
] | 39.542857 | 22.342857 |
def user(session, uid, ladder_ids=None):
"""Get all possible user info by name."""
data = get_user(session, uid)
resp = dict(data)
if not ladder_ids:
return resp
resp['ladders'] = {}
for ladder_id in ladder_ids:
if isinstance(ladder_id, str):
ladder_id = lookup_ladder_id(ladder_id)
try:
ladder_data = dict(get_ladder(session, ladder_id, user_id=uid))
resp['ladders'][ladder_id] = ladder_data
except VooblyError:
# No ranking on ladder
pass
return resp
|
[
"def",
"user",
"(",
"session",
",",
"uid",
",",
"ladder_ids",
"=",
"None",
")",
":",
"data",
"=",
"get_user",
"(",
"session",
",",
"uid",
")",
"resp",
"=",
"dict",
"(",
"data",
")",
"if",
"not",
"ladder_ids",
":",
"return",
"resp",
"resp",
"[",
"'ladders'",
"]",
"=",
"{",
"}",
"for",
"ladder_id",
"in",
"ladder_ids",
":",
"if",
"isinstance",
"(",
"ladder_id",
",",
"str",
")",
":",
"ladder_id",
"=",
"lookup_ladder_id",
"(",
"ladder_id",
")",
"try",
":",
"ladder_data",
"=",
"dict",
"(",
"get_ladder",
"(",
"session",
",",
"ladder_id",
",",
"user_id",
"=",
"uid",
")",
")",
"resp",
"[",
"'ladders'",
"]",
"[",
"ladder_id",
"]",
"=",
"ladder_data",
"except",
"VooblyError",
":",
"# No ranking on ladder",
"pass",
"return",
"resp"
] | 32.705882 | 14.411765 |
def execute(self, uri, namespace, action, timeout=2, **kwargs):
"""Executes a given action with optional arguments.
        The execution of an action on an UPnP/TR64 device needs more than just the name of the action. It also needs
        the control URI which is called to place the action, and the namespace aka service type. The namespace
        defines the scope or service type of the given action; the same action name can appear in different
        namespaces.
        The needed information can be obtained either through the documentation of the vendor of the device, or
        through a discovery request which returns the URL to the root device description XML.
:param str uri: the control URI, for example ``/upnp/control/hosts``
:param str namespace: the namespace for the given action, for example ``urn:dslforum-org:service:Hosts:1``
:param str action: the name of the action to call, for example ``GetGenericHostEntry``
:param float timeout: the timeout to wait for the action to be executed
:param kwargs: optional arguments for the given action, depends if the action needs parameter. The arguments
are given as dict where the key is the parameter name and the value the value of the parameter.
:type kwargs: dict[str, str]
:return: returns the results of the action, if any. The results are structured as dict where the key is the
name of the result argument and the value is the value of the result.
:rtype: dict[str,str]
:raises ValueError: if parameters are not set correctly
:raises requests.exceptions.ConnectionError: when the action can not be placed on the device
        :raises requests.exceptions.ConnectTimeout: when the download times out
Example:
::
device = DeviceTR64(...)
device.execute("/upnp/control/hosts", "urn:dslforum-org:service:Hosts:1",
"GetGenericHostEntry", {"NewIndex": 1})
{'NewActive': '0', 'NewIPAddress': '192.168.0.23', 'NewMACAddress': '9C:20:7B:E7:FF:5F',
'NewInterfaceType': 'Ethernet', 'NewHostName': 'Apple-TV', 'NewAddressSource': 'DHCP',
'NewLeaseTimeRemaining': '0'}
.. seealso::
`Additional short explanation of the UPnP protocol <http://www.upnp-hacks.org/upnp.html>`_
:class:`~simpletr64.Discover`, :meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions`,
:meth:`~simpletr64.DeviceTR64.loadSCPD`
"""
if not uri:
raise ValueError("No action URI has been defined.")
if not namespace:
raise ValueError("No namespace has been defined.")
if not action:
raise ValueError("No action has been defined.")
# soap headers
header = {'Content-Type': 'text/xml; charset="UTF-8"',
'Soapaction': '"' + namespace + "#" + action + '"'}
# build SOAP body
body = '''<?xml version="1.0" encoding="UTF-8"?>
<s:Envelope
s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<s:Header/>
<s:Body>\n'''
body += " <u:" + action + ' xmlns="' + namespace + '">\n'
arguments = {}
for key in kwargs.keys():
body += " <" + key + ">" + str(kwargs[key]) + "</" + key + ">\n"
arguments[key] = str(kwargs[key])
body += " </u:" + action + ">\n"
body += '''</s:Body>
</s:Envelope>'''
# setup proxies
proxies = {}
if self.__httpsProxy:
proxies = {"https": self.__httpsProxy}
if self.__httpProxy:
proxies = {"http": self.__httpProxy}
# setup authentication
auth = None
if self.__password:
auth = HTTPDigestAuth(self.__username, self.__password)
# build the URL
location = self.__protocol + "://" + self.__hostname + ":" + str(self.port) + uri
# Post http request
request = requests.post(location, data=body, headers=header, auth=auth, proxies=proxies, timeout=float(timeout),
verify=self.__verify)
if request.status_code != 200:
errorStr = DeviceTR64._extractErrorString(request)
raise ValueError('Could not execute "' + action + str(arguments) + '": ' + str(request.status_code) +
' - ' + request.reason + " -- " + errorStr)
# parse XML return
try:
root = ET.fromstring(request.text.encode('utf-8'))
except Exception as e:
raise ValueError("Can not parse results for the action: " + str(e))
# iterate in the XML structure to get the action result
actionNode = root[0][0]
# we need to remove XML namespace for the action node
namespaceLength = len(namespace) + 2 # add braces
tag = actionNode.tag[namespaceLength:]
if tag != (action + "Response"):
raise ValueError('Soap result structure is wrong, expected action "' + action +
'Response" got "' + tag + '".')
# pack all the received results
results = {}
for resultNode in actionNode:
results[resultNode.tag] = resultNode.text
return results
|
[
"def",
"execute",
"(",
"self",
",",
"uri",
",",
"namespace",
",",
"action",
",",
"timeout",
"=",
"2",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"uri",
":",
"raise",
"ValueError",
"(",
"\"No action URI has been defined.\"",
")",
"if",
"not",
"namespace",
":",
"raise",
"ValueError",
"(",
"\"No namespace has been defined.\"",
")",
"if",
"not",
"action",
":",
"raise",
"ValueError",
"(",
"\"No action has been defined.\"",
")",
"# soap headers",
"header",
"=",
"{",
"'Content-Type'",
":",
"'text/xml; charset=\"UTF-8\"'",
",",
"'Soapaction'",
":",
"'\"'",
"+",
"namespace",
"+",
"\"#\"",
"+",
"action",
"+",
"'\"'",
"}",
"# build SOAP body",
"body",
"=",
"'''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<s:Envelope\n s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\"\n xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\"\n xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n <s:Header/>\n <s:Body>\\n'''",
"body",
"+=",
"\" <u:\"",
"+",
"action",
"+",
"' xmlns=\"'",
"+",
"namespace",
"+",
"'\">\\n'",
"arguments",
"=",
"{",
"}",
"for",
"key",
"in",
"kwargs",
".",
"keys",
"(",
")",
":",
"body",
"+=",
"\" <\"",
"+",
"key",
"+",
"\">\"",
"+",
"str",
"(",
"kwargs",
"[",
"key",
"]",
")",
"+",
"\"</\"",
"+",
"key",
"+",
"\">\\n\"",
"arguments",
"[",
"key",
"]",
"=",
"str",
"(",
"kwargs",
"[",
"key",
"]",
")",
"body",
"+=",
"\" </u:\"",
"+",
"action",
"+",
"\">\\n\"",
"body",
"+=",
"'''</s:Body>\n</s:Envelope>'''",
"# setup proxies",
"proxies",
"=",
"{",
"}",
"if",
"self",
".",
"__httpsProxy",
":",
"proxies",
"=",
"{",
"\"https\"",
":",
"self",
".",
"__httpsProxy",
"}",
"if",
"self",
".",
"__httpProxy",
":",
"proxies",
"=",
"{",
"\"http\"",
":",
"self",
".",
"__httpProxy",
"}",
"# setup authentication",
"auth",
"=",
"None",
"if",
"self",
".",
"__password",
":",
"auth",
"=",
"HTTPDigestAuth",
"(",
"self",
".",
"__username",
",",
"self",
".",
"__password",
")",
"# build the URL",
"location",
"=",
"self",
".",
"__protocol",
"+",
"\"://\"",
"+",
"self",
".",
"__hostname",
"+",
"\":\"",
"+",
"str",
"(",
"self",
".",
"port",
")",
"+",
"uri",
"# Post http request",
"request",
"=",
"requests",
".",
"post",
"(",
"location",
",",
"data",
"=",
"body",
",",
"headers",
"=",
"header",
",",
"auth",
"=",
"auth",
",",
"proxies",
"=",
"proxies",
",",
"timeout",
"=",
"float",
"(",
"timeout",
")",
",",
"verify",
"=",
"self",
".",
"__verify",
")",
"if",
"request",
".",
"status_code",
"!=",
"200",
":",
"errorStr",
"=",
"DeviceTR64",
".",
"_extractErrorString",
"(",
"request",
")",
"raise",
"ValueError",
"(",
"'Could not execute \"'",
"+",
"action",
"+",
"str",
"(",
"arguments",
")",
"+",
"'\": '",
"+",
"str",
"(",
"request",
".",
"status_code",
")",
"+",
"' - '",
"+",
"request",
".",
"reason",
"+",
"\" -- \"",
"+",
"errorStr",
")",
"# parse XML return",
"try",
":",
"root",
"=",
"ET",
".",
"fromstring",
"(",
"request",
".",
"text",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"\"Can not parse results for the action: \"",
"+",
"str",
"(",
"e",
")",
")",
"# iterate in the XML structure to get the action result",
"actionNode",
"=",
"root",
"[",
"0",
"]",
"[",
"0",
"]",
"# we need to remove XML namespace for the action node",
"namespaceLength",
"=",
"len",
"(",
"namespace",
")",
"+",
"2",
"# add braces",
"tag",
"=",
"actionNode",
".",
"tag",
"[",
"namespaceLength",
":",
"]",
"if",
"tag",
"!=",
"(",
"action",
"+",
"\"Response\"",
")",
":",
"raise",
"ValueError",
"(",
"'Soap result structure is wrong, expected action \"'",
"+",
"action",
"+",
"'Response\" got \"'",
"+",
"tag",
"+",
"'\".'",
")",
"# pack all the received results",
"results",
"=",
"{",
"}",
"for",
"resultNode",
"in",
"actionNode",
":",
"results",
"[",
"resultNode",
".",
"tag",
"]",
"=",
"resultNode",
".",
"text",
"return",
"results"
] | 42.03125 | 30.515625 |
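The SOAPAction header is the namespace and action name joined with '#' and wrapped in literal quotes, exactly as built above; a runnable demo using the docstring's example service:

namespace = "urn:dslforum-org:service:Hosts:1"
action = "GetGenericHostEntry"
header = {'Content-Type': 'text/xml; charset="UTF-8"',
          'Soapaction': '"' + namespace + "#" + action + '"'}
print(header['Soapaction'])
# "urn:dslforum-org:service:Hosts:1#GetGenericHostEntry"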
def likelihood3(args):
"""
%prog likelihood3 140_20.json 140_70.json
Plot the likelihood surface and marginal distributions for two settings.
"""
from matplotlib import gridspec
p = OptionParser(likelihood3.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="10x10",
style="white", cmap="coolwarm")
if len(args) != 2:
sys.exit(not p.print_help())
jsonfile1, jsonfile2 = args
fig = plt.figure(figsize=(iopts.w, iopts.h))
gs = gridspec.GridSpec(9, 2)
ax1 = fig.add_subplot(gs[:4, 0])
ax2 = fig.add_subplot(gs[:2, 1])
ax3 = fig.add_subplot(gs[2:4, 1])
ax4 = fig.add_subplot(gs[5:, 0])
ax5 = fig.add_subplot(gs[5:7, 1])
ax6 = fig.add_subplot(gs[7:, 1])
plt.tight_layout(pad=2)
plot_panel(jsonfile1, ax1, ax2, ax3, opts.cmap)
plot_panel(jsonfile2, ax4, ax5, ax6, opts.cmap)
root = fig.add_axes([0, 0, 1, 1])
pad = .02
panel_labels(root, ((pad, 1 - pad, "A"), (pad, 4. / 9, "B")))
normalize_axes(root)
image_name = "likelihood3." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
|
[
"def",
"likelihood3",
"(",
"args",
")",
":",
"from",
"matplotlib",
"import",
"gridspec",
"p",
"=",
"OptionParser",
"(",
"likelihood3",
".",
"__doc__",
")",
"opts",
",",
"args",
",",
"iopts",
"=",
"p",
".",
"set_image_options",
"(",
"args",
",",
"figsize",
"=",
"\"10x10\"",
",",
"style",
"=",
"\"white\"",
",",
"cmap",
"=",
"\"coolwarm\"",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"jsonfile1",
",",
"jsonfile2",
"=",
"args",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"iopts",
".",
"w",
",",
"iopts",
".",
"h",
")",
")",
"gs",
"=",
"gridspec",
".",
"GridSpec",
"(",
"9",
",",
"2",
")",
"ax1",
"=",
"fig",
".",
"add_subplot",
"(",
"gs",
"[",
":",
"4",
",",
"0",
"]",
")",
"ax2",
"=",
"fig",
".",
"add_subplot",
"(",
"gs",
"[",
":",
"2",
",",
"1",
"]",
")",
"ax3",
"=",
"fig",
".",
"add_subplot",
"(",
"gs",
"[",
"2",
":",
"4",
",",
"1",
"]",
")",
"ax4",
"=",
"fig",
".",
"add_subplot",
"(",
"gs",
"[",
"5",
":",
",",
"0",
"]",
")",
"ax5",
"=",
"fig",
".",
"add_subplot",
"(",
"gs",
"[",
"5",
":",
"7",
",",
"1",
"]",
")",
"ax6",
"=",
"fig",
".",
"add_subplot",
"(",
"gs",
"[",
"7",
":",
",",
"1",
"]",
")",
"plt",
".",
"tight_layout",
"(",
"pad",
"=",
"2",
")",
"plot_panel",
"(",
"jsonfile1",
",",
"ax1",
",",
"ax2",
",",
"ax3",
",",
"opts",
".",
"cmap",
")",
"plot_panel",
"(",
"jsonfile2",
",",
"ax4",
",",
"ax5",
",",
"ax6",
",",
"opts",
".",
"cmap",
")",
"root",
"=",
"fig",
".",
"add_axes",
"(",
"[",
"0",
",",
"0",
",",
"1",
",",
"1",
"]",
")",
"pad",
"=",
".02",
"panel_labels",
"(",
"root",
",",
"(",
"(",
"pad",
",",
"1",
"-",
"pad",
",",
"\"A\"",
")",
",",
"(",
"pad",
",",
"4.",
"/",
"9",
",",
"\"B\"",
")",
")",
")",
"normalize_axes",
"(",
"root",
")",
"image_name",
"=",
"\"likelihood3.\"",
"+",
"iopts",
".",
"format",
"savefig",
"(",
"image_name",
",",
"dpi",
"=",
"iopts",
".",
"dpi",
",",
"iopts",
"=",
"iopts",
")"
] | 31.828571 | 15.6 |
def GC_partial(portion):
"""Manually compute GC content percentage in a DNA string, taking
ambiguous values into account (according to standard IUPAC notation).
"""
sequence_count = collections.Counter(portion)
gc = ((sum([sequence_count[i] for i in 'gGcCsS']) +
sum([sequence_count[i] for i in 'DdHh']) / 3.0 +
2 * sum([sequence_count[i] for i in 'VvBb']) / 3.0 +
sum([sequence_count[i] for i in 'NnYyRrKkMm']) / 2.0) / len(portion))
return 0 or 100 * gc
|
[
"def",
"GC_partial",
"(",
"portion",
")",
":",
"sequence_count",
"=",
"collections",
".",
"Counter",
"(",
"portion",
")",
"gc",
"=",
"(",
"(",
"sum",
"(",
"[",
"sequence_count",
"[",
"i",
"]",
"for",
"i",
"in",
"'gGcCsS'",
"]",
")",
"+",
"sum",
"(",
"[",
"sequence_count",
"[",
"i",
"]",
"for",
"i",
"in",
"'DdHh'",
"]",
")",
"/",
"3.0",
"+",
"2",
"*",
"sum",
"(",
"[",
"sequence_count",
"[",
"i",
"]",
"for",
"i",
"in",
"'VvBb'",
"]",
")",
"/",
"3.0",
"+",
"sum",
"(",
"[",
"sequence_count",
"[",
"i",
"]",
"for",
"i",
"in",
"'NnYyRrKkMm'",
"]",
")",
"/",
"2.0",
")",
"/",
"len",
"(",
"portion",
")",
")",
"return",
"0",
"or",
"100",
"*",
"gc"
] | 45.454545 | 18.909091 |
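A runnable check of the weighting scheme: G/C/S count fully, N-type ambiguity codes count half, V/B two-thirds, D/H one-third. The sequence is made up:

import collections

portion = "ATGCGCNNSV"  # illustrative sequence with ambiguous IUPAC codes
c = collections.Counter(portion)
gc = ((sum(c[i] for i in 'gGcCsS')
       + sum(c[i] for i in 'DdHh') / 3.0
       + 2 * sum(c[i] for i in 'VvBb') / 3.0
       + sum(c[i] for i in 'NnYyRrKkMm') / 2.0) / len(portion))
print(round(100 * gc, 2))  # 66.67 for this sequence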