text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Create a CommandResult for a request response.
<END_TASK>
<USER_TASK:>
Description:
def _result(self, command_response, log=None):
    """Create a CommandResult for a request response.

    :param command_response: command request response
    :type command_response: dict
    :param log: list of log messages (optional)
    :type log: list
    :return: a CommandResult containing any given log messages
    :rtype: :py:class:`vici.session.CommandResult`
    """
    # The daemon reports success as the string "yes" in the response dict.
    succeeded = command_response["success"] == "yes"
    error_message = None if succeeded else command_response["errmsg"]
    return CommandResult(succeeded, error_message, log)
<SYSTEM_TASK:>
Send command request with an optional message.
<END_TASK>
<USER_TASK:>
Description:
def request(self, command, message=None):
    """Send command request with an optional message.

    :param command: command to send
    :type command: str
    :param message: message (optional)
    :type message: str
    :return: command result
    :rtype: dict
    """
    # Serialize the optional message payload before packing the request.
    payload = Message.serialize(message) if message is not None else None
    response = self._communicate(Packet.request(command, payload))
    if response.response_type != Packet.CMD_RESPONSE:
        raise SessionException(
            "Unexpected response type {type}, "
            "expected '{response}' (CMD_RESPONSE)".format(
                type=response.response_type,
                response=Packet.CMD_RESPONSE
            )
        )
    return Message.deserialize(response.payload)
<SYSTEM_TASK:>
Send command request and collect and return all emitted events.
<END_TASK>
<USER_TASK:>
Description:
def streamed_request(self, command, event_stream_type, message=None):
    """Send command request and collect and return all emitted events.

    :param command: command to send
    :type command: str
    :param event_stream_type: event type emitted on command execution
    :type event_stream_type: str
    :param message: message (optional)
    :type message: str
    :return: a pair of the command result and a list of emitted events
    :rtype: tuple
    """
    if message is not None:
        message = Message.serialize(message)

    def expect_event_confirm(reply):
        # Both the register and unregister steps must be acknowledged.
        if reply.response_type != Packet.EVENT_CONFIRM:
            raise SessionException(
                "Unexpected response type {type}, "
                "expected '{confirm}' (EVENT_CONFIRM)".format(
                    type=reply.response_type,
                    confirm=Packet.EVENT_CONFIRM,
                )
            )

    # subscribe to event stream
    expect_event_confirm(
        self._communicate(Packet.register_event(event_stream_type)))
    # issue command, and read any event messages
    self.transport.send(Packet.request(command, message))
    events = []
    reply = self._read()
    while reply.response_type == Packet.EVENT:
        events.append(Message.deserialize(reply.payload))
        reply = self._read()
    if reply.response_type != Packet.CMD_RESPONSE:
        raise SessionException(
            "Unexpected response type {type}, "
            "expected '{response}' (CMD_RESPONSE)".format(
                type=reply.response_type,
                response=Packet.CMD_RESPONSE
            )
        )
    command_result = Message.deserialize(reply.payload)
    # unsubscribe from event stream
    expect_event_confirm(
        self._communicate(Packet.unregister_event(event_stream_type)))
    return (command_result, events)
<SYSTEM_TASK:>
Get next packet from transport.
<END_TASK>
<USER_TASK:>
Description:
def _read(self):
    """Get next non-log packet from transport.

    Debug "log" events emitted by the daemon are queued on
    ``self.log_events`` instead of being returned, so callers always
    receive the next substantive packet.

    :return: parsed packet in a tuple with message type and payload
    :rtype: :py:class:`collections.namedtuple`
    """
    # Fix for the original FIXME: the recursive self._read() call's result
    # was discarded, so this method returned None whenever a log event
    # arrived first. A loop makes the intent explicit and returns correctly.
    while True:
        raw_response = self.transport.receive()
        response = Packet.parse(raw_response)
        if response.response_type == Packet.EVENT and response.event_type == "log":
            # queue up any debug log messages, and get next
            self.log_events.append(response)
        else:
            return response
<SYSTEM_TASK:>
Ensure the rect spec is valid.
<END_TASK>
<USER_TASK:>
Description:
def check_bounding_rect(rect_pos):
    """Ensure the rect spec is valid.

    :param rect_pos: iterable of four floats ``(left, bottom, width, height)``,
        each expected to be in ``[0, 1]`` (figure/axis fractions).
    :return: the validated ``rect_pos``, unchanged.
    :raises ValueError: if the spec is not iterable or any value is out of range.
    """
    if not isinstance(rect_pos, Iterable):
        # message fixed: "spect" typo, and the tuple order is
        # (left, bottom, width, height) as unpacked below, not (left, right, ...)
        raise ValueError('rectangle spec must be a tuple of floats '
                         'specifying (left, bottom, width, height)')
    left, bottom, width, height = rect_pos
    for val, name in zip((left, bottom, width, height),
                         ('left', 'bottom', 'width', 'height')):
        if val < 0.0 or val > 1.0:
            raise ValueError("{}'s value must be >=0 and <= 1.0. "
                             "It is now {}".format(name, val))
    # Overhang beyond the figure is only warned about, not rejected.
    if left + width > 1.0:
        print('rect would extend beyond the width of figure/axis by {}'.format(
            left + width - 1.0))
    if bottom + height > 1.0:
        print('rect would extend beyond the height of figure/axis by {}'.format(
            bottom + height - 1.0))
    return rect_pos
<SYSTEM_TASK:>
Ensures requested number of slices is valid.
<END_TASK>
<USER_TASK:>
Description:
def check_num_slices(num_slices, img_shape=None, num_dims=3):
    """Ensures requested number of slices is valid.

    At least 1, and at most the image size (when ``img_shape`` is given).
    """
    requested = num_slices
    # A scalar (or single-element) request applies to every dimension.
    if not isinstance(requested, Iterable) or len(requested) == 1:
        requested = np.repeat(requested, num_dims)
    if img_shape is None:
        # no image to bound against; just enforce the lower bound of 1
        return np.maximum(1, requested)
    if len(requested) != len(img_shape):
        raise ValueError('The number of dimensions requested is different from image.'
                         ' Must be either 1 or equal to {}'.format(len(img_shape) + 1))
    # upper-bound by the image shape, lower-bound by 1
    return np.maximum(1, np.minimum(img_shape, requested))
<SYSTEM_TASK:>
Validation and typecasting.
<END_TASK>
<USER_TASK:>
Description:
def check_int(num,
              num_descr='number',
              min_value=0,
              max_value=np.inf):
    """Validate that ``num`` is finite and within bounds, then cast to int.

    :param num: value to validate and cast
    :param num_descr: description of the value, used in the error message
    :param min_value: inclusive lower bound
    :param max_value: inclusive upper bound (``np.Inf`` was removed in
        NumPy 2.0; ``np.inf`` is the supported spelling)
    :return: ``int(num)``
    :raises ValueError: if ``num`` is not finite or lies outside the bounds.
    """
    if not np.isfinite(num) or num < min_value or num > max_value:
        # message fixed: both bounds are inclusive ("<=", not "<")
        raise ValueError('{}={} is not finite or '
                         'is not >= {} or '
                         'is not <= {}'.format(num_descr, num, min_value, max_value))
    return int(num)
<SYSTEM_TASK:>
Image reader, with additional checks on size.
<END_TASK>
<USER_TASK:>
Description:
def read_image(img_spec, bkground_thresh, ensure_num_dim=3):
    """Image reader, with additional checks on size.

    Can optionally remove stray values close to zero (smaller than 5 %ile).
    """
    img = load_image_from_disk(img_spec)
    # work in floating point so thresholding and filling behave predictably
    if not np.issubdtype(img.dtype, np.floating):
        img = img.astype('float32')
    # dimensionality guard: 3D and 4D are the supported cases
    dim_checkers = {3: check_image_is_3d, 4: check_image_is_4d}
    if ensure_num_dim in dim_checkers:
        img = dim_checkers[ensure_num_dim](img)
    return threshold_image(img, bkground_thresh)
<SYSTEM_TASK:>
Thresholds a given image at a value or percentile.
<END_TASK>
<USER_TASK:>
Description:
def threshold_image(img, bkground_thresh, bkground_value=0.0):
    """
    Thresholds a given image at a value or percentile.

    Replacement value can be specified too. NOTE: ``img`` is modified
    in place (and also returned).

    Parameters
    -----------
    img : ndarray
        Input image
    bkground_thresh : float or str or None
        a threshold value to identify the background, or a percentile
        string such as ``"5%"``; ``None`` disables thresholding.
    bkground_value : float
        a value to fill the background elements with. Default 0.

    Returns
    -------
    thresholded_image : ndarray
        thresholded and/or filled image

    Raises
    ------
    ValueError
        if the threshold specification cannot be interpreted.
    """
    if bkground_thresh is None:
        return img

    if isinstance(bkground_thresh, str):
        try:
            thresh_perc = float(bkground_thresh.replace('%', ''))
        except ValueError:
            # narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only float() parsing can fail here
            raise ValueError(
                'percentile specified could not be parsed correctly '
                ' - must be a string of the form "5%", "10%" etc')
        else:
            thresh_value = np.percentile(img, thresh_perc)
    elif isinstance(bkground_thresh, (float, int)):
        thresh_value = bkground_thresh
    else:
        raise ValueError('Invalid specification for background threshold.')

    # in-place fill of sub-threshold voxels
    img[img < thresh_value] = bkground_value
    return img
<SYSTEM_TASK:>
Row-wise rescale of a given matrix.
<END_TASK>
<USER_TASK:>
Description:
def row_wise_rescale(matrix):
    """
    Row-wise rescale of a given matrix to the unit interval.

    For fMRI data (num_voxels x num_time_points), this would translate to
    voxel-wise normalization over time.

    Parameters
    ----------
    matrix : ndarray
        Input rectangular matrix, typically a carpet of size
        num_voxels x num_4th_dim; 4th_dim could be time points or gradients
        or other appropriate

    Returns
    -------
    normed : ndarray
        normalized matrix

    Raises
    ------
    ValueError
        if the matrix has fewer rows than columns (likely mis-shaped input).
    """
    if matrix.shape[0] <= matrix.shape[1]:
        raise ValueError('Number of voxels is less than the number of time points!! '
                         'Are you sure data is reshaped correctly?')

    min_ = matrix.min(axis=1)
    # np.ptp(): peak to peak, max-min (the ndarray.ptp method was removed
    # in NumPy 2.0; the module-level function works on all versions)
    range_ = np.ptp(matrix, axis=1)
    min_tile = np.tile(min_, (matrix.shape[1], 1)).T
    range_tile = np.tile(range_, (matrix.shape[1], 1)).T
    # avoid division by ~zero for constant rows (np.float was removed in
    # NumPy 1.24+; the builtin float gives the same eps)
    range_tile[range_tile < np.finfo(float).eps] = 1.0

    normed = (matrix - min_tile) / range_tile
    return normed
<SYSTEM_TASK:>
Crop the images to ensure both fit within the bounding box
<END_TASK>
<USER_TASK:>
Description:
def crop_to_extents(img1, img2, padding):
    """Crop the images to ensure both fit within the bounding box."""
    beg1, end1 = crop_coords(img1, padding)
    beg2, end2 = crop_coords(img2, padding)
    # union of the two extents, so the same box fits both images
    begin = np.fmin(beg1, beg2)
    end = np.fmax(end1, end2)
    return (crop_3dimage(img1, begin, end),
            crop_3dimage(img2, begin, end))
<SYSTEM_TASK:>
Find coordinates describing extent of non-zero portion of image, padded
<END_TASK>
<USER_TASK:>
Description:
def crop_coords(img, padding):
    """Find coordinates describing extent of non-zero portion of image, padded.

    Returns a (begin, end) pair of per-axis coordinates; an all-zero image
    yields the full image extent.
    """
    coords = np.nonzero(img)
    # any empty axis means there is nothing to crop around
    if any(len(axis_idx) == 0 for axis_idx in coords):
        return np.zeros(img.ndim, dtype=int), img.shape
    mins = np.array([axis_idx.min() for axis_idx in coords])
    maxs = np.array([axis_idx.max() for axis_idx in coords])
    # pad outward, clipped to the image bounds
    beg_coords = np.fmax(0, mins - padding)
    end_coords = np.fmin(img.shape, maxs + padding)
    return beg_coords, end_coords
<SYSTEM_TASK:>
verifies the sampler requested is valid.
<END_TASK>
<USER_TASK:>
Description:
def verify_sampler(sampler, image, image_shape, view_set, num_slices):
    """Verifies the sampler requested is valid.

    Returns the normalized sampler, the sampling method name
    ('linear', 'percentage' or 'callable'), and possibly-adjusted num_slices.
    """
    if isinstance(sampler, str):
        sampler = sampler.lower()
        if sampler != 'linear':
            raise ValueError('Sampling strategy: {} not implemented.'.format(sampler))
        return sampler, 'linear', num_slices

    if isinstance(sampler, Iterable):
        if any(perc < 0 or perc > 100 for perc in sampler):
            raise ValueError('sampling percentages must be in [0-100]% range')
        # make sure there is a slot for every requested percentage
        if len(sampler) > min(num_slices):
            num_slices = np.maximum(num_slices, len(sampler))
        return np.array(sampler), 'percentage', num_slices

    if callable(sampler):
        # the callable must return a bool deciding whether to keep a slice;
        # probe it with the middle slice of each requested view
        for view in view_set:
            middle_slice = int(image_shape[view] / 2)
            if not isinstance(sampler(get_axis(image, view, middle_slice)), bool):
                raise ValueError('sampler callable must return a boolean value (True/False)')
        return sampler, 'callable', num_slices

    raise NotImplementedError('Invalid choice for sampler! Choose one of: '
                              'linear, percentage or callable')
<SYSTEM_TASK:>
Returns username maximum length as supported by Django.
<END_TASK>
<USER_TASK:>
Description:
def get_username_max_len():
    """Returns username maximum length as supported by Django.

    :rtype: int
    """
    # Fall back to the historical default of 30 when the user model
    # has no 'username' field (e.g. a custom user model).
    for field in USER._meta.fields:
        if field.name == 'username':
            return field.max_length
    return 30
<SYSTEM_TASK:>
Read environment variables based on the settings defined in
<END_TASK>
<USER_TASK:>
Description:
def _get_env(self, config):
""" Read environment variables based on the settings defined in
the defaults. These are expected to be upper-case versions of
the actual setting names, prefixed by ``SCRAPEKIT_``. """ |
for option, value in config.items():
env_name = 'SCRAPEKIT_%s' % option.upper()
value = os.environ.get(env_name, value)
config[option] = value
return config |
<SYSTEM_TASK:>
Initialize the queue and the threads.
<END_TASK>
<USER_TASK:>
Description:
def _spawn(self):
""" Initialize the queue and the threads. """ |
self.queue = Queue(maxsize=self.num_threads * 10)
for i in range(self.num_threads):
t = Thread(target=self._consume)
t.daemon = True
t.start() |
<SYSTEM_TASK:>
Main loop for each thread, handles picking a task off the
<END_TASK>
<USER_TASK:>
Description:
def _consume(self):
    """ Main loop for each thread, handles picking a task off the
    queue, processing it and notifying the queue that it is done.

    Runs forever; workers are daemon threads, so they die with the process.
    """
    while True:
        try:
            # blocking get: wait until a (task, args, kwargs) triple arrives
            task, args, kwargs = self.queue.get(True)
            task(*args, **kwargs)
        finally:
            # always mark the item done -- even when the task raised --
            # so that Queue.join() can complete
            self.queue.task_done()
<SYSTEM_TASK:>
Add a new item to the queue. An item is a task and the
<END_TASK>
<USER_TASK:>
Description:
def put(self, task, args, kwargs):
    """ Add a new item to the queue. An item is a task and the
    arguments needed to call it.

    Do not call this directly, use Task.queue/Task.run instead.
    """
    if self.num_threads:
        # lazily start workers on first use, then enqueue
        if self.queue is None:
            self._spawn()
        self.queue.put((task, args, kwargs))
    else:
        # synchronous mode: no workers, run inline and return the result
        return task(*args, **kwargs)
<SYSTEM_TASK:>
Queue a first item to execute, then wait for the queue to
<END_TASK>
<USER_TASK:>
Description:
def run(self, *args, **kwargs):
    """ Queue a first item to execute, then wait for the queue to
    be empty before returning. This should be the default way of
    starting any scraper.
    """
    # a chained/piped task delegates to the head of the pipeline
    if self._source is not None:
        return self._source.run(*args, **kwargs)
    self.queue(*args, **kwargs)
    return self.wait()
<SYSTEM_TASK:>
Add a chain listener to the execution of this task. Whenever
<END_TASK>
<USER_TASK:>
Description:
def chain(self, other_task):
    """ Add a chain listener to the execution of this task. Whenever
    an item has been processed by the task, the registered listener
    task will be queued to be executed with the output of this task.
    Can also be written as::

        pipeline = task1 > task2
    """
    other_task._source = self
    listener = ChainListener(other_task)
    self._listeners.append(listener)
    # return the downstream task so calls compose fluently
    return other_task
<SYSTEM_TASK:>
Add a pipe listener to the execution of this task. The
<END_TASK>
<USER_TASK:>
Description:
def pipe(self, other_task):
    """ Add a pipe listener to the execution of this task. The
    output of this task is required to be an iterable. Each item in
    the iterable will be queued as the sole argument to an execution
    of the listener task.
    Can also be written as::

        pipeline = task1 | task2
    """
    other_task._source = self
    listener = PipeListener(other_task)
    self._listeners.append(listener)
    # return the downstream task so calls compose fluently
    return other_task
<SYSTEM_TASK:>
Generate an XML element with client auth data populated.
<END_TASK>
<USER_TASK:>
Description:
def client_auth(self):
    """Generate an XML element with client auth data populated.

    The element is built once and cached on the instance.
    """
    # Explicit None check: testing an Element's truth value is deprecated,
    # and an element is falsy when it has no children -- `if not ...`
    # could needlessly rebuild (or warn on) a cached element.
    if self._client_auth is None:
        self._client_auth = E.Element('merchantAuthentication')
        E.SubElement(self._client_auth, 'name').text = self.config.login_id
        E.SubElement(self._client_auth, 'transactionKey').text = self.config.transaction_key
    return self._client_auth
<SYSTEM_TASK:>
Factory method for generating the base XML requests.
<END_TASK>
<USER_TASK:>
Description:
def _base_request(self, method):
"""Factory method for generating the base XML requests.""" |
request = E.Element(method)
request.set('xmlns', 'AnetApi/xml/v1/schema/AnetApiSchema.xsd')
request.append(self.client_auth)
return request |
<SYSTEM_TASK:>
Make a call to the Authorize.net server with the XML.
<END_TASK>
<USER_TASK:>
Description:
def _make_call(self, call):
    """Make a call to the Authorize.net server with the XML.

    :param call: XML element holding the request body.
    :return: parsed response object (as produced by ``parse_response``).
    :raises AuthorizeConnectionError: on an HTTP-level failure.
    :raises AuthorizeResponseError: when the gateway reports an error.
    """
    # NOTE(review): urllib2 implies this module targets Python 2.
    try:
        request = urllib2.Request(self.config.environment, E.tostring(call))
        request.add_header('Content-Type', 'text/xml')
        response = urllib2.urlopen(request).read()
        response = E.fromstring(response)
        response_json = parse_response(response)
    except urllib2.HTTPError:
        raise AuthorizeConnectionError('Error processing XML request.')
    # Exception handling for transaction response errors.
    try:
        error = response_json.transaction_response.errors[0]
        raise AuthorizeResponseError(error.error_code, error.error_text, response_json)
    except (KeyError, AttributeError):  # Attempt to access transaction response errors
        # no transaction-level errors present; fall through to the
        # top-level result code check below
        pass
    # Throw an exception for invalid calls. This makes error handling easier.
    if response_json.messages[0].result_code != 'Ok':
        error = response_json.messages[0].message
        raise AuthorizeResponseError(error.code, error.text, response_json)
    return response_json
<SYSTEM_TASK:>
Decorator to mark views used both for signup & sign in.
<END_TASK>
<USER_TASK:>
Description:
def sitegate_view(*args_dec, **kwargs_dec):
    """Decorator to mark views used both for signup & sign in."""
    if len(args_dec):
        # bare decoration: @sitegate_view with no arguments
        return signup_view(signin_view(redirect_signedin(*args_dec, **kwargs_dec)))
    # parametrized decoration: build both decorators once, apply per call
    signin = signin_view(**kwargs_dec)
    signup = signup_view(**kwargs_dec)

    def decorated(*args, **kwargs):
        return signup(signin(redirect_signedin(*args, **kwargs)))
    return decorated
<SYSTEM_TASK:>
Generate the bytes for an ULID.
<END_TASK>
<USER_TASK:>
Description:
def generate_binary_ulid(timestamp=None, monotonic=False):
    """
    Generate the bytes for an ULID.

    :param timestamp: An optional timestamp override.
                      If `None`, the current time is used.
    :type timestamp: int|float|datetime.datetime|None
    :param monotonic: Attempt to ensure ULIDs are monotonically increasing.
                      Monotonic behavior is not guaranteed when used from
                      multiple threads.
    :type monotonic: bool
    :return: Bytestring of length 16.
    :rtype: bytes
    """
    global _last_entropy, _last_timestamp
    if timestamp is None:
        timestamp = time.time()
    elif isinstance(timestamp, datetime.datetime):
        timestamp = calendar.timegm(timestamp.utctimetuple())

    # 48-bit millisecond timestamp, most significant byte first
    millis = int(timestamp * 1000.0)
    ts_bytes = _to_binary(
        (millis >> shift) & 0xFF for shift in (40, 32, 24, 16, 8, 0)
    )

    entropy = os.urandom(10)
    if monotonic and _last_timestamp == millis and _last_entropy is not None:
        # within the same millisecond, redraw until the entropy sorts
        # after the previous value, keeping ULIDs increasing
        while entropy < _last_entropy:
            entropy = os.urandom(10)
    _last_entropy = entropy
    _last_timestamp = millis
    return ts_bytes + entropy
<SYSTEM_TASK:>
Generate an ULID, but expressed as an UUID.
<END_TASK>
<USER_TASK:>
Description:
def generate_ulid_as_uuid(timestamp=None, monotonic=False):
    """
    Generate an ULID, but expressed as an UUID.

    :param timestamp: An optional timestamp override.
                      If `None`, the current time is used.
    :type timestamp: int|float|datetime.datetime|None
    :param monotonic: Attempt to ensure ULIDs are monotonically increasing.
                      Monotonic behavior is not guaranteed when used from
                      multiple threads.
    :type monotonic: bool
    :return: UUID containing ULID data.
    :rtype: uuid.UUID
    """
    # A ULID is exactly 16 bytes, so it fits a UUID verbatim.
    raw = generate_binary_ulid(timestamp, monotonic=monotonic)
    return uuid.UUID(bytes=raw)
<SYSTEM_TASK:>
Instantiate a session with the desired configuration parameters,
<END_TASK>
<USER_TASK:>
Description:
def make_session(scraper):
    """ Instantiate a session with the desired configuration parameters,
    including the cache policy. """
    session = ScraperSession()
    session.scraper = scraper
    # normalize the policy text so later comparisons are simple
    session.cache_policy = scraper.config.cache_policy.lower().strip()

    # cached HTTP adapter, backed by a file cache under the data path
    cache_path = os.path.join(scraper.config.data_path, 'cache')
    adapter = CacheControlAdapter(
        FileCache(cache_path),
        cache_etags=True,
        controller_class=PolicyCacheController
    )
    for prefix in ('http://', 'https://'):
        session.mount(prefix, adapter)
    return session
<SYSTEM_TASK:>
Create JSON object out of the response.
<END_TASK>
<USER_TASK:>
Description:
def json(self, **kwargs):
    """ Create JSON object out of the response.

    :param kwargs: passed through to the parent ``json()`` implementation.
    :raises ParseException: if the body is not valid JSON (wraps the
        underlying ``ValueError``).
    """
    try:
        return super(ScraperResponse, self).json(**kwargs)
    except ValueError as ve:
        raise ParseException(ve)
<SYSTEM_TASK:>
Collapse all consecutive whitespace, newlines and tabs
<END_TASK>
<USER_TASK:>
Description:
def collapse_whitespace(text):
    """ Collapse all consecutive whitespace, newlines and tabs
    in a string into single whitespaces, and strip the outer
    whitespace. This will also accept an ``lxml`` element and
    extract all text.

    :param text: a string, an object with an ``xpath`` method, or None
    :return: the collapsed string, or None when ``text`` is None
    """
    if text is None:
        return None
    if hasattr(text, 'xpath'):
        # lxml element: take its full recursive text content
        text = text.xpath('string()')
    # raw string: '\s' in a plain literal is an invalid escape
    # sequence (SyntaxWarning since Python 3.12)
    text = re.sub(r'\s+', ' ', text)
    return text.strip()
<SYSTEM_TASK:>
Sets up the patterns and compiled regex objects for parsing types.
<END_TASK>
<USER_TASK:>
Description:
def setup_regex(self):
    """Sets up the patterns and compiled regex objects for parsing types.

    All patterns are raw strings now; the previous plain literals for
    _RX_PRIV and _RX_CONTAINS contained invalid escape sequences
    (SyntaxWarning since Python 3.12). The matched text is unchanged.
    """
    #Regex for matching the entire body of the type and getting top-level modifiers.
    self._RX_TYPE = r"\n\s*type(?P<modifiers>,\s+(public|private))?(\s*::)?\s+(?P<name>[A-Za-z0-9_]+)" + \
                    r"(?P<contents>.+?)end\s*type(\s+(?P=name))?"
    self.RE_TYPE = re.compile(self._RX_TYPE, re.DOTALL | re.I)
    #This regex is the same as RE_TYPE, only the contents are removed from the definition.
    self._RX_SIG = r"type(?P<modifiers>,\s+(public|private))?(\s+::)?\s+(?P<name>[A-Za-z0-9_]+)"
    self.RE_SIG = re.compile(self._RX_SIG, re.I)
    #Regex for finding if the type is private
    self._RX_PRIV = r"private.+?(contains)?"
    self.RE_PRIV = re.compile(self._RX_PRIV, re.DOTALL | re.I)
    #Regex for finding methods buried in a type declaration.
    self._RX_EXEC = r"^\s*(?P<modifiers>[^:]+)\s+::\s+(?P<name>[A-Za-z0-9_]+)" + \
                    r"(\s+=>\s+(?P<points>[A-Za-z0-9_]+))?$"
    self.RE_EXEC = re.compile(self._RX_EXEC, re.M | re.I)
    #Regex for getting text after contains statement; in the raw string the
    #regex engine still interprets \n as a newline, so matching is identical.
    self._RX_CONTAINS = r"\n\s*contains(?P<remainder>.+)"
    self.RE_CONTAINS = re.compile(self._RX_CONTAINS, re.DOTALL | re.I)
<SYSTEM_TASK:>
As part of real-time update, parses the statement and adjusts the attributes
<END_TASK>
<USER_TASK:>
Description:
def parse_line(self, statement, element, mode):
    """As part of real-time update, parses the statement and adjusts the
    attributes of the specified CustomType instance to reflect the changes.

    :arg statement: the lines of code that was added/removed/changed on the
      element after it had alread been parsed. The lines together form a
      single continuous code statement.
    :arg element: the CustomType instance to update.
    :arg mode: 'insert', or 'delete'.
    """
    if element.incomplete:
        # an incomplete type is only waiting for its closing token
        if element.end_token in statement:
            element.incomplete = False
        return

    # The body of a type declaration can only hold member variable
    # declarations and type executable definitions; update both.
    self._process_execs_contents(statement, element.module.name, element, mode)
    self._rt_parse_members(statement, element, mode)
<SYSTEM_TASK:>
Finds all the member declarations in 'statement' and adds the
<END_TASK>
<USER_TASK:>
Description:
def _rt_members_add(self, element, statement):
"""Finds all the member declarations in 'statement' and adds the
corresponding instances to element.members.""" |
members = self.vparser.parse(statement, None)
for member in members:
single = members[member]
single.parent = element
element.members[member] = single |
<SYSTEM_TASK:>
Finds all the member declarations in 'statement' and removes the
<END_TASK>
<USER_TASK:>
Description:
def _rt_members_delete(self, element, statement):
"""Finds all the member declarations in 'statement' and removes the
corresponding instances from element.members.""" |
removals = self.vparser.parse(statement, None)
for member in removals:
if member in element.members:
del element.members[member] |
<SYSTEM_TASK:>
Extracts all the types from the specified module body.
<END_TASK>
<USER_TASK:>
Description:
def parse(self, module):
    """Extracts all the types from the specified module body.

    Populates ``module.types`` with a dict keyed by lowercase type name,
    registers public types in ``module.publics``, and asks the module to
    relocate embedded types to their parent executables.
    """
    matches = self.RE_TYPE.finditer(module.contents)
    result = {}
    for match in matches:
        name = match.group("name")
        modifiers = match.group("modifiers")
        if modifiers is not None:
            # split the raw modifier text on commas/whitespace
            cleanmods = re.split("[\s,]+", modifiers.strip())
        else:
            cleanmods = []
        contents = match.group("contents")
        result[name.lower()] = self._process_type(name, cleanmods, contents, module, match)
        # public types are also tracked at module level
        if "public" in result[name.lower()].modifiers:
            module.publics[name.lower()] = 1
    #Set the types we found in the module and then move the embedded
    #ones into their correct parent executables.
    module.types = result
    module.update_embedded("types")
<SYSTEM_TASK:>
Updates the documentation for the specified type using the module predocs.
<END_TASK>
<USER_TASK:>
Description:
def update_docs(self, t, module):
    """Updates the documentation for the specified type using the module predocs."""
    # The decorating tags for a type live in the *parent module's*
    # docstrings, keyed by 'modulename.typename'.
    key = "{}.{}".format(module.name, t.name)
    if key not in module.predocs:
        return
    raw_doc, doc_start, doc_end = module.predocs[key][0], module.predocs[key][1], module.predocs[key][2]
    t.docstring = self.docparser.to_doc(raw_doc, t.name)
    t.docstart, t.docend = (doc_start, doc_end)
<SYSTEM_TASK:>
Extracts all the executable methods that belong to the type.
<END_TASK>
<USER_TASK:>
Description:
def _process_execs(self, contents, modulename, atype, mode="insert"):
"""Extracts all the executable methods that belong to the type.""" |
#We only want to look at text after the contains statement
match = self.RE_CONTAINS.search(contents)
#It is possible for the type to not have any executables
if match is not None:
exectext = match.group("remainder")
self._process_execs_contents(exectext, modulename, atype, mode) |
<SYSTEM_TASK:>
Logs into MAL and sets cookies appropriately.
<END_TASK>
<USER_TASK:>
Description:
def login(self):
    """Logs into MAL and sets cookies appropriately.

    :rtype: :class:`.Session`
    :return: The current session.
    """
    # MAL's login endpoint expects a plain form POST; cookies are stored
    # on self.session by the underlying session object.
    credentials = {
        'username': self.username,
        'password': self.password,
        'cookie': 1,
        'sublogin': 'Login'
    }
    self.session.headers.update({'Host': 'myanimelist.net'})
    self.session.post(u'http://myanimelist.net/login.php', data=credentials)
    return self
<SYSTEM_TASK:>
Looks at the stack, to see if a debug message should be printed.
<END_TASK>
<USER_TASK:>
Description:
def dbg(message, *args):
    """ Looks at the stack, to see if a debug message should be printed. """
    if not (debug_function and enable_notice):
        return
    # identify the calling module so per-module suppression works
    caller_frame = inspect.stack()[1]
    caller_module = inspect.getmodule(caller_frame[0])
    if caller_module.__name__ not in ignored_modules:
        indent = ' ' * _debug_indent
        debug_function(NOTICE, indent + 'dbg: ' + message % args)
<SYSTEM_TASK:>
Counts the number of dimensions from a nested list of dimension assignments
<END_TASK>
<USER_TASK:>
Description:
def count_dimensions(entry):
    """Counts the number of dimensions from a nested list of dimension
    assignments that may include function calls.
    """
    total = 0
    for item in entry:
        # Only plain strings carry comma-separated dimension specs;
        # nested lists (function-call arguments) contribute nothing here.
        if isinstance(item, str):
            pieces = item.strip(",").split(",")
            if pieces != [""]:
                total += len(pieces)
    return total
<SYSTEM_TASK:>
Parses all the value code elements from the specified string.
<END_TASK>
<USER_TASK:>
Description:
def parse(self, string, parent):
    """Parses all the value code elements from the specified string."""
    result = {}
    for match in self.RE_MEMBERS.finditer(string):
        # a single regex match may declare several members on one line
        for value_elem in self._process_member(match, parent, string):
            result[value_elem.name.lower()] = value_elem
    return result
<SYSTEM_TASK:>
Extracts all the member info from the regex match; returns a ValueElements.
<END_TASK>
<USER_TASK:>
Description:
def _process_member(self, member, parent, string):
    """Extracts all the member info from the regex match; returns a list of
    ValueElement instances (one line can declare several members).

    :arg member: regex match with 'modifiers', 'type', 'kind' and 'names' groups.
    :arg parent: owning element, used for line-number lookup; may be None.
    :arg string: the full text the match came from, for the reference string.
    """
    #The modifiers regex is very greedy so we have some cleaning up to do
    #to extract the mods.
    modifiers = member.group("modifiers")
    dimension = None
    if modifiers is not None:
        #Unfortunately, the dimension can also be specified as a modifier and
        #the dimensions can include variable names and functions. This introduces
        #the possibility of nested lists.
        modifiers = modifiers.lower()
        if "dimension" in modifiers:
            # pull the dimension spec out and strip it from the modifier text
            start, end = self._get_dim_modifier(modifiers)
            dimension = modifiers[start+1:end]
            dimtext = modifiers[modifiers.index("dimension"):end+1]
            modifiers = re.split(",\s*", modifiers.replace(dimtext, "").strip())
            #modifiers.append("dimension")
        else:
            modifiers = re.split("[,\s]+", modifiers.strip())
        if "" in modifiers:
            modifiers.remove("")
    dtype = member.group("type")
    kind = member.group("kind")
    names = member.group("names")
    #If there are multiple vars defined on this line we need to return
    #a list of all of them.
    result = []
    #They might have defined multiple vars on the same line
    refstring = string[member.start():member.end()].strip()
    if parent is not None:
        refline = parent.module.linenum(member.start())
    else:
        refline = "?"
    ready = self._separate_multiple_def(re.sub(",\s*", ", ", names.strip()), parent, refstring, refline)
    for name, ldimension, default, D in self._clean_multiple_def(ready):
        #Now construct the element and set all the values, then add it in the results list.
        # a per-name dimension (from the name text) overrides the
        # dimension(...) modifier shared by the whole line
        udim = ldimension if ldimension is not None else dimension
        uD = D if ldimension is not None else count_dimensions([dimension])
        result.append(ValueElement(name, modifiers, dtype, kind, default, udim, parent, uD))
    return result
<SYSTEM_TASK:>
Collapses the list structure in entry to a single string representing the default
<END_TASK>
<USER_TASK:>
Description:
def _collapse_default(self, entry):
    """Collapses the list structure in entry to a single string representing the default
    value assigned to a variable or its dimensions.

    :arg entry: a string, or a nested list/tuple mixing name strings and
        argument sub-lists (function calls parsed into [name, [args]] form).
    :return: the flattened default-value string.
    """
    if isinstance(entry, tuple) or isinstance(entry, list):
        # First pass: pair each call name with its argument list, and keep
        # bare items as singleton tuples; lone commas are separators.
        sets = []
        i = 0
        while i < len(entry):
            if isinstance(entry[i], str) and i+1 < len(entry) and isinstance(entry[i+1], list):
                # a name string directly followed by a list is a call: name(args)
                sets.append((entry[i], entry[i+1]))
                i += 2
            elif isinstance(entry[i], str) and entry[i] == ",":
                i += 1
            else:
                sets.append((entry[i],))
                i += 1

        # Second pass: render each pair/singleton back to source-like text.
        result = []
        for s in sets:
            if isinstance(s[0], str):
                name = s[0].strip(",")
            elif len(s) == 1:
                # a nested structure with no call name: collapse recursively
                name = self._collapse_default(s[0])
            if len(s) > 1:
                args = self._collapse_default(s[1])
            else:
                args = []
            if len(args) > 0:
                result.append("{}({})".format(name, args))
            else:
                result.append(name)
        return ', '.join(result)
    else:
        # plain string: only the text before any comma is the default value
        if "," in entry:
            return entry.split(",")[0].strip()
        else:
            return entry.strip()
<SYSTEM_TASK:>
Cleans the list of variable definitions extracted from the definition text to
<END_TASK>
<USER_TASK:>
Description:
def _clean_multiple_def(self, ready):
"""Cleans the list of variable definitions extracted from the definition text to
get hold of the dimensions and default values.
""" |
result = []
for entry in ready:
if isinstance(entry, list):
#This variable declaration has a default value specified, which is in the
#second slot of the list.
default = self._collapse_default(entry[1])
#For hard-coded array defaults, add the parenthesis back in.
if default[0] == "/":
default = "({})".format(default)
namedim = entry[0]
else:
default = None
namedim = entry
if isinstance(namedim, str):
name = namedim.strip().strip(",")
dimension = None
D = 0
else:
#Namedim is a tuple of (name, dimension)
name = namedim[0].strip()
D = count_dimensions(namedim[1])
dimension = self._collapse_default(namedim[1])
result.append((name, dimension, default, D))
return result |
<SYSTEM_TASK:>
Create a range generator for chars
<END_TASK>
<USER_TASK:>
Description:
def char_range(starting_char, ending_char):
    """
    Create a range generator for chars.

    :param starting_char: first character, inclusive
    :param ending_char: last character, inclusive
    :raises TypeError: if either bound is not a string.
    """
    # raise instead of assert: asserts are stripped under `python -O`,
    # which would silently skip this validation
    if not isinstance(starting_char, str) or not isinstance(ending_char, str):
        raise TypeError('char_range: Wrong argument/s type')
    for code in range(ord(starting_char), ord(ending_char) + 1):
        yield chr(code)
<SYSTEM_TASK:>
Finds out whether there are intervals to expand and
<END_TASK>
<USER_TASK:>
Description:
def parse_charset(charset):
    """
    Finds out whether there are intervals to expand and
    creates the charset
    """
    import re
    intervals = re.findall(r'(\w-\w)', charset)
    if not intervals:
        # no 'a-z' style intervals: the charset is used verbatim
        return charset
    # expand each interval into its full run of characters
    # (the small char_range helper is inlined here)
    expanded = []
    for interval in intervals:
        first, last = interval[0], interval[-1]
        expanded.extend(chr(code) for code in range(ord(first), ord(last) + 1))
    return ''.join(expanded)
<SYSTEM_TASK:>
Return the viewable size of the Table as @tuple (x,y)
<END_TASK>
<USER_TASK:>
Description:
def size(self):
    """Return the viewable size of the Table as @tuple (x,y).

    Width is the widest section; height is the sum of section heights.
    """
    # dict.itervalues() is Python-2-only; .values() works on both 2 and 3
    section_sizes = [section.size() for section in self.sections.values()]
    width = max(w for w, _ in section_sizes)
    height = sum(h for _, h in section_sizes)
    return width, height
<SYSTEM_TASK:>
Process footer and return the processed string
<END_TASK>
<USER_TASK:>
Description:
def get_ftr(self):
    """
    Process footer and return the processed string
    """
    # empty/None footer passes through untouched
    if not self.ftr:
        return self.ftr
    width = self.size()[0]
    # substitute the %time placeholder with the current wall-clock time,
    # then right-justify to the table width
    rendered = re.sub("%time", "%s\n" % time.strftime("%H:%M:%S"), self.ftr)
    return rendered.rjust(width)
<SYSTEM_TASK:>
Sets up the header for the help command that explains the background on how to use
<END_TASK>
<USER_TASK:>
Description:
def do_help(self, arg):
    """Sets up the header for the help command that explains the background on how to use
    the script generally. Help for each command then stands alone in the context of this
    documentation. Although we could have documented this on the wiki, it is better served
    when shipped with the shell.

    :arg arg: the command name to get help for; "" prints the general overview first.
    """
    # Only print the long overview when no specific command was requested.
    if arg == "":
        lines = [("The fortpy unit testing analysis shell makes it easy to analyze the results "
                  "of multiple test cases, make plots of trends and tabulate values for use in "
                  "other applications. This documentation will provide an overview of the basics. "
                  "Use 'help <command>' to get specific command help."),
                 ("Each fortpy shell session can hold the results of multiple unit tests. You can "
                  "load a unit test's results into the session using one of the 'parse' commands. "
                  "Once the test is loaded you can tabulate and plot results by setting test case "
                  "filters ('filter'), and independent and dependent variables ('indep', 'dep')."
                  "Switch between different unit tests loaded into the session using 'set'."),
                 ("To make multiple plots/tables for the same unit test, create new analysis "
                  "groups ('group'). "
                  "Each group has its own set of properties that can be set (e.g. variables, plot "
                  "labels for axes, filters for test cases, etc.) The possible properties that affect "
                  "each command are listed in the specific help for that command."),
                 ("You can save the state of a shell session using 'save' and then recover it at "
                  "a later time using 'load'. When a session is re-loaded, all the variables and "
                  "properties/settings for plots/tables are maintained and the latest state of the "
                  "unit test's results are used. A console history is also maintained with bash-like "
                  "commands (e.g. Ctrl-R for reverse history search, etc.) across sessions. You can "
                  "manipulate its behavior with 'history'.")]
        self._fixed_width_info(lines)
    # Delegate to the standard cmd help machinery for per-command help.
    cmd.Cmd.do_help(self, arg)
<SYSTEM_TASK:>
Prints the specified string as information with fixed width of 80 chars.
<END_TASK>
<USER_TASK:>
Description:
def _fixed_width_info(self, lines):
    """Prints the specified string as information with fixed width of 80 chars.

    NOTE: text is sliced at exactly 80 characters, so words may be split
    across lines.  A blank line is emitted after each paragraph.
    """
    for text in lines:
        start = 0
        while start < len(text):
            msg.info(text[start:start + 80])
            start += 80
        msg.blank()
<SYSTEM_TASK:>
Outputs the specified value to the console or a file depending on the redirect
<END_TASK>
<USER_TASK:>
Description:
def _redirect_output(self, value, filename=None, append=None, printfun=None):
"""Outputs the specified value to the console or a file depending on the redirect
behavior specified.
:arg value: the string text to print or save.
:arg filename: the name of the file to save the text to.
:arg append: when true, the text is appended to the file if it exists.
""" |
if filename is None:
if printfun is None:
print(value)
else:
printfun(value)
else:
if append:
mode = 'a'
else:
mode = 'w'
from os import path
fullpath = path.abspath(filename)
with open(filename, mode) as f:
f.write(value + '\n') |
<SYSTEM_TASK:>
Returns the completion list of possible test cases for the active unit test.
<END_TASK>
<USER_TASK:>
Description:
def _complete_cases(self, text, line, istart, iend):
"""Returns the completion list of possible test cases for the active unit test.""" |
if text == "":
return list(self.live.keys())
else:
return [c for c in self.live if c.startswith(text)] |
<SYSTEM_TASK:>
Sets the value of the argument with the specified id using the argument passed
<END_TASK>
<USER_TASK:>
Description:
def _set_arg_generic(self, argid, arg, cast=str):
    """Sets the value of the argument with the specified id using the argument passed
    in from the shell session.

    :arg argid: key in self.curargs to update.
    :arg arg: raw shell argument, possibly carrying a redirect suffix.
    :arg cast: callable used to convert the textual value before storing it.
    """
    usable, filename, append = self._redirect_split(arg)
    if usable != "":
        self.curargs[argid] = cast(usable)
    if argid not in self.curargs:
        return
    # Echo the (possibly unchanged) stored value back to the user.
    feedback = "{}: '{}'".format(argid.upper(), self.curargs[argid])
    self._redirect_output(feedback, filename, append, msg.info)
<SYSTEM_TASK:>
Prints a dictionary that has variable => value mappings.
<END_TASK>
<USER_TASK:>
Description:
def _print_map_dict(self, argkey, filename, append):
    """Prints a dictionary that has variable => value mappings."""
    mapping = self.curargs[argkey]
    lines = ["'{}' => {}".format(key, mapping[key]) for key in sorted(mapping.keys())]
    self._redirect_output('\n'.join(lines), filename, append, msg.info)
<SYSTEM_TASK:>
Sets the function to apply to the values of a specific variable before plotting
<END_TASK>
<USER_TASK:>
Description:
def do_postfix(self, arg):
    """Sets the function to apply to the values of a specific variable before plotting
    or tabulating values.

    'postfix list' prints the currently registered postfix functions;
    otherwise the last token must be a 'var:lambda' expression applied to one
    of the variables named earlier in the argument string.
    """
    usable, filename, append = self._redirect_split(arg)
    sargs = usable.split()
    if len(sargs) == 1 and sargs[0] == "list":
        self._print_map_dict("functions", filename, append)
    elif len(sargs) >= 2:
        defvars = self._postfix_varlist("postfix " + arg)
        for var in defvars.values():
            if not self._validate_var(var):
                # BUG FIX: the offending variable was missing from the message.
                msg.err("Variable '{}' is not a valid variable|property combination.".format(var))
                return
        fxn = arg.split()[-1]
        if ":" not in fxn:
            # BUG FIX: the offending expression was missing from the message.
            msg.err("{} is not a valid postfix function expression.".format(fxn))
            self.help_postfix()
            return
        modvar = fxn.split(":")[0]
        if modvar not in defvars:
            msg.err("Invalid postfix function: variable '{}' not defined.".format(modvar))
            return
        defvars["lambda"] = fxn
        self.curargs["functions"][defvars[modvar]] = defvars
        #Give the user some feedback so that they know it was successful.
        self.do_postfix("list")
<SYSTEM_TASK:>
Removes a postfix function from a variable. See 'postfix'.
<END_TASK>
<USER_TASK:>
Description:
def do_rmpostfix(self, arg):
    """Removes a postfix function from a variable. See 'postfix'.

    Pass '*' to remove all postfix functions at once.
    """
    functions = self.curargs["functions"]
    changed = False
    if arg in functions:
        del functions[arg]
        changed = True
    elif arg == "*":
        functions.clear()
        changed = True
    if changed:
        # Echo the remaining functions so the user sees the result.
        self.do_postfix("list")
<SYSTEM_TASK:>
Removes a fit function from a variable. See 'fit'.
<END_TASK>
<USER_TASK:>
Description:
def do_rmfit(self, arg):
    """Removes a fit function from a variable. See 'fit'."""
    fits = self.curargs["fits"]
    if arg in fits:
        del fits[arg]
    #We also need to remove the variable entry if it exists.
    if "timing" in arg:
        fitvar = "{}|fit".format(arg)
    else:
        fitvar = "{}.fit".format(arg)
    dependents = self.curargs["dependents"]
    if fitvar in dependents:
        dependents.remove(fitvar)
<SYSTEM_TASK:>
Returns a copy of the settings dictionary for the specified option in
<END_TASK>
<USER_TASK:>
Description:
def _get_matplot_dict(self, option, prop, defdict):
    """Returns a copy of the settings dictionary for the specified option in
    curargs with update values where the value is replaced by the key from
    the relevant default dictionary.

    :arg option: the key in self.curargs to update.
    :arg prop: the per-variable property name to translate (e.g. "marker").
    :arg defdict: the default dictionary whose keys should be used when values match.

    NOTE(review): the copy is shallow and the nested per-variable dicts are
    modified in place, so this also mutates self.curargs[option] — confirm
    that persisting the translated keys back into curargs is intended.
    """
    cargs = self.curargs[option]
    result = cargs.copy()
    for varname in cargs:
        if prop in cargs[varname]:
            name = cargs[varname][prop]
            # Reverse-lookup: replace the stored value with its defdict key.
            for key, val in list(defdict.items()):
                if val == name:
                    cargs[varname][prop] = key
                    break
    return result
<SYSTEM_TASK:>
Plots the current state of the shell, saving the value to the specified file
<END_TASK>
<USER_TASK:>
Description:
def _plot_generic(self, filename=None):
    """Plots the current state of the shell, saving the value to the specified file
    if specified.

    :arg filename: target file for the plot; None (or "") shows it instead.
    """
    #Since the filename is being passed directly from the argument, check its validity.
    if filename == "":
        filename = None
    if "x" not in self.curargs["labels"]:
        #Set a default x-label since we know what variable is being plotted.
        self.curargs["labels"]["x"] = "Value of '{}' (unknown units)".format(self.curargs["independent"])
    args = self.curargs
    a = self.tests[self.active]
    # Regenerate any data fits before plotting so they are up to date.
    self._make_fits()
    #Before we can pass the markers in, we need to translate from keys to values so
    #that matplotlib understands.
    markdict = self._get_matplot_dict("markers", "marker", self._possible_markers)
    linedict = self._get_matplot_dict("lines", "style", self._possible_linestyles)
    #Set the remaining arguments to have the right keyword name.
    args["savefile"] = filename
    args["markers"] = markdict
    args["lines"] = linedict
    # All analysis-group settings are forwarded as keyword arguments.
    a.plot(**args)
<SYSTEM_TASK:>
Plots the current state of the shell's independent vs. dependent variables on the
<END_TASK>
<USER_TASK:>
Description:
def do_plot(self, arg):
    """Plots the current state of the shell's independent vs. dependent variables on the
    same set of axes. Give filename to save to as argument or leave blank to show.
    """
    _, filename, _ = self._redirect_split(arg)
    # Linear axes for the plain 'plot' command.
    self.curargs["xscale"] = None
    self.curargs["yscale"] = None
    self._plot_generic(filename)
<SYSTEM_TASK:>
Sets the default prompt to match the currently active unit test.
<END_TASK>
<USER_TASK:>
Description:
def _set_def_prompt(self):
"""Sets the default prompt to match the currently active unit test.""" |
if len(self.active) > 15:
ids = self.active.split(".")
if len(ids) > 2:
module, executable, compiler = ids
else:
module, executable = ids
compiler = "g"
self.prompt = "({}*.{}*.{}:{})".format(module[0:6], executable[0:6], compiler, self.group)
else:
self.prompt = "({}:{})".format(self.active, self.group) |
<SYSTEM_TASK:>
Sets the specified 'module.executable' to be the active test result to interact with.
<END_TASK>
<USER_TASK:>
Description:
def do_set(self, arg):
    """Sets the specified 'module.executable' to be the active test result to interact with.

    :arg arg: key into self.tests identifying the unit test to activate.
    """
    if arg in self.tests:
        self.active = arg
        #Create a default argument set and analysis group for the current plotting
        if arg not in self.args:
            self.args[arg] = {"default": dict(self._template_args)}
            self.group = "default"
        else:
            # Re-activate the first existing analysis group for this test.
            # NOTE(review): presumably self.curargs exposes the groups of the
            # active test at this point — confirm the property's definition.
            self.group = list(self.curargs.keys())[0]
        #Change the prompt so that they know which unit test is being edited.
        self._set_def_prompt()
    else:
        msg.err("The test case '{}' is not valid.".format(arg))
<SYSTEM_TASK:>
Loads a saved session variables, settings and test results to the shell.
<END_TASK>
<USER_TASK:>
Description:
def do_load(self, arg):
    """Loads a saved session variables, settings and test results to the shell.

    :arg arg: path (``~`` allowed) to a JSON session file produced by 'save'.
    """
    from os import path
    import json
    fullpath = path.expanduser(arg)
    # Silently does nothing when the file does not exist.
    if path.isfile(fullpath):
        with open(fullpath) as f:
            data = json.load(f)
        #Now, reparse the staging directories that were present in the saved session.
        for stagepath in data["tests"]:
            self.do_parse(stagepath)
        self.args = data["args"]
<SYSTEM_TASK:>
Reparses the currently active unit test to get the latest test results loaded
<END_TASK>
<USER_TASK:>
Description:
def do_reparse(self, arg):
    """Reparses the currently active unit test to get the latest test results loaded
    to the console.

    :arg arg: pass "full" to force a full (non-incremental) reparse.
    """
    #We just get the full path of the currently active test and hit reparse.
    full = arg == "full"
    from os import path
    fullpath = path.abspath(self.tests[self.active].stagedir)
    # Replace the active Analysis with a freshly parsed one.
    self.tests[self.active] = Analysis(fullpath, full)
<SYSTEM_TASK:>
Generates the data fits for any variables set for fitting in the shell.
<END_TASK>
<USER_TASK:>
Description:
def _make_fits(self):
"""Generates the data fits for any variables set for fitting in the shell.""" |
a = self.tests[self.active]
args = self.curargs
#We need to generate a fit for the data if there are any fits specified.
if len(args["fits"]) > 0:
for fit in list(args["fits"].keys()):
a.fit(args["independent"], fit, args["fits"][fit], args["threshold"], args["functions"]) |
<SYSTEM_TASK:>
Prints the set of values for the independent vs. dependent variables in the
<END_TASK>
<USER_TASK:>
Description:
def do_table(self, arg):
    """Prints the set of values for the independent vs. dependent variables in the
    active unit test and analysis group as a table.

    :arg arg: optional redirect suffix (filename / append) for the output.
    """
    usable, filename, append = self._redirect_split(arg)
    a = self.tests[self.active]
    args = self.curargs
    # Make sure fitted curves are current before tabulating them.
    self._make_fits()
    result = a.table(args["independent"], args["dependents"], args["threshold"],
                     args["headings"], args["functions"])
    if result is not None:
        self._redirect_output(result, filename, append, msg.info)
<SYSTEM_TASK:>
Prints a list of test cases that failed for the current unit test and analysis
<END_TASK>
<USER_TASK:>
Description:
def do_failures(self, arg):
    """Prints a list of test cases that failed for the current unit test and analysis
    group settings. To only check failure on specific output files, set the list of
    files to check as arguments.
    """
    usable, filename, append = self._redirect_split(arg)
    analysis = self.tests[self.active]
    args = self.curargs
    tokens = usable.split()
    # First token is the test-case filter; remaining tokens name output files.
    tfilter = tokens[0] if tokens else "*"
    outfiles = tokens[1:] if len(tokens) > 1 else None
    result = analysis.failures(outfiles, args["threshold"], tfilter)
    self._redirect_output(result, filename, append, msg.info)
<SYSTEM_TASK:>
Returns the full path to the console history file.
<END_TASK>
<USER_TASK:>
Description:
def histpath(self):
    """Returns the full path to the console history file."""
    from os.path import join
    from fortpy import settings
    return join(settings.cache_directory, "history")
<SYSTEM_TASK:>
Stores the information about the last unhandled exception.
<END_TASK>
<USER_TASK:>
Description:
def _store_lasterr(self):
"""Stores the information about the last unhandled exception.""" |
from sys import exc_info
from traceback import format_exception
e = exc_info()
self.lasterr = '\n'.join(format_exception(e[0], e[1], e[2])) |
<SYSTEM_TASK:>
Makes sure that the command specified in the line is valid given the current
<END_TASK>
<USER_TASK:>
Description:
def precmd(self, line):
    """Makes sure that the command specified in the line is valid given the current
    status of loaded unit tests and analysis group.

    Returns the (possibly substituted) line to execute, or "" to abort.
    """
    if line == "":
        return ""
    command = line.split()[0]
    if "!" in command:
        # Bash-style history recall: '!N' re-runs history item N.
        value = command.split("!")[1]
        try:
            ihist = int(value)
            import readline
            if ihist <= readline.get_current_history_length():
                return readline.get_history_item(ihist)
            else:
                msg.warn("The specified history item {} ".format(ihist) +
                         "does not exist in the history.")
                return ""
        except ValueError:
            #This clearly isn't a bash style recursion of a past history item.
            #Just run the command as it was originally entered.
            return line
    else:
        if command in self._test_cmds or command in self._group_cmds:
            #We have to test the active unit test for both the test commands and the
            #group commands, since the group commands rely on the active unit test.
            if self.active is None or self.active not in self.tests:
                msg.err("The specified command '{}' requires an ".format(command) +
                        "active unit test to be loaded. Use 'parse' or 'load'.")
                return ""
            elif (command in self._group_cmds and (self.group is None or
                                                   self.group not in self.args[self.active])):
                msg.err("No valid analysis group is active. Use 'group' to create "
                        "one or mark an existing one as active.")
                return ""
            else:
                return line
        if command in self._var_cmds:
            #We need to make sure that we have variables set.
            if self.curargs["independent"] is None or len(self.curargs["dependents"]) == 0:
                msg.err("This command requires an independent variable to be set and "
                        "at least one dependent variable.\n See 'dep' and 'indep' commands.")
                return ""
            else:
                return line
        else:
            # Commands with no preconditions pass straight through.
            return line
<SYSTEM_TASK:>
Imitates the bash shell 'cd' command.
<END_TASK>
<USER_TASK:>
Description:
def do_cd(self, arg):
    """Imitates the bash shell 'cd' command."""
    from os import chdir, path
    target = path.abspath(path.expanduser(arg))
    if not path.isdir(target):
        msg.err("'{}' is not a valid directory.".format(arg))
        return
    chdir(target)
<SYSTEM_TASK:>
Generates branches with alternative system.
<END_TASK>
<USER_TASK:>
Description:
def generate_branches(scales=None, angles=None, shift_angle=0):
    """Generates branches with alternative system.

    Args:
        scales (tuple/array): Indicating how the branch/es length/es develop/s from age to age.
        angles (tuple/array): Holding the branch and shift angle in radians.
        shift_angle (float): Holding the rotation angle for all branches.

    Returns:
        branches (2d-array): A array constits of arrays holding scale and angle for every branch.
    """
    # Branches fan out around the centre: each angle is offset by half the
    # total spread plus the cumulative angles of the preceding branches.
    return [[scale, -sum(angles) / 2 + sum(angles[:pos]) + shift_angle]
            for pos, scale in enumerate(scales)]
<SYSTEM_TASK:>
Gets the coordinates of the rectangle, in which the tree can be put.
<END_TASK>
<USER_TASK:>
Description:
def get_rectangle(self):
    """Gets the coordinates of the rectangle, in which the tree can be put.

    Returns:
        tupel: (x1, y1, x2, y2)
    """
    # Start from the tree's own position and widen the bounds as needed.
    x_min, y_min = self.pos[0], self.pos[1]
    x_max, y_max = self.pos[0], self.pos[1]
    for age in self.nodes:
        for node in age:
            x_min = min(x_min, node.pos[0])
            y_min = min(y_min, node.pos[1])
            x_max = max(x_max, node.pos[0])
            y_max = max(y_max, node.pos[1])
    return (x_min, y_min, x_max, y_max)
<SYSTEM_TASK:>
Get the size of the tree.
<END_TASK>
<USER_TASK:>
Description:
def get_size(self):
    """Get the size of the tree.

    Returns:
        tupel: (width, height)
    """
    x1, y1, x2, y2 = self.get_rectangle()
    return (int(x2 - x1), int(y2 - y1))
<SYSTEM_TASK:>
Get the length of a branch.
<END_TASK>
<USER_TASK:>
Description:
def get_branch_length(self, age=None, pos=0):
    """Get the length of a branch.

    This method calculates the length of a branch in specific age.
    The used formula: length * scale^age.

    Args:
        age (int): The age, for which you want to know the branch length.
        pos (int): Index of the branch whose scale is used.

    Returns:
        float: The length of the branch
    """
    effective_age = self.age if age is None else age
    return self.length * self.branches[pos][0] ** effective_age
<SYSTEM_TASK:>
Get, how much steps will needed for a given branch length.
<END_TASK>
<USER_TASK:>
Description:
def get_steps_branch_len(self, length):
    """Get, how much steps will needed for a given branch length.

    Returns:
        float: The age the tree must achieve to reach the given branch length.
    """
    # BUG FIX: ``min(self.branches[0][0])`` called min() on a scalar scale,
    # which raises TypeError on every call.  Use the smallest scale of all
    # branches, as the min() call evidently intended.
    smallest_scale = min(branch[0] for branch in self.branches)
    return log(length / self.length, smallest_scale)
<SYSTEM_TASK:>
Get sum of all branches in the tree.
<END_TASK>
<USER_TASK:>
Description:
def get_node_sum(self, age=None):
    """Get sum of all branches in the tree.

    Returns:
        int: The sum of all nodes grown until the age.
    """
    if age is None:
        age = self.age
    # comp == 1 would divide by zero in the geometric-series formula.
    if self.comp == 1:
        return age
    return int((pow(self.comp, age + 1) - 1) / (self.comp - 1))
<SYSTEM_TASK:>
Get the sum of branches grown in an specific age.
<END_TASK>
<USER_TASK:>
Description:
def get_node_age_sum(self, age=None):
    """Get the sum of branches grown in an specific age.

    Returns:
        int: The sum of all nodes grown in an age.
    """
    effective_age = self.age if age is None else age
    return self.comp ** effective_age
<SYSTEM_TASK:>
Get the tree nodes as list.
<END_TASK>
<USER_TASK:>
Description:
def get_nodes(self):
    """Get the tree nodes as list.

    Returns:
        list: A 2d-list holding the grown nodes coordinates as tupel for every age.
        Example:
        [
            [(10, 40)],
            [(20, 80), (100, 30)],
            ...
        ]
    """
    return [[node.get_tuple() for node in level] for level in self.nodes]
<SYSTEM_TASK:>
Get the tree branches as list.
<END_TASK>
<USER_TASK:>
Description:
def get_branches(self):
    """Get the tree branches as list.

    Returns:
        list: A 2d-list holding the grown branches coordinates as tupel for every age.
        Example:
        [
            [(10, 40, 90, 30)],
            [(90, 30, 100, 40), (90, 30, 300, 60)],
            ...
        ]
    """
    branches = []
    for age, level in enumerate(self.nodes):
        level_branches = []
        for n, node in enumerate(level):
            # The trunk's parent is the tree position itself; every other
            # node looks up its parent in the previous age.
            if age == 0:
                parent = Node(self.pos[:2])
            else:
                parent = self._get_node_parent(age - 1, n)
            level_branches.append(parent.get_tuple() + node.get_tuple())
        branches.append(level_branches)
    return branches
<SYSTEM_TASK:>
Move the tree.
<END_TASK>
<USER_TASK:>
Description:
def move(self, delta):
    """Move the tree.

    Args:
        delta (tupel): The adjustment of the position.
    """
    dx, dy = delta[0], delta[1]
    x1, y1, x2, y2 = self.pos
    self.pos = (x1 + dx, y1 + dy, x2 + dx, y2 + dy)
    # Move all nodes
    for level in self.nodes:
        for node in level:
            node.move(delta)
<SYSTEM_TASK:>
Let the tree grow.
<END_TASK>
<USER_TASK:>
Description:
def grow(self, times=1):
    """Let the tree grow.

    Args:
        times (integer): Indicate how many times the tree will grow.
    """
    # New age level to receive the children of every current leaf node.
    self.nodes.append([])
    for n, node in enumerate(self.nodes[self.age]):
        # The trunk's parent is the tree position; others look up age-1.
        if self.age == 0:
            p_node = Node(self.pos[:2])
        else:
            p_node = self._get_node_parent(self.age-1, n)
        angle = node.get_node_angle(p_node)
        # Spawn self.comp children per node, each with its own angle/length.
        for i in range(self.comp):
            tot_angle = self.__get_total_angle(angle, i)
            length = self.__get_total_length(self.age+1, i)
            self.nodes[self.age+1].append(node.make_new_node(length, tot_angle))
    self.age += 1
    # Recurse until the requested number of growth steps is reached.
    if times > 1:
        self.grow(times-1)
<SYSTEM_TASK:>
Draw the tree on a canvas.
<END_TASK>
<USER_TASK:>
Description:
def draw_on(self, canvas, stem_color, leaf_color, thickness, ages=None):
    """Draw the tree on a canvas.

    Args:
        canvas (object): The canvas, you want to draw the tree on. Supported canvases: svgwrite.Drawing and PIL.Image (You can also add your custom libraries.)
        stem_color (tupel): Color or gradient for the stem of the tree.
        leaf_color (tupel): Color for the leaf (= the color for last iteration).
        thickness (int): The start thickness of the tree.
        ages (array): Ages to draw; forwarded to the drawer.
    """
    # Dispatch to the drawer registered for the canvas' module; canvases
    # from unregistered modules are silently ignored.
    if canvas.__module__ in SUPPORTED_CANVAS:
        drawer = SUPPORTED_CANVAS[canvas.__module__]
        drawer(self, canvas, stem_color, leaf_color, thickness, ages).draw()
<SYSTEM_TASK:>
Get the parent node of node, whch is located in tree's node list.
<END_TASK>
<USER_TASK:>
Description:
def _get_node_parent(self, age, pos):
"""Get the parent node of node, whch is located in tree's node list.
Returns:
object: The parent node.
""" |
return self.nodes[age][int(pos / self.comp)] |
<SYSTEM_TASK:>
Abstraction of field lookups for managers.
<END_TASK>
<USER_TASK:>
Description:
def _field_lookups(model, status=None):
    """
    Abstraction of field lookups for managers.

    Returns a dictionary of field lookups for a queryset. The lookups
    will always filter by site. Optionally, if ``status`` is passed to
    the function the objects will also be filtered by the given status.

    This function saves from having to make two different on-site and
    published Managers each for `Topic` and `Question`, and having to move
    Managers out of the `FAQBase` model and into each of the `Topic`
    and `Question` models.
    """
    # Import models here to avoid circular import fail.
    from faq.models import Topic, Question
    lookups = {}
    if model == Topic:
        lookups['sites__pk'] = settings.SITE_ID
    if model == Question:
        lookups['topic__sites__pk'] = settings.SITE_ID
        if status:
            lookups['topic__status'] = status
    # Both Topic & Question have a status field.
    if status:
        lookups['status'] = status
    return lookups
<SYSTEM_TASK:>
Determines the compiler used to compile the specified shared library by
<END_TASK>
<USER_TASK:>
Description:
def detect_compiler(libpath):
    """Determines the compiler used to compile the specified shared library by
    using the system utilities.

    :arg libpath: the full path to the shared library *.so file.
    :returns: "gfortran" or "ifort" when a matching symbol-name pattern is
        found, otherwise False.
    """
    from os import waitpid, path
    from subprocess import Popen, PIPE
    # List the library's symbols; compiler-specific name mangling identifies
    # the toolchain (gfortran uses _MOD_, ifort uses _mp_).
    command = "nm {0}".format(path.abspath(libpath))
    child = Popen(command, shell=True, executable="/bin/bash", stdout=PIPE)
    # Need to do this so that we are sure the process is done before moving on
    waitpid(child.pid, 0)
    # NOTE(review): on Python 3 these lines are bytes, so the "in" checks
    # against str would raise TypeError — confirm this module targets Python 2.
    contents = child.stdout.readlines()
    i = 0
    found = False
    while i < len(contents) and found == False:
        if "_MOD_" in contents[i]:
            found = "gfortran"
        elif "_mp_" in contents[i]:
            found = "ifort"
        i += 1
    return found
<SYSTEM_TASK:>
Deallocates the fortran-managed memory that this ctype references.
<END_TASK>
<USER_TASK:>
Description:
def clean(self):
    """Deallocates the fortran-managed memory that this ctype references.
    """
    # Guard against double-free: only deallocate once.
    if not self.deallocated:
        #Release/deallocate the pointer in fortran.
        method = self._deallocator()
        if method is not None:
            dealloc = static_symbol("ftypes_dealloc", method, self.libpath, True)
            if dealloc is None:
                return
            # The fortran routine takes the pointer, the rank, and an array
            # holding the size of each dimension.
            arrtype = ndpointer(dtype=int, ndim=1, shape=(len(self.indices),), flags="F")
            dealloc.argtypes = [c_void_p, c_int_p, arrtype]
            nindices = require(array([i.value for i in self.indices]), int, "F")
            dealloc(byref(self.pointer), c_int(len(self.indices)), nindices)
        self.deallocated = True
<SYSTEM_TASK:>
Returns the name of the subroutine in ftypes_dealloc.f90 that can
<END_TASK>
<USER_TASK:>
Description:
def _deallocator(self):
"""Returns the name of the subroutine in ftypes_dealloc.f90 that can
deallocate the array for this Ftype's pointer.
:arg ctype: the string c-type of the variable.
""" |
lookup = {
"c_bool": "logical",
"c_double": "double",
"c_double_complex": "complex",
"c_char": "char",
"c_int": "int",
"c_float": "float",
"c_short": "short",
"c_long": "long"
}
ctype = type(self.pointer).__name__.replace("LP_", "").lower()
if ctype in lookup:
return "dealloc_{0}_{1:d}d".format(lookup[ctype], len(self.indices))
else:
return None |
<SYSTEM_TASK:>
Adds the specified python-typed result and an optional Ftype pointer
<END_TASK>
<USER_TASK:>
Description:
def add(self, varname, result, pointer=None):
    """Adds the specified python-typed result and an optional Ftype pointer
    to use when cleaning up this object.

    :arg varname: name under which the result is stored and exposed.
    :arg result: a python-typed representation of the result.
    :arg pointer: an instance of Ftype with pointer information for deallocating
      the c-pointer.
    """
    self.result[varname] = result
    # Also expose the result as an attribute for convenient access.
    setattr(self, varname, result)
    if pointer is None:
        return
    self._finalizers[varname] = pointer
<SYSTEM_TASK:>
Loads a python module from string
<END_TASK>
<USER_TASK:>
Description:
def load_python_object(name):
    """
    Loads a python module from string.

    *name* is a dotted path; the part after the last dot is treated as an
    attribute of the module named by the part before it.  When *name*
    contains no dot, the whole string is imported and returned as a module.

    Raises CommandoLoaderException when the module or attribute cannot be
    loaded.
    """
    logger = getLoggerWithNullHandler('commando.load_python_object')
    (module_name, _, object_name) = name.rpartition(".")
    if module_name == '':
        # No dot: the whole name is the module to import.
        (module_name, object_name) = (object_name, module_name)
    try:
        logger.debug('Loading module [%s]' % module_name)
        module = __import__(module_name)
    except ImportError:
        raise CommandoLoaderException(
            "Module [%s] cannot be loaded." % module_name)
    if object_name == '':
        return module
    try:
        # __import__ returns the top-level package; fetch the actual
        # (possibly nested) module from sys.modules instead.
        module = sys.modules[module_name]
    except KeyError:
        raise CommandoLoaderException(
            "Error occured when loading module [%s]" % module_name)
    try:
        logger.debug('Getting object [%s] from module [%s]' %
                     (object_name, module_name))
        return getattr(module, object_name)
    except AttributeError:
        raise CommandoLoaderException(
            "Cannot load object [%s]. "
            "Module [%s] does not contain object [%s]. "
            "Please fix the configuration or "
            "ensure that the module is installed properly" % (
                name,
                module_name,
                object_name))
<SYSTEM_TASK:>
Delegates to `subprocess.check_call`.
<END_TASK>
<USER_TASK:>
Description:
def call(self, *args, **kwargs):
    """
    Delegates to `subprocess.check_call`.
    """
    processed_args, processed_kwargs = self.__process__(*args, **kwargs)
    return check_call(processed_args, **processed_kwargs)
<SYSTEM_TASK:>
Delegates to `subprocess.Popen`.
<END_TASK>
<USER_TASK:>
Description:
def open(self, *args, **kwargs):
    """
    Delegates to `subprocess.Popen`.
    """
    processed_args, processed_kwargs = self.__process__(*args, **kwargs)
    return Popen(processed_args, **processed_kwargs)
<SYSTEM_TASK:>
The view used to render a tag after the page has loaded.
<END_TASK>
<USER_TASK:>
Description:
def tag(request, tag_id=None):
    """
    The view used to render a tag after the page has loaded.

    :arg request: the Django HTTP request.
    :arg tag_id: identifier passed to get_tag_html to look up the template text.
    """
    # Render the stored tag HTML as a Django template against the request
    # context so template variables inside the tag are resolved.
    html = get_tag_html(tag_id)
    t = template.Template(html)
    c = template.RequestContext(request)
    return HttpResponse(t.render(c))
<SYSTEM_TASK:>
Get the fill color depending on age.
<END_TASK>
<USER_TASK:>
Description:
def _get_color(self, age):
"""Get the fill color depending on age.
Args:
age (int): The age of the branch/es
Returns:
tuple: (r, g, b)
""" |
if age == self.tree.age:
return self.leaf_color
color = self.stem_color
tree = self.tree
if len(color) == 3:
return color
diff = [color[i+3]-color[i] for i in range(3)]
per_age = [diff[i]/(tree.age-1) for i in range(3)]
return tuple([int(color[i]+per_age[i]*age) for i in range(3)]) |
<SYSTEM_TASK:>
Draws the tree.
<END_TASK>
<USER_TASK:>
Description:
def draw(self):
    """Draws the tree.

    Only ages listed in self.ages are rendered; each age level gets its
    own thickness and color.
    """
    for age, level in enumerate(self.tree.get_branches()):
        if age not in self.ages:
            continue
        thickness = self._get_thickness(age)
        color = self._get_color(age)
        for branch in level:
            self._draw_branch(branch, color, thickness, age)
<SYSTEM_TASK:>
Create a migration in the current or new revision folder
<END_TASK>
<USER_TASK:>
Description:
def _cmd_create(self):
    """Create a migration in the current or new revision folder.

    Builds up/down SQL stub files named with a UTC timestamp and a
    slugified version of self._message inside the latest (or a newly
    created) revision folder.
    """
    assert self._message, "need to supply a message for the \"create\" command"
    # Bootstrap the very first revision when none exist yet.
    if not self._revisions:
        self._revisions.append("1")
    # get the migration folder
    rev_folder = self._revisions[-1]
    full_rev_path = os.path.join(self._migration_path, rev_folder)
    if not os.path.exists(full_rev_path):
        os.mkdir(full_rev_path)
    else:
        count = len(glob.glob(os.path.join(full_rev_path, "*")))
        # create next revision folder if needed
        if count and self._rev and int(self._rev) == 0:
            rev_folder = str(int(rev_folder) + 1)
            full_rev_path = os.path.join(self._migration_path, rev_folder)
            os.mkdir(full_rev_path)
            self._revisions.append(rev_folder)
    # format file name: lowercase words joined by underscores, punctuation
    # replaced, prefixed with a UTC timestamp for ordering.
    filename = '_'.join([s.lower() for s in self._message.split(' ') if s.strip()])
    for p in string.punctuation:
        if p in filename:
            filename = filename.replace(p, '_')
    filename = "%s_%s" % (datetime.utcnow().strftime("%Y%m%d%H%M%S"), filename.replace('__', '_'))
    # create the migration files
    self._log(0, "creating files: ")
    for s in ('up', 'down'):
        file_path = os.path.join(full_rev_path, "%s.%s.sql" % (filename, s))
        with open(file_path, 'a+') as w:
            w.write('\n'.join([
                '-- *** %s ***' % s.upper(),
                '-- file: %s' % os.path.join(rev_folder, filename),
                '-- comment: %s' % self._message]))
        self._log(0, file_path)
<SYSTEM_TASK:>
Validate and return the revision to use for current command
<END_TASK>
<USER_TASK:>
Description:
def _get_revision(self):
"""Validate and return the revision to use for current command
""" |
assert self._revisions, "no migration revision exist"
revision = self._rev or self._revisions[-1]
# revision count must be less or equal since revisions are ordered
assert revision in self._revisions, "invalid revision specified"
return revision |
<SYSTEM_TASK:>
Parses the DOM and returns media attributes in the main-content area.
<END_TASK>
<USER_TASK:>
Description:
def parse(self, media_page):
    """Parses the DOM and returns media attributes in the main-content area.

    :type media_page: :class:`bs4.BeautifulSoup`
    :param media_page: MAL media page's DOM
    :rtype: dict
    :return: media attributes.
    """
    media_info = self.parse_sidebar(media_page)
    # Synopsis: the text under the "Synopsis" heading, minus nested h2 tags.
    try:
        synopsis_elt = media_page.find(u'h2', text=u'Synopsis').parent
        utilities.extract_tags(synopsis_elt.find_all(u'h2'))
        media_info[u'synopsis'] = synopsis_elt.text.strip()
    except:
        if not self.session.suppress_parse_exceptions:
            raise
    # Related media: walk links under the "Related <Anime|Manga>" heading and
    # group them by the relation-type label preceding each link.
    try:
        related_title = media_page.find(u'h2', text=u'Related ' + self.__class__.__name__)
        if related_title:
            related_elt = related_title.parent
            utilities.extract_tags(related_elt.find_all(u'h2'))
            related = {}
            for link in related_elt.find_all(u'a'):
                href = link.get(u'href').replace(u'http://myanimelist.net', '')
                if not re.match(r'/(anime|manga)', href):
                    break
                curr_elt = link.previous_sibling
                if curr_elt is None:
                    # we've reached the end of the list.
                    break
                related_type = None
                # Scan backwards through siblings for the "<Type>:" label.
                while True:
                    if not curr_elt:
                        raise MalformedAnimePageError(self.id, related_elt, message="Prematurely reached end of related anime listing")
                    if isinstance(curr_elt, bs4.NavigableString):
                        type_match = re.match(u'(?P<type>[a-zA-Z\ \-]+):', curr_elt)
                        if type_match:
                            related_type = type_match.group(u'type')
                            break
                    curr_elt = curr_elt.previous_sibling
                title = link.text
                # parse link: may be manga or anime.
                href_parts = href.split(u'/')
                # sometimes links on MAL are broken, of the form /anime//
                if href_parts[2] == '':
                    continue
                # of the form: /(anime|manga)/1/Cowboy_Bebop
                obj_id = int(href_parts[2])
                new_obj = getattr(self.session, href_parts[1])(obj_id).set({'title': title})
                if related_type not in related:
                    related[related_type] = [new_obj]
                else:
                    related[related_type].append(new_obj)
            media_info[u'related'] = related
        else:
            media_info[u'related'] = None
    except:
        if not self.session.suppress_parse_exceptions:
            raise
    return media_info
<SYSTEM_TASK:>
Parses the DOM and returns media character attributes in the sidebar.
<END_TASK>
<USER_TASK:>
Description:
def parse_characters(self, character_page):
    """Parses the DOM and returns media character attributes in the sidebar.

    :type character_page: :class:`bs4.BeautifulSoup`
    :param character_page: MAL character page's DOM
    :rtype: dict
    :return: character attributes.
    """
    media_info = self.parse_sidebar(character_page)
    try:
        # NOTE(review): on Python 3 filter() returns an iterator, which is
        # always truthy and not subscriptable — this code presumes Python 2.
        character_title = filter(lambda x: u'Characters' in x.text, character_page.find_all(u'h2'))
        media_info[u'characters'] = {}
        if character_title:
            character_title = character_title[0]
            # Each character lives in its own table following the heading.
            curr_elt = character_title.find_next_sibling(u'table')
            while curr_elt:
                curr_row = curr_elt.find(u'tr')
                # character in second col.
                character_col = curr_row.find_all(u'td', recursive=False)[1]
                character_link = character_col.find(u'a')
                # MAL lists names as "Last, First"; restore natural order.
                character_name = ' '.join(reversed(character_link.text.split(u', ')))
                link_parts = character_link.get(u'href').split(u'/')
                # of the form /character/7373/Holo
                character = self.session.character(int(link_parts[2])).set({'name': character_name})
                role = character_col.find(u'small').text
                media_info[u'characters'][character] = {'role': role}
                curr_elt = curr_elt.find_next_sibling(u'table')
    except:
        if not self.session.suppress_parse_exceptions:
            raise
    return media_info
<SYSTEM_TASK:>
Fetches the MAL media page and sets the current media's attributes.
<END_TASK>
<USER_TASK:>
Description:
def load(self):
    """Fetches the MAL media page and sets the current media's attributes.

    :rtype: :class:`.Media`
    :return: current media object.
    """
    # The class name (Anime/Manga) doubles as the URL path segment.
    media_page = self.session.session.get(u'http://myanimelist.net/' + self.__class__.__name__.lower() + u'/' + str(self.id)).text
    self.set(self.parse(utilities.get_clean_dom(media_page)))
    return self
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.