Unnamed: 0 (int64, 0-389k) | code (string, 26-79.6k chars) | docstring (string, 1-46.9k chars)
---|---|---|
23,000 | def gen_centers(self):
'''choose the points in the s cycle we'd like centers to be at
c_des = np.linspace(0, self.cs.run_time, self.bfs)
self.c = np.zeros(len(c_des))
for ii, point in enumerate(c_des):
diff = abs(t - point)
self.c[ii] = x_track[np.where(diff == min(diff))[0][0]]'''
if self.bfs == 2:
first = 0.5
last = 0.8
elif self.bfs == 3:
first = 0.4
last = 0.8
else:
first = 0.2
last = 0.8
des_c = np.linspace(first,last,self.bfs)
self.c = np.ones(len(des_c))
for n in range(len(des_c)):
self.c[n] = -np.log(des_c[n]) | Set the centres of the Gaussian basis
functions to be spaced evenly throughout run time |
23,001 | def unregister_transformer(self, transformer):
if transformer in self._transformers:
self._transformers.remove(transformer) | Unregister a transformer instance. |
23,002 | def get_instance(self, payload):
return WorkerInstance(self._version, payload, workspace_sid=self._solution['workspace_sid'], ) | Build an instance of WorkerInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.worker.WorkerInstance
:rtype: twilio.rest.taskrouter.v1.workspace.worker.WorkerInstance |
23,003 | def assess_content(member, file_filter):
member_path = member.name.replace('.', '', 1)
if len(member_path) == 0:
return False
if "skip_files" in file_filter:
if member_path in file_filter["skip_files"]:
return False
if "assess_content" in file_filter:
if member_path in file_filter["assess_content"]:
return True
return False | Determine if the filter wants the file to be read for content.
In the case of yes, we would then want to add the content to the
hash and not the file object. |
23,004 | def _validate_isvalid_quantity(self, isvalid_quantity, field, value):
quantity = Q_(value[0])
low_lim = 0.0 * units(property_units[field])
try:
if quantity <= low_lim:
self._error(
field, .format(property_units[field]),
)
except pint.DimensionalityError:
self._error(field,
+ property_units[field]
) | Check that the given value is valid and has appropriate units.
Args:
isvalid_quantity (`bool`): flag from schema indicating quantity to be checked.
field (`str`): property associated with quantity in question.
value (`list`): list whose first element is a string representing a value with units
The rule's arguments are validated against this schema:
{'isvalid_quantity': {'type': 'bool'}, 'field': {'type': 'str'},
'value': {'type': 'list'}} |
23,005 | def rsub(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"rsub", other, axis=axis, level=level, fill_value=fill_value
) | Subtract this DataFrame from a DataFrame/Series/scalar (reverse subtraction).
Args:
other: The object from which this DataFrame is subtracted.
axis: The axis to apply the subtraction over.
level: Multilevel index level to subtract over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the subtraction applied. |
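For reference, reverse subtraction computes `other - self` rather than `self - other`; a minimal pandas sketch (toy frame, not taken from the row above) showing the difference:

import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})
print(df.rsub(10))  # 10 - df -> a: 9.0, 8.0; b: 7.0, 6.0
print(df.sub(10))   # df - 10 -> a: -9.0, -8.0; b: -7.0, -6.0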
23,006 | def set_wizard_step_description(self):
subcategory = self.parent.step_kw_subcategory.selected_subcategory()
field = self.parent.step_kw_field.selected_fields()
is_raster = is_raster_layer(self.parent.layer)
if is_raster:
if self.layer_mode == layer_mode_continuous:
text_label = multiple_continuous_hazard_classifications_raster
else:
text_label = multiple_classified_hazard_classifications_raster
text_label = text_label % (
subcategory[], self.layer_purpose[])
else:
if self.layer_mode == layer_mode_continuous:
text_label = multiple_continuous_hazard_classifications_vector
else:
text_label = multiple_classified_hazard_classifications_vector
text_label = text_label % (
subcategory[], self.layer_purpose[], field)
self.multi_classifications_label.setText(text_label) | Set the text for description. |
23,007 | def create_identity(self, name, attrs=[]):
params = {
: name,
: attrs
}
resp = self.request(, {: params})
return zobjects.Identity.from_dict(resp[]) | Create an Identity
:param name: identity name
:param attrs: list of dicts of attributes (zimsoap format)
:returns: a zobjects.Identity object |
23,008 | def _parse_status(self, status):
if "rented" in status:
self.status = HouseStatus.RENTED
else:
m = list_auction_regex.search(status)
if m:
self.highest_bid = int(m.group())
if m.group("time_unit") == "day":
self.time_left = datetime.timedelta(days=int(m.group("time_left")))
else:
self.time_left = datetime.timedelta(hours=int(m.group("time_left")))
self.status = HouseStatus.AUCTIONED | Parses the status string found in the table and applies the corresponding values.
Parameters
----------
status: :class:`str`
The string containing the status. |
23,009 | def verify_weave_options(opt, parser):
cache_dir = os.environ['PYTHONCOMPILED']
if opt.fixed_weave_cache:
if os.environ.get("FIXED_WEAVE_CACHE", None):
cache_dir = os.environ["FIXED_WEAVE_CACHE"]
elif getattr(sys, 'frozen', False):
cache_dir = sys._MEIPASS
else:
cache_dir = os.path.join(os.getcwd(),"pycbc_inspiral")
os.environ['PYTHONCOMPILED'] = cache_dir
logging.debug("fixed_weave_cache: Setting weave cache to %s", cache_dir)
sys.path = [cache_dir] + sys.path
try: os.makedirs(cache_dir)
except OSError: pass
if not os.environ.get("LAL_DATA_PATH", None):
os.environ['LAL_DATA_PATH'] = cache_dir
if opt.per_process_weave_cache:
cache_dir = os.path.join(cache_dir, str(os.getpid()))
os.environ['PYTHONCOMPILED'] = cache_dir
logging.info("Setting weave cache to %s", cache_dir)
if not os.path.exists(cache_dir):
try:
os.makedirs(cache_dir)
except:
logging.error("Unable to create weave cache %s", cache_dir)
sys.exit(1)
if opt.clear_weave_cache_at_start:
_clear_weave_cache()
os.makedirs(cache_dir)
if opt.clear_weave_cache_at_end:
atexit.register(_clear_weave_cache)
signal.signal(signal.SIGTERM, _clear_weave_cache) | Parses the CLI options, verifies that they are consistent and
reasonable, and acts on them if they are
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes
parser : object
OptionParser instance. |
23,010 | def read(self):
line = self.trace_file.readline()
if line == '':
if self.loop:
self._reopen_file()
else:
self.trace_file.close()
self.trace_file = None
raise DataSourceError()
message = JsonFormatter.deserialize(line)
timestamp = message.get('timestamp', None)
if self.realtime and timestamp is not None:
self._store_timestamp(timestamp)
self._wait(self.starting_time, self.first_timestamp, timestamp)
return line + "\x00" | Read one line of data at a time from the input source. |
23,011 | def write_contents(self, filename, contents, directory=None):
filepath = "{}/{}".format(directory.rstrip("/"), filename) if directory else filename
self._write_to_zipfile(filepath, contents)
return filepath | write_contents: Write contents to filename in zip
Args:
contents: (str) contents of file
filename: (str) name of file in zip
directory: (str) directory in zipfile to write file to (optional)
Returns: path to file in zip |
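A rough standalone equivalent of the path handling above, using the standard zipfile module directly (the wrapper `_write_to_zipfile` is assumed to behave like `ZipFile.writestr`):

import zipfile

def write_contents(zf, filename, contents, directory=None):
    # Join "directory/filename", trimming a trailing slash so it is not doubled.
    filepath = "{}/{}".format(directory.rstrip("/"), filename) if directory else filename
    zf.writestr(filepath, contents)  # write the string straight into the archive
    return filepath

with zipfile.ZipFile("bundle.zip", "w") as zf:
    print(write_contents(zf, "notes.txt", "hello", directory="docs/"))  # docs/notes.txt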
23,012 | def gcpool(name, start, room, lenout=_default_len_out):
name = stypes.stringToCharP(name)
start = ctypes.c_int(start)
room = ctypes.c_int(room)
lenout = ctypes.c_int(lenout)
n = ctypes.c_int()
cvals = stypes.emptyCharArray(lenout, room)
found = ctypes.c_int()
libspice.gcpool_c(name, start, room, lenout, ctypes.byref(n),
ctypes.byref(cvals), ctypes.byref(found))
return [stypes.toPythonString(x.value) for x in
cvals[0:n.value]], bool(found.value) | Return the character value of a kernel variable from the kernel pool.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gcpool_c.html
:param name: Name of the variable whose value is to be returned.
:type name: str
:param start: Which component to start retrieving for name.
:type start: int
:param room: The largest number of values to return.
:type room: int
:param lenout: The length of the output string.
:type lenout: int
:return: Values associated with name.
:rtype: list of str |
23,013 | def drum_in_pattern_rate(pianoroll, beat_resolution, tolerance=0.1):
if beat_resolution not in (4, 6, 8, 9, 12, 16, 18, 24):
raise ValueError("Unsupported beat resolution. Only 4, 6, 8 ,9, 12, "
"16, 18, 42 are supported.")
_validate_pianoroll(pianoroll)
def _drum_pattern_mask(res, tol):
if res == 24:
drum_pattern_mask = np.tile([1., tol, 0., 0., 0., tol], 4)
elif res == 12:
drum_pattern_mask = np.tile([1., tol, tol], 4)
elif res == 6:
drum_pattern_mask = np.tile([1., tol, tol], 2)
elif res == 18:
drum_pattern_mask = np.tile([1., tol, 0., 0., 0., tol], 3)
elif res == 9:
drum_pattern_mask = np.tile([1., tol, tol], 3)
elif res == 16:
drum_pattern_mask = np.tile([1., tol, 0., tol], 4)
elif res == 8:
drum_pattern_mask = np.tile([1., tol], 4)
elif res == 4:
drum_pattern_mask = np.tile([1., tol], 2)
return drum_pattern_mask
drum_pattern_mask = _drum_pattern_mask(beat_resolution, tolerance)
n_in_pattern = np.sum(drum_pattern_mask * np.count_nonzero(pianoroll, 1))
return n_in_pattern / np.count_nonzero(pianoroll) | Return the ratio of the number of drum notes that lie on the drum
pattern (i.e., at certain time steps) to the total number of drum notes. |
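To illustrate the masking idea with plain NumPy (toy pianoroll, beat_resolution=4; not the library call itself): on-pattern time steps are weighted 1 and off-pattern steps get the tolerance weight.

import numpy as np

tolerance = 0.1
mask = np.tile([1.0, tolerance], 2)      # beat_resolution=4 -> [1.0, 0.1, 1.0, 0.1]

pianoroll = np.zeros((4, 3), dtype=int)  # 4 time steps x 3 pitches
pianoroll[0, 0] = pianoroll[1, 1] = pianoroll[2, 2] = 1

notes_per_step = np.count_nonzero(pianoroll, 1)                     # [1, 1, 1, 0]
rate = np.sum(mask * notes_per_step) / np.count_nonzero(pianoroll)  # (1 + 0.1 + 1) / 3
print(rate)                                                         # ~0.7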
23,014 | def find_data(folder):
for (path, directories, filenames) in os.walk(folder):
for filename in filenames:
yield os.path.join(, path, filename) | Include everything in the folder |
23,015 | def instantiate(self, **extra_args):
input_block = self.input_block.instantiate()
policy_backbone = self.policy_backbone.instantiate(**extra_args)
value_backbone = self.value_backbone.instantiate(**extra_args)
return StochasticPolicyModelSeparate(input_block, policy_backbone, value_backbone, extra_args[]) | Instantiate the model |
23,016 | def invite_by_email(self, email, sender=None, request=None, **kwargs):
try:
user = self.user_model.objects.get(email=email)
except self.user_model.DoesNotExist:
if "username" in inspect.getargspec(
self.user_model.objects.create_user
).args:
user = self.user_model.objects.create(
username=self.get_username(),
email=email,
password=self.user_model.objects.make_random_password(),
)
else:
user = self.user_model.objects.create(
email=email, password=self.user_model.objects.make_random_password()
)
user.is_active = False
user.save()
self.send_invitation(user, sender, **kwargs)
return user | Creates an inactive user with the information we know and then sends
an invitation email for that user to complete registration.
If your project uses email in a different way then you should make sure to
extend this method, as it only checks the `email` attribute for Users. |
23,017 | def in_a_while(days=0, seconds=0, microseconds=0, milliseconds=0,
minutes=0, hours=0, weeks=0, time_format=TIME_FORMAT):
if not time_format:
time_format = TIME_FORMAT
return time_in_a_while(days, seconds, microseconds, milliseconds,
minutes, hours, weeks).strftime(time_format) | :param days:
:param seconds:
:param microseconds:
:param milliseconds:
:param minutes:
:param hours:
:param weeks:
:param time_format:
:return: Formatted string |
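A self-contained sketch of what this helper presumably does; the TIME_FORMAT constant and the behaviour of `time_in_a_while` are assumptions inlined here, not the module's actual definitions:

from datetime import datetime, timedelta

TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"  # assumed default format

def in_a_while(days=0, seconds=0, minutes=0, hours=0, weeks=0, time_format=TIME_FORMAT):
    # time_in_a_while(...) presumably returns utcnow() + timedelta(...); inlined below.
    future = datetime.utcnow() + timedelta(days=days, seconds=seconds,
                                           minutes=minutes, hours=hours, weeks=weeks)
    return future.strftime(time_format)

print(in_a_while(hours=2))  # e.g. '2024-06-01T14:00:00Z'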
23,018 | def update_sdb(self, sdb_id, owner=None, description=None, user_group_permissions=None,
iam_principal_permissions=None):
old_data = self.get_sdb_by_id(sdb_id)
temp_data = {}
keys = ('owner', 'description', 'user_group_permissions', 'iam_principal_permissions')
for k in keys:
if k in old_data:
temp_data[k] = old_data[k]
if owner is not None:
temp_data["owner"] = owner
if description is not None:
temp_data["description"] = description
if user_group_permissions is not None and len(user_group_permissions) > 0:
temp_data["user_group_permissions"] = user_group_permissions
if iam_principal_permissions is not None and len(iam_principal_permissions) > 0:
temp_data["iam_principal_permissions"] = iam_principal_permissions
data = json.encoder.JSONEncoder().encode(temp_data)
sdb_resp = put_with_retry(self.cerberus_url + + sdb_id, data=str(data),
headers=self.HEADERS)
throw_if_bad_response(sdb_resp)
return sdb_resp.json() | Update a safe deposit box.
Keyword arguments:
owner (string) -- AD group that owns the safe deposit box
description (string) -- Description of the safe deposit box
user_group_permissions (list) -- list of dictionaries containing the key name and maybe role_id
iam_principal_permissions (list) -- list of dictionaries containing the key name iam_principal_arn
and role_id |
23,019 | def _to_EC_KEY(self):
key = self._lib.EC_KEY_new_by_curve_name(self._nid)
return _ffi.gc(key, _lib.EC_KEY_free) | Create a new OpenSSL EC_KEY structure initialized to use this curve.
The structure is automatically garbage collected when the Python object
is garbage collected. |
23,020 | def _display_matches_gnu_readline(self, substitution: str, matches: List[str],
longest_match_length: int) -> None:
if rl_type == RlType.GNU:
if self.display_matches:
matches_to_display = self.display_matches
longest_match_length = 0
for cur_match in matches_to_display:
cur_length = utils.ansi_safe_wcswidth(cur_match)
if cur_length > longest_match_length:
longest_match_length = cur_length
else:
matches_to_display = matches
matches_to_display, padding_length = self._pad_matches_to_display(matches_to_display)
longest_match_length += padding_length
readline_lib.rl_display_match_list(strings_array, len(encoded_matches), longest_match_length)
rl_force_redisplay() | Prints a match list using GNU readline's rl_display_match_list()
This exists to print self.display_matches if it has data; otherwise, matches is printed.
:param substitution: the substitution written to the command line
:param matches: the tab completion matches to display
:param longest_match_length: longest printed length of the matches |
23,021 | def get(self, request, customer_uuid):
context = self._build_context(request, customer_uuid)
manage_learners_form = ManageLearnersForm(
user=request.user,
enterprise_customer=context[self.ContextParameters.ENTERPRISE_CUSTOMER]
)
context.update({self.ContextParameters.MANAGE_LEARNERS_FORM: manage_learners_form})
return render(request, self.template, context) | Handle GET request - render linked learners list and "Link learner" form.
Arguments:
request (django.http.request.HttpRequest): Request instance
customer_uuid (str): Enterprise Customer UUID
Returns:
django.http.response.HttpResponse: HttpResponse |
23,022 | def get_upstream_fork_point(self):
possible_relatives = []
try:
if not self.repo:
return None
try:
active_branch = self.repo.active_branch
except (TypeError, ValueError):
logger.debug("git is in a detached head state")
return None
else:
tracking_branch = active_branch.tracking_branch()
if tracking_branch:
possible_relatives.append(tracking_branch.commit)
if not possible_relatives:
for branch in self.repo.branches:
tracking_branch = branch.tracking_branch()
if tracking_branch is not None:
possible_relatives.append(tracking_branch.commit)
head = self.repo.head
most_recent_ancestor = None
for possible_relative in possible_relatives:
for ancestor in self.repo.merge_base(head, possible_relative):
if most_recent_ancestor is None:
most_recent_ancestor = ancestor
elif self.repo.is_ancestor(most_recent_ancestor, ancestor):
most_recent_ancestor = ancestor
return most_recent_ancestor
except exc.GitCommandError as e:
logger.debug("git remote upstream fork point could not be found")
logger.debug(e.message)
return None | Get the most recent ancestor of HEAD that occurs on an upstream
branch.
First looks at the current branch's tracking branch, if applicable. If
that doesn't work, looks at every other branch to find the most recent
ancestor of HEAD that occurs on a tracking branch.
Returns:
git.Commit object or None |
23,023 | def create_presenter(self, request, target_route):
presenter_name = target_route.presenter_name()
if self.presenter_collection().has(presenter_name) is False:
raise RuntimeError( % presenter_name)
presenter_class = self.presenter_collection().presenter(presenter_name)
return self.presenter_factory().instantiate(presenter_class, request, target_route, self) | Create a presenter from the given request and target route
:param request: client request
:param target_route: route to use
:return: WWebPresenter |
23,024 | def update_lambda_configuration( self,
lambda_arn,
function_name,
handler,
description=,
timeout=30,
memory_size=512,
publish=True,
vpc_config=None,
runtime=,
aws_environment_variables=None,
aws_kms_key_arn=None
):
print("Updating Lambda function configuration..")
if not vpc_config:
vpc_config = {}
if not self.credentials_arn:
self.get_credentials_arn()
if not aws_kms_key_arn:
aws_kms_key_arn = ''
if not aws_environment_variables:
aws_environment_variables = {}
if self.tags:
self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)
return resource_arn | Given an existing function ARN, update the configuration variables. |
23,025 | def cache_values(self, results):
if results is None:
return
elif isinstance(results,np.ndarray):
results=[results]
elif isinstance(results,list):
if len(results) != len(self.outputs):
raise ValueError()
elif isinstance(results,np.number):
if len(self.outputs) != 1:
raise ValueError( % (len(self.outputs)))
results=[results]
for i,ndarray in enumerate(results):
self.session._cache_value(self.outputs[i], ndarray) | loads into DebugSession cache |
23,026 | def upload_media(self, filename, progress=None):
self.oem_init()
return self._oem.upload_media(filename, progress) | Upload a file to be hosted on the target BMC
This will upload the specified data to
the BMC so that it will make it available to the system as an emulated
USB device.
:param filename: The filename to use, the basename of the parameter
will be given to the bmc.
:param progress: Optional callback for progress updates |
23,027 | def lfsr_next_one_seed(seed_iter, min_value_shift):
try:
seed = next(seed_iter)
except StopIteration:
return 0xFFFFFFFF
else:
if seed is None:
return 0xFFFFFFFF
else:
seed = int(seed) & 0xFFFFFFFF
working_seed = (seed ^ (seed << 16)) & 0xFFFFFFFF
min_value = 1 << min_value_shift
if working_seed < min_value:
working_seed = (seed << 24) & 0xFFFFFFFF
if working_seed < min_value:
working_seed ^= 0xFFFFFFFF
return working_seed | High-quality seeding for LFSR generators.
The LFSR generator components discard a certain number of their lower bits
when generating each output. The significant bits of their state must not
all be zero. We must ensure that when seeding the generator.
In case generators are seeded from an incrementing input (such as a system
timer), and between increments only the lower bits may change, we would
also like the lower bits of the input to change the initial state, and not
just be discarded. So we do basic manipulation of the seed input value to
ensure that all bits of the seed input affect the initial state. |
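A worked example of the bit mixing described above, using plain integer arithmetic (no dependencies):

seed = 0x00000003                       # only the two lowest bits are set
mixed = (seed ^ (seed << 16)) & 0xFFFFFFFF
print(hex(mixed))                       # 0x30003: the low bits now also affect bits 16+

min_value_shift = 24
min_value = 1 << min_value_shift        # 0x1000000
if mixed < min_value:                   # still below the threshold, so shift further
    mixed = (seed << 24) & 0xFFFFFFFF
print(hex(mixed))                       # 0x3000000, comfortably above min_value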
23,028 | def _condition_number(self):
ev = np.linalg.eig(np.matmul(self.xwins.swapaxes(1, 2), self.xwins))[0]
return np.sqrt(ev.max(axis=1) / ev.min(axis=1)) | Condition number of each window of x: the square root of the ratio of the largest to smallest eigenvalue of x^T x (equivalently, the ratio of largest to smallest singular value). |
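The same quantity can be cross-checked against NumPy's built-in condition number for a single window, since the square root of the eigenvalue ratio of x^T x equals the singular-value ratio (toy data below):

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((50, 4))            # one window: 50 samples, 4 features

ev = np.linalg.eigvalsh(x.T @ x)            # eigenvalues of the Gram matrix (ascending)
cond_from_eig = np.sqrt(ev[-1] / ev[0])     # sqrt of largest/smallest eigenvalue
cond_from_svd = np.linalg.cond(x)           # 2-norm condition number (singular values)

print(np.isclose(cond_from_eig, cond_from_svd))  # True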
23,029 | def index_worker_output(self, worker_name, md5, index_name, subfield):
if subfield:
data = self.work_request(worker_name, md5)[worker_name][subfield]
else:
data = self.work_request(worker_name, md5)[worker_name]
self.indexer.index_data(data, index_name=index_name, doc_type=) | Index worker output with the Indexer.
Args:
worker_name: 'strings', 'pe_features', whatever
md5: the md5 of the sample
index_name: the name of the index
subfield: index just this subfield (None for all)
Returns:
Nothing |
23,030 | def _get_orb_lobster(orb):
orb_labs = ["s", "p_y", "p_z", "p_x", "d_xy", "d_yz", "d_z^2",
"d_xz", "d_x^2-y^2", "f_y(3x^2-y^2)", "f_xyz",
"f_yz^2", "f_z^3", "f_xz^2", "f_z(x^2-y^2)", "f_x(x^2-3y^2)"]
try:
orbital = Orbital(orb_labs.index(orb[1:]))
return orbital
except ValueError:  # list.index raises ValueError when the label is missing
print("Orb not in list") | Args:
orb: string representation of orbital
Returns:
Orbital |
23,031 | def config2(self):
config = []
data = {}
self.cnxn.xfer([0x3D])
sleep(10e-3)
for i in range(9):
resp = self.cnxn.xfer([0x00])[0]
config.append(resp)
data["AMSamplingInterval"] = self._16bit_unsigned(config[0], config[1])
data["AMIdleIntervalCount"] = self._16bit_unsigned(config[2], config[3])
data["AMFanOnIdle"] = config[4]
data["AMLaserOnIdle"] = config[5]
data["AMMaxDataArraysInFile"] = self._16bit_unsigned(config[6], config[7])
data["AMOnlySavePMData"] = config[8]
sleep(0.1)
return data | Read the second set of configuration variables and return as a dictionary.
**NOTE: This method is supported by firmware v18+.**
:rtype: dictionary
:Example:
>>> a.config2()
{
'AMFanOnIdle': 0,
'AMIdleIntervalCount': 0,
'AMMaxDataArraysInFile': 61798,
'AMSamplingInterval': 1,
'AMOnlySavePMData': 0,
'AMLaserOnIdle': 0
} |
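The `_16bit_unsigned` helper presumably packs two SPI bytes into one unsigned 16-bit integer; a minimal sketch (the byte order shown is an assumption):

def _16bit_unsigned(lsb, msb):
    # combine least-significant and most-significant bytes into a 16-bit value
    return (msb << 8) | lsb

print(_16bit_unsigned(0x26, 0xF1))  # 0xF126 = 61734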
23,032 | def _execute_xmpp(connected_callback):
from indico_chat.plugin import ChatPlugin
check_config()
jid = ChatPlugin.settings.get()
password = ChatPlugin.settings.get()
if '@' not in jid:
jid = '{}@{}'.format(jid, ChatPlugin.settings.get())
result = [None, None]
app = current_app._get_current_object()
def _session_start(event):
try:
with app.app_context():
result[0] = connected_callback(xmpp)
except Exception as e:
result[1] = e
if isinstance(e, IqError):
current_plugin.logger.exception(, e.condition)
else:
current_plugin.logger.exception()
finally:
xmpp.disconnect(wait=0)
xmpp = ClientXMPP(jid, password)
xmpp.register_plugin()
xmpp.register_plugin()
xmpp.register_plugin()
xmpp.add_event_handler('session_start', _session_start)
try:
xmpp.connect()
except Exception:
current_plugin.logger.exception()
xmpp.disconnect()
raise
try:
xmpp.process(threaded=False)
finally:
xmpp.disconnect(wait=0)
if result[1] is not None:
raise result[1]
return result[0] | Connects to the XMPP server and executes custom code
:param connected_callback: function to execute after connecting
:return: return value of the callback |
23,033 | def format_back(
number: FormatArg,
light: Optional[bool] = False,
extended: Optional[bool] = False) -> str:
return _format_code(
number,
backcolor=True,
light=light,
extended=extended
) | Return an escape code for a back color, by number.
This is a convenience method for handling the different code types
all in one shot.
It also handles some validation. |
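For reference, background colors map onto standard ANSI escape sequences; a sketch of how such codes are typically built (not this library's `_format_code` internals):

def format_back(number, light=False, extended=False):
    if extended:                    # 256-color palette: ESC[48;5;<n>m
        return "\x1b[48;5;{}m".format(number)
    base = 100 if light else 40     # bright backgrounds start at 100, normal at 40
    return "\x1b[{}m".format(base + number)

print(repr(format_back(1)))                   # '\x1b[41m' (red background)
print(repr(format_back(196, extended=True)))  # '\x1b[48;5;196m'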
23,034 | def offset(self, offset):
self.log(u"Applying offset to all fragments...")
self.log([u" Offset %.3f", offset])
for fragment in self.fragments:
fragment.interval.offset(
offset=offset,
allow_negative=False,
min_begin_value=self.begin,
max_end_value=self.end
)
self.log(u"Applying offset to all fragments... done") | Move all the intervals in the list by the given ``offset``.
:param offset: the shift to be applied
:type offset: :class:`~aeneas.exacttiming.TimeValue`
:raises TypeError: if ``offset`` is not an instance of ``TimeValue`` |
23,035 | def _update_proxy(self, change):
if change['name'] == 'points':
self.proxy.update_points(change)
else:
super(MapPolyline, self)._update_proxy(change) | An observer which sends the state change to the proxy. |
23,036 | def save(self, filename, compressed=True):
if not self.has_data:
return False
_, file_ext = os.path.splitext(filename)
if compressed:
if file_ext != COMPRESSED_TENSOR_EXT:
raise ValueError( %(COMPRESSED_TENSOR_EXT))
np.savez_compressed(filename,
self.data[:self.cur_index,...])
else:
if file_ext != TENSOR_EXT:
raise ValueError()
np.save(filename, self.data[:self.cur_index,...])
return True | Save a tensor to disk. |
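The underlying NumPy calls can be exercised directly; a tiny round-trip example (file names are arbitrary):

import numpy as np

data = np.arange(12, dtype=np.float32).reshape(3, 4)

np.savez_compressed("tensor.npz", data)   # compressed archive; array stored as 'arr_0'
np.save("tensor.npy", data)               # uncompressed .npy

restored = np.load("tensor.npz")["arr_0"]
print(np.array_equal(restored, np.load("tensor.npy")))  # True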
23,037 | def _process(self, word: str) -> List[str]:
if len(word.strip()) == 0:
return []
word = self.convert_consonantal_i(word)
my_word = " " + word + " "
letters = list(my_word)
positions = []
for dipth in self.diphthongs:
if dipth in my_word:
dipth_matcher = re.compile("{}".format(dipth))
matches = dipth_matcher.finditer(my_word)
for match in matches:
(start, end) = match.span()
positions.append(start)
matches = self.kw_matcher.finditer(my_word)
for match in matches:
(start, end) = match.span()
positions.append(start)
letters = string_utils.merge_next(letters, positions)
letters = string_utils.remove_blanks(letters)
positions.clear()
if not self._contains_vowels("".join(letters)):
return ["".join(letters).strip()]
positions = self._starting_consonants_only(letters)
while len(positions) > 0:
letters = string_utils.move_consonant_right(letters, positions)
letters = string_utils.remove_blanks(letters)
positions = self._starting_consonants_only(letters)
positions = self._ending_consonants_only(letters)
while len(positions) > 0:
letters = string_utils.move_consonant_left(letters, positions)
letters = string_utils.remove_blanks(letters)
positions = self._ending_consonants_only(letters)
positions = self._find_solo_consonant(letters)
while len(positions) > 0:
letters = self._move_consonant(letters, positions)
letters = string_utils.remove_blanks(letters)
positions = self._find_solo_consonant(letters)
positions = self._find_consonant_cluster(letters)
while len(positions) > 0:
letters = self._move_consonant(letters, positions)
letters = string_utils.remove_blanks(letters)
positions = self._find_consonant_cluster(letters)
return letters | Process a word into a list of strings representing the syllables of the word. This
method describes rules for consonant grouping behaviors and then iteratively applies those
rules to the list of letters that comprise the word, until all the letters are grouped into
appropriate syllable groups.
:param word:
:return: |
23,038 | def detect_keep_boundary(start, end, namespaces):
result_start, result_end = False, False
parent_start = start.getparent()
parent_end = end.getparent()
if parent_start.tag == "{%s}p" % namespaces['text']:
result_start = len(parent_start.getchildren()) > 1
if parent_end.tag == "{%s}p" % namespaces['text']:
result_end = len(parent_end.getchildren()) > 1
return result_start, result_end | a helper to inspect a link and see if we should keep the link boundary |
23,039 | def get_from(input_file, property_names):
with open(input_file) as f:
feature_collection = geojson.load(f)
features = feature_collection['features']
values = [tuple([feat['properties'].get(x)
for x in property_names]) for feat in features]
return values | Reads a geojson and returns a list of value tuples, each value corresponding to a
property in property_names.
Args:
input_file (str): File name.
property_names: List of strings; each string is a property name.
Returns:
List of value tuples. |
23,040 | def itemconfigure(self, iid, rectangle_options, text_options):
rectangle_id, text_id = self._markers[iid]["rectangle_id"], self._markers[iid]["text_id"]
if len(rectangle_options) != 0:
self._timeline.itemconfigure(rectangle_id, **rectangle_options)
if len(text_options) != 0:
self._timeline.itemconfigure(text_id, **text_options) | Configure options of items drawn on the Canvas
Low-level access to the individual elements of markers and other
items drawn on the timeline Canvas. All modifications are
overwritten when the TimeLine is redrawn. |
23,041 | def import_pyfiles(path):
n = 0
for pyfile in glob.glob(os.path.join(path, '*.py')):
m = import_file(pyfile)
IMPORTED_BUILD_SOURCES.append(m)
n += 1
return n | Import all *.py files in specified directory. |
23,042 | def dependencies(self):
cpio = self.rpm.gzip_file.read()
content = cpio.read()
return [] | Read the contents of the rpm itself
:return: |
23,043 | def set_axis_options(self, row, column, text):
subplot = self.get_subplot_at(row, column)
subplot.set_axis_options(text) | Set additional options as plain text. |
23,044 | def get_stack_info(self, stack):
stack_name = stack['StackName']
try:
template = self.cloudformation.get_template(
StackName=stack_name)['TemplateBody']
except botocore.exceptions.ClientError as e:
if "does not exist" not in str(e):
raise
raise exceptions.StackDoesNotExist(stack_name)
parameters = self.params_as_dict(stack.get('Parameters', []))
return [json.dumps(template), parameters] | Get the template and parameters of the stack currently in AWS
Returns [ template, parameters ] |
23,045 | def top(self, sort_by):
sort = sorted(self.results, key=sort_by)
return sort | Get the best results according to your custom sort method. |
23,046 | def get_slot_nio_bindings(self, slot_number):
nio_bindings = yield from self._hypervisor.send(.format(name=self._name,
slot_number=slot_number))
return nio_bindings | Returns slot NIO bindings.
:param slot_number: slot number
:returns: list of NIO bindings |
23,047 | def imagetransformer_b12l_4h_b256_uncond_dr03_tpu():
hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet()
update_hparams_for_tpu(hparams)
hparams.batch_size = 4
hparams.num_heads = 4
hparams.num_decoder_layers = 12
hparams.block_length = 256
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.learning_rate = 0.5
hparams.learning_rate_warmup_steps = 4000
hparams.layer_preprocess_sequence = "none"
hparams.layer_postprocess_sequence = "dan"
hparams.layer_prepostprocess_dropout = 0.3
hparams.unconditional = True
return hparams | works very well on 4x4. |
23,048 | def from_p12_keyfile(cls, service_account_email, filename,
private_key_password=None, scopes='',
token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
with open(filename, 'rb') as file_obj:
private_key_pkcs12 = file_obj.read()
return cls._from_p12_keyfile_contents(
service_account_email, private_key_pkcs12,
private_key_password=private_key_password, scopes=scopes,
token_uri=token_uri, revoke_uri=revoke_uri) | Factory constructor from a PKCS#12 keyfile.
Args:
service_account_email: string, The email associated with the
service account.
filename: string, The location of the PKCS#12 keyfile.
private_key_password: string, (Optional) Password for PKCS#12
private key. Defaults to ``notasecret``.
scopes: List or string, (Optional) Scopes to use when acquiring an
access token.
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0
provider can be used.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile.
Raises:
NotImplementedError if pyOpenSSL is not installed / not the
active crypto library. |
23,049 | def schedule(self, duration, at=None, delay=None, callback=None):
delay = self.calculateDelay(at, delay)
self.callback = callback
logger.info( + self.name + + str(duration) + + str(delay) + )
self.statuses.append({: ScheduledMeasurementStatus.SCHEDULED.name, : datetime.utcnow()})
threading.Timer(delay, self.execute, [duration]).start() | schedules the measurement (to execute asynchronously).
:param duration: how long to run for.
:param at: the time to start at.
:param delay: the time to wait til starting (use at or delay).
:param callback: a callback.
:return: nothing. |
23,050 | def get_rotated(self, angle):
result = self.copy()
result.rotate(angle)
return result | Return a vector rotated by angle from the given vector. Angle measured in radians counter-clockwise. |
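The rotation itself is the standard 2D formula; a minimal sketch with plain math (independent of the vector class above):

import math

def rotated(x, y, angle):
    # counter-clockwise rotation by `angle` radians
    c, s = math.cos(angle), math.sin(angle)
    return (x * c - y * s, x * s + y * c)

print(rotated(1.0, 0.0, math.pi / 2))  # ~(0.0, 1.0)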
23,051 | def _do_ffts(detector, stream, Nc):
min_fftlen = int(stream[0][0].data.shape[0] +
detector.data[0].shape[0] - Nc)
fftlen = scipy.fftpack.next_fast_len(min_fftlen)
mplen = stream[0][0].data.shape[0]
ulen = detector.data[0].shape[0]
num_st_fd = [np.fft.rfft(tr.data, n=fftlen)
for tr in stream[0]]
denom_st_fd = [np.fft.rfft(np.square(tr.data), n=fftlen)
for tr in stream[0]]
w = np.fft.rfft(np.ones(detector.data[0].shape[0]),
n=fftlen)
detector_fd = []
for dat_mat in detector.data:
detector_fd.append(np.array([np.fft.rfft(col[::-1], n=fftlen)
for col in dat_mat.T]))
return detector_fd, denom_st_fd, num_st_fd, w, ulen, mplen | Perform ffts on data, detector and denominator boxcar
:type detector: eqcorrscan.core.subspace.Detector
:param detector: Detector object for doing detecting
:type stream: list of obspy.core.stream.Stream
:param stream: List of streams processed according to detector
:type Nc: int
:param Nc: Number of channels in data. 1 for non-multiplexed
:return: list of time-reversed detector(s) in freq domain
:rtype: list
:return: list of squared data stream(s) in freq domain
:rtype: list
:return: list of data stream(s) in freq domain
:return: detector-length boxcar in freq domain
:rtype: numpy.ndarray
:return: length of detector
:rtype: int
:return: length of data
:rtype: int |
23,052 | def load_feather(protein_feather, length_filter_pid=None, copynum_scale=False, copynum_df=None):
protein_df = pd.read_feather(protein_feather).set_index()
from ssbio.protein.sequence.properties.residues import _aa_property_dict_one, EXTENDED_AA_PROPERTY_DICT_ONE
aggregators = {
: {: EXTENDED_AA_PROPERTY_DICT_ONE[],
: [, ]},
: {: EXTENDED_AA_PROPERTY_DICT_ONE[],
: [, , , , ]},
: {: _aa_property_dict_one[],
: [, , , , , ,
]},
: {: _aa_property_dict_one[],
: [, , , , ]},
: {: _aa_property_dict_one[],
: [, , , , ]},
: {: EXTENDED_AA_PROPERTY_DICT_ONE[],
: [, ]},
: {: EXTENDED_AA_PROPERTY_DICT_ONE[],
: [, ]},
: {: EXTENDED_AA_PROPERTY_DICT_ONE[],
: [, , , ,
]},
: {: EXTENDED_AA_PROPERTY_DICT_ONE[],
: [, , , ,
]}}
for suffix, info in aggregators.items():
agg_residues = info[]
for prefix in info[]:
to_add_idxes = []
for agg_res in agg_residues:
to_add_idx = prefix + + agg_res
if to_add_idx in protein_df.index:
to_add_idxes.append(to_add_idx)
subseq_agged_col = protein_df.loc[to_add_idxes, :].sum()
protein_df.loc[prefix + + suffix] = subseq_agged_col
if length_filter_pid:
keep_cols = protein_df.loc[][protein_df.loc[] > protein_df.at[, ] * length_filter_pid].index
protein_df = protein_df[keep_cols]
if copynum_scale:
if not isinstance(copynum_df, pd.DataFrame):
raise ValueError()
protein_id = op.basename(protein_feather).split()[0]
if protein_id in copynum_df.index:
copynum = copynum_df.at[protein_id, ]
if copynum > 0:
protein_df = protein_df * copynum
return protein_df | Load a feather of amino acid counts for a protein.
Args:
protein_feather (str): path to feather file
copynum_scale (bool): if counts should be multiplied by protein copy number
copynum_df (DataFrame): DataFrame of copy numbers
Returns:
DataFrame: of counts with some aggregated together |
23,053 | def make_plot(
self, count, plot=None, show=False, plottype=,
bar=dict(alpha=0.15, color=, linewidth=1.0, edgecolor=),
errorbar=dict(fmt=),
gaussian=dict(ls=, c=)
):
if numpy.ndim(count) != 1:
raise ValueError()
if plot is None:
import matplotlib.pyplot as plot
if len(count) == len(self.midpoints) + 2:
norm = numpy.sum(count)
data = numpy.asarray(count[1:-1]) / norm
elif len(count) != len(self.midpoints):
raise ValueError(
% (len(count), len(self.midpoints))
)
else:
data = numpy.asarray(count)
if plottype == :
data = numpy.cumsum(data)
data = numpy.array([0.] + data.tolist())
data_sdev = sdev(data)
if not numpy.all(data_sdev == 0.0):
data_mean = mean(data)
plot.errorbar(self.bins, data_mean, data_sdev, **errorbar)
if bar is not None:
plot.fill_between(self.bins, 0, data_mean, **bar)
plot.plot([self.bins[0], self.bins[-1]], [0.5, 0.5], )
plot.plot([self.bins[0], self.bins[-1]], [0.158655254, 0.158655254], )
plot.plot([self.bins[0], self.bins[-1]], [0.841344746, 0.841344746], )
else:
if plottype == :
data = data / self.widths
if errorbar is not None:
data_sdev = sdev(data)
if not numpy.all(data_sdev == 0.0):
data_mean = mean(data)
plot.errorbar(self.midpoints, data_mean, data_sdev, **errorbar)
if bar is not None:
plot.bar(self.bins[:-1], mean(data), width=self.widths, align=, **bar)
if gaussian is not None and self.g is not None:
if plottype == :
x = numpy.array(self.bins.tolist() + self.midpoints.tolist())
x.sort()
dx = (x - self.g.mean) / self.g.sdev
y = (erf(dx / 2**0.5) + 1) / 2.
yspline = cspline.CSpline(x, y)
plot.ylabel()
plot.ylim(0, 1.0)
elif plottype in [, ]:
x = self.bins
dx = (x - self.g.mean) / self.g.sdev
y = (erf(dx / 2**0.5) + 1) / 2.
x = self.midpoints
y = (y[1:] - y[:-1])
if plottype == :
y /= self.widths
plot.ylabel()
else:
plot.ylabel()
yspline = cspline.CSpline(x, y)
else:
raise ValueError( + str(plottype))
if len(x) < 100:
ny = int(100. / len(x) + 0.5) * len(x)
else:
ny = len(x)
xplot = numpy.linspace(x[0], x[-1], ny)
plot.plot(xplot, yspline(xplot), **gaussian)
if show:
plot.show()
return plot | Convert histogram counts in array ``count`` into a plot.
Args:
count (array): Array of histogram counts (see
:meth:`PDFHistogram.count`).
plot (plotter): :mod:`matplotlib` plotting window. If ``None``
uses the default window. Default is ``None``.
show (boolean): Displays the plot if ``True``; otherwise returns
the plot. Default is ``False``.
plottype (str): The probabilities in each bin are plotted if
``plottype='probability'`` (default). The average probability
density is plot if ``plottype='density'``. The
cumulative probability is plotted if ``plottype='cumulative'``.
bar (dictionary): Additional plotting arguments for the bar graph
showing the histogram. This part of the plot is omitted
if ``bar=None``.
errorbar (dictionary): Additional plotting arguments for the
errorbar graph, showing error bars on the histogram. This
part of the plot is omitted if ``errorbar=None``.
gaussian (dictionary): Additional plotting arguments for the
plot of the Gaussian probability for the |GVar| (``g``)
specified in the initialization. This part of the plot
is omitted if ``gaussian=None`` or if no ``g`` was
specified. |
23,054 | def type_id(self):
try:
return ContentType.objects.get_for_model(self.model, for_concrete_model=False).id
except DatabaseError as e:
raise DatabaseError("Unable to fetch ContentType object, is a plugin being registered before the initial syncdb? (original error: {0})".format(str(e))) | Shortcut to retrieving the ContentType id of the model. |
23,055 | def send_email(Source=None, Destination=None, Message=None, ReplyToAddresses=None, ReturnPath=None, SourceArn=None, ReturnPathArn=None, Tags=None, ConfigurationSetName=None):
pass | Composes an email message based on input data, and then immediately queues the message for sending.
There are several important points to know about SendEmail :
See also: AWS API Documentation
Examples
The following example sends a formatted email:
Expected Output:
:example: response = client.send_email(
Source='string',
Destination={
'ToAddresses': [
'string',
],
'CcAddresses': [
'string',
],
'BccAddresses': [
'string',
]
},
Message={
'Subject': {
'Data': 'string',
'Charset': 'string'
},
'Body': {
'Text': {
'Data': 'string',
'Charset': 'string'
},
'Html': {
'Data': 'string',
'Charset': 'string'
}
}
},
ReplyToAddresses=[
'string',
],
ReturnPath='string',
SourceArn='string',
ReturnPathArn='string',
Tags=[
{
'Name': 'string',
'Value': 'string'
},
],
ConfigurationSetName='string'
)
:type Source: string
:param Source: [REQUIRED]
The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide .
If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide .
In all cases, the email address must be 7-bit ASCII. If the text must contain any other characters, then you must use MIME encoded-word syntax (RFC 2047) instead of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?= . For more information, see RFC 2047 .
:type Destination: dict
:param Destination: [REQUIRED]
The destination for this email, composed of To:, CC:, and BCC: fields.
ToAddresses (list) --The To: field(s) of the message.
(string) --
CcAddresses (list) --The CC: field(s) of the message.
(string) --
BccAddresses (list) --The BCC: field(s) of the message.
(string) --
:type Message: dict
:param Message: [REQUIRED]
The message to be sent.
Subject (dict) -- [REQUIRED]The subject of the message: A short summary of the content, which will appear in the recipient's inbox.
Data (string) -- [REQUIRED]The textual data of the content.
Charset (string) --The character set of the content.
Body (dict) -- [REQUIRED]The message body.
Text (dict) --The content of the message, in text format. Use this for text-based email clients, or clients on high-latency networks (such as mobile devices).
Data (string) -- [REQUIRED]The textual data of the content.
Charset (string) --The character set of the content.
Html (dict) --The content of the message, in HTML format. Use this for email clients that can process HTML. You can include clickable links, formatted text, and much more in an HTML message.
Data (string) -- [REQUIRED]The textual data of the content.
Charset (string) --The character set of the content.
:type ReplyToAddresses: list
:param ReplyToAddresses: The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.
(string) --
:type ReturnPath: string
:param ReturnPath: The email address to which bounces and complaints are to be forwarded when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.
:type SourceArn: string
:param SourceArn: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.
For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to send from [email protected] , then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the Source to be [email protected] .
For more information about sending authorization, see the Amazon SES Developer Guide .
:type ReturnPathArn: string
:param ReturnPathArn: This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.
For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to use [email protected] , then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the ReturnPath to be [email protected] .
For more information about sending authorization, see the Amazon SES Developer Guide .
:type Tags: list
:param Tags: A list of tags, in the form of name/value pairs, to apply to an email that you send using SendEmail . Tags correspond to characteristics of the email that you define, so that you can publish email sending events.
(dict) --Contains the name and value of a tag that you can provide to SendEmail or SendRawEmail to apply to an email.
Message tags, which you use with configuration sets, enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide .
Name (string) -- [REQUIRED]The name of the tag. The name must:
Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).
Contain less than 256 characters.
Value (string) -- [REQUIRED]The value of the tag. The value must:
Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).
Contain less than 256 characters.
:type ConfigurationSetName: string
:param ConfigurationSetName: The name of the configuration set to use when you send an email using SendEmail .
:rtype: dict
:return: {
'MessageId': 'string'
}
:returns:
Source (string) -- [REQUIRED]
The email address that is sending the email. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES. For information about verifying identities, see the Amazon SES Developer Guide .
If you are sending on behalf of another user and have been permitted to do so by a sending authorization policy, then you must also specify the SourceArn parameter. For more information about sending authorization, see the Amazon SES Developer Guide .
In all cases, the email address must be 7-bit ASCII. If the text must contain any other characters, then you must use MIME encoded-word syntax (RFC 2047) instead of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?= . For more information, see RFC 2047 .
Destination (dict) -- [REQUIRED]
The destination for this email, composed of To:, CC:, and BCC: fields.
ToAddresses (list) --The To: field(s) of the message.
(string) --
CcAddresses (list) --The CC: field(s) of the message.
(string) --
BccAddresses (list) --The BCC: field(s) of the message.
(string) --
Message (dict) -- [REQUIRED]
The message to be sent.
Subject (dict) -- [REQUIRED]The subject of the message: A short summary of the content, which will appear in the recipient's inbox.
Data (string) -- [REQUIRED]The textual data of the content.
Charset (string) --The character set of the content.
Body (dict) -- [REQUIRED]The message body.
Text (dict) --The content of the message, in text format. Use this for text-based email clients, or clients on high-latency networks (such as mobile devices).
Data (string) -- [REQUIRED]The textual data of the content.
Charset (string) --The character set of the content.
Html (dict) --The content of the message, in HTML format. Use this for email clients that can process HTML. You can include clickable links, formatted text, and much more in an HTML message.
Data (string) -- [REQUIRED]The textual data of the content.
Charset (string) --The character set of the content.
ReplyToAddresses (list) -- The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address will receive the reply.
(string) --
ReturnPath (string) -- The email address to which bounces and complaints are to be forwarded when feedback forwarding is enabled. If the message cannot be delivered to the recipient, then an error message will be returned from the recipient's ISP; this message will then be forwarded to the email address specified by the ReturnPath parameter. The ReturnPath parameter is never overwritten. This email address must be either individually verified with Amazon SES, or from a domain that has been verified with Amazon SES.
SourceArn (string) -- This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to send for the email address specified in the Source parameter.
For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to send from [email protected] , then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the Source to be [email protected] .
For more information about sending authorization, see the Amazon SES Developer Guide .
ReturnPathArn (string) -- This parameter is used only for sending authorization. It is the ARN of the identity that is associated with the sending authorization policy that permits you to use the email address specified in the ReturnPath parameter.
For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com ) attaches a policy to it that authorizes you to use [email protected] , then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com , and the ReturnPath to be [email protected] .
For more information about sending authorization, see the Amazon SES Developer Guide .
Tags (list) -- A list of tags, in the form of name/value pairs, to apply to an email that you send using SendEmail . Tags correspond to characteristics of the email that you define, so that you can publish email sending events.
(dict) --Contains the name and value of a tag that you can provide to SendEmail or SendRawEmail to apply to an email.
Message tags, which you use with configuration sets, enable you to publish email sending events. For information about using configuration sets, see the Amazon SES Developer Guide .
Name (string) -- [REQUIRED]The name of the tag. The name must:
Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).
Contain less than 256 characters.
Value (string) -- [REQUIRED]The value of the tag. The value must:
Contain only ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).
Contain less than 256 characters.
ConfigurationSetName (string) -- The name of the configuration set to use when you send an email using SendEmail . |
23,056 | def collect_static_files(src_map, dst):
for rel_src, abs_src in src_map.iteritems():
abs_dst = os.path.join(dst, rel_src)
copy_file(abs_src, abs_dst) | Collect all static files and move them into a temporary location.
This is very similar to the ``collectstatic`` command. |
23,057 | def dbus_readBytesTwoFDs(self, fd1, fd2, byte_count):
result = bytearray()
for fd in (fd1, fd2):
f = os.fdopen(fd, 'rb')
result.extend(f.read(byte_count))
f.close()
return result | Reads byte_count from fd1 and fd2. Returns concatenation. |
23,058 | def knock_out(self):
self.functional = False
for reaction in self.reactions:
if not reaction.functional:
reaction.bounds = (0, 0) | Knockout gene by marking it as non-functional and setting all
associated reactions bounds to zero.
The change is reverted upon exit if executed within the model as
context. |
23,059 | def replace_url_query_values(url, replace_vals):
if '?' not in url:
return url
parsed_url = urlparse(url)
query = dict(parse_qsl(parsed_url.query))
query.update(replace_vals)
return '{}?{}'.format(url.split('?')[0], urlencode(query)) | Replace querystring values in a url string.
>>> url = 'http://helloworld.com/some/path?test=5'
>>> replace_vals = {'test': 10}
>>> replace_url_query_values(url=url, replace_vals=replace_vals)
'http://helloworld.com/some/path?test=10' |
23,060 | def get_answers(self):
if 'inner_hits' in self.meta and 'answer' in self.meta.inner_hits:
return self.meta.inner_hits.answer.hits
return list(self.search_answers()) | Get answers either from inner_hits already present or by searching
elasticsearch. |
23,061 | def dump_to_store(dataset, store, writer=None, encoder=None,
encoding=None, unlimited_dims=None):
if writer is None:
writer = ArrayWriter()
if encoding is None:
encoding = {}
variables, attrs = conventions.encode_dataset_coordinates(dataset)
check_encoding = set()
for k, enc in encoding.items():
variables[k].encoding = enc
check_encoding.add(k)
if encoder:
variables, attrs = encoder(variables, attrs)
store.store(variables, attrs, check_encoding, writer,
unlimited_dims=unlimited_dims) | Store dataset contents to a backends.*DataStore object. |
23,062 | def fastqtransform(transform, fastq1, fastq2, fastq3, fastq4, keep_fastq_tags,
separate_cb, demuxed_cb, cores, fastq1out, fastq2out,
min_length):
transform = json.load(open(transform))
options = _infer_transform_options(transform)
read_template = ''
logger.info("Transforming %s." % fastq1)
if options.dual_index:
logger.info("Detected dual cellular indexes.")
if separate_cb:
read_template +=
else:
read_template +=
elif options.triple_index:
logger.info("Detected triple cellular indexes.")
if separate_cb:
read_template +=
else:
read_template +=
elif options.CB or demuxed_cb:
logger.info("Detected cellular barcodes.")
read_template +=
if options.MB:
logger.info("Detected UMI.")
read_template +=
if options.SB:
logger.info("Detected sample.")
read_template +=
read_template += "{readnum}"
if keep_fastq_tags:
read_template +=
read_template +=
paired = fastq1out and fastq2out
read1_regex = re.compile(transform['read1'])
read2_regex = re.compile(transform['read2']) if fastq2 else None
read3_regex = re.compile(transform['read3']) if fastq3 else None
read4_regex = re.compile(transform['read4']) if fastq4 else None
fastq_file1 = read_fastq(fastq1)
fastq_file2 = read_fastq(fastq2)
fastq_file3 = read_fastq(fastq3)
fastq_file4 = read_fastq(fastq4)
transform = partial(transformer, read1_regex=read1_regex,
read2_regex=read2_regex, read3_regex=read3_regex,
read4_regex=read4_regex, paired=paired)
fastq1out_fh = write_fastq(fastq1out)
fastq2out_fh = write_fastq(fastq2out)
p = multiprocessing.Pool(cores)
try :
zzip = itertools.izip
except AttributeError:
zzip = zip
chunks = tz.partition_all(10000, zzip(fastq_file1, fastq_file2, fastq_file3,
fastq_file4))
bigchunks = tz.partition_all(cores, chunks)
for bigchunk in bigchunks:
for chunk in p.map(transform, list(bigchunk)):
if paired:
for read1_dict, read2_dict in tz.partition(2, chunk):
if options.dual_index:
if not separate_cb:
read1_dict[] = read1_dict[] + read1_dict[]
read2_dict[] = read2_dict[] + read2_dict[]
if demuxed_cb:
read1_dict[] = demuxed_cb
read2_dict[] = demuxed_cb
if keep_fastq_tags:
name, tag = read1_dict[].split()
read1_dict[] = name
read1_dict[] = tag
name, tag = read2_dict[].split()
read2_dict[] = name
read2_dict[] = tag
else:
read1_dict[] = read1_dict[].partition()[0]
read2_dict[] = read2_dict[].partition()[0]
read1_dict = _extract_readnum(read1_dict)
read2_dict = _extract_readnum(read2_dict)
tooshort = (len(read1_dict[]) < min_length or
len(read2_dict[]) < min_length)
if not tooshort:
fastq1out_fh.write(read_template.format(**read1_dict))
fastq2out_fh.write(read_template.format(**read2_dict))
else:
for read1_dict in chunk:
if options.dual_index:
if not separate_cb:
read1_dict[] = read1_dict[] + read1_dict[]
if demuxed_cb:
read1_dict[] = demuxed_cb
if keep_fastq_tags:
name, tag = read1_dict[].split()
read1_dict[] = name
read1_dict[] = tag
else:
read1_dict[] = read1_dict[].partition()[0]
read1_dict = _extract_readnum(read1_dict)
if len(read1_dict[]) >= min_length:
if fastq1out_fh:
fastq1out_fh.write(read_template.format(**read1_dict))
else:
sys.stdout.write(read_template.format(**read1_dict)) | Transform input reads to the tagcounts compatible read layout using
regular expressions as defined in a transform file. Outputs new format to
stdout. |
23,063 | def load_creditscoring2(cost_mat_parameters=None):
module_path = dirname(__file__)
raw_data = pd.read_csv(join(module_path, , ), delimiter=, compression=)
descr = open(join(module_path, , )).read()
raw_data = raw_data.loc[raw_data[] != ]
raw_data = raw_data.loc[(raw_data[].values.astype(np.float) > 100)]
raw_data = raw_data.loc[(raw_data[].values.astype(np.float) < 10000)]
target = raw_data[].values.astype(np.int)
cols_con = [, , , , ,
, , , ,
, ]
data = raw_data[cols_con].astype(float)
cols_dummies = [, , , ,
, , ,
, ]
for col_ in cols_dummies:
temp_ = pd.get_dummies(raw_data[col_], prefix=col_)
data = data.join(temp_)
if cost_mat_parameters is None:
cost_mat_parameters = {: 0.63 / 12,
: 0.165 / 12,
: 25000 * 0.33,
: 24,
: 3,
: .75}
n_samples = data.shape[0]
pi_1 = target.mean()
monthly_income = data[].values * 0.33
cost_mat = _creditscoring_costmat(monthly_income, np.zeros(n_samples), pi_1, cost_mat_parameters)
return Bunch(data=data.values, target=target, cost_mat=cost_mat,
target_names=[, ], DESCR=descr,
feature_names=data.columns.values, name=) | Load and return the credit scoring PAKDD 2009 competition dataset (classification).
The credit scoring is a easily transformable example-dependent cost-sensitive classification dataset.
Parameters
----------
cost_mat_parameters : Dictionary-like object, optional (default=None)
If not None, must include 'int_r', 'int_cf', 'cl_max', 'n_term', 'k','lgd'
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'cost_mat', the cost matrix of each example,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the full description of the dataset.
References
----------
.. [1] A. Correa Bahnsen, D.Aouada, B, Ottersten,
"Example-Dependent Cost-Sensitive Logistic Regression for Credit Scoring",
in Proceedings of the International Conference on Machine Learning and Applications,
, 2014.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50
>>> from costcla.datasets import load_creditscoring2
>>> data = load_creditscoring2()
>>> data.target[[10, 17, 50]]
array([1, 0, 0])
>>> data.cost_mat[[10, 17, 50]]
array([[ 209. , 547.965, 0. , 0. ],
[ 24. , 274.725, 0. , 0. ],
[ 89. , 371.25 , 0. , 0. ]]) |
23,064 | def get_template_loader_for_path(self, path, use_cache=True):
if use_cache:
try:
return self.template_loaders[path]
except KeyError:
pass
loader = MakoTemplateLoader(path, None)
if use_cache:
self.template_loaders[path] = loader
return loader | Returns a template loader object for the given directory path.
For example, get_template_loader('/var/mytemplates/') will return
a loader for that specific directory.
Normally, you should not have to call this method. Django automatically
adds request.dmp.render() and request.dmp.render_to_string() on each
request.
This method is useful when you want a custom template loader for a specific
directory that may be outside your project directory or that is otherwise
not contained in a normal Django app. If the directory is inside an app,
call get_template_loader() instead.
Unless use_cache=False, this method caches template loaders in the DMP
cache for later use. |
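A hypothetical usage sketch of the loader returned above; the engine object, directory, template name, and context are illustrative assumptions, not part of the documented API.
loader = engine.get_template_loader_for_path('/var/mytemplates/')   # 'engine' is assumed to exist
template = loader.get_template('welcome.html')                      # hypothetical template name
html = template.render(context={'user': 'alice'})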
23,065 | def demonize(self):
if access(self.pid_file_name, F_OK):
pid = self.read_pid()
try:
kill(pid, 0)
self.stderr.write("process is already running\n")
return False
except OSError as e:
if e.errno == errno.ESRCH:
self.delete_pid(force_del=True)
else:
self.stderr.write("demonize failed, something went wrong: %d (%s)\n" % (e.errno, e.strerror))
return False
try:
pid = fork()
if pid > 0:
timeout = time() + 60
while self.read_pid() is None:
self.stderr.write("waiting for pid..\n")
sleep(0.5)
if time() > timeout:
break
self.stderr.write("pid is %d\n" % self.read_pid())
sys.exit(0)
except OSError as e:
self.stderr.write("demonize failed in 1. Fork: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
setsid()
umask(0)
try:
pid = fork()
if pid > 0:
sys.exit(0)
except OSError as e:
self.stderr.write("demonize failed in 2. Fork: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
self.write_pid()
return True | do the double fork magic |
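For reference, a minimal standalone sketch of the double-fork idiom the method above implements (POSIX only); the pid-file bookkeeping from the class is omitted here.
import os
import sys

def daemonize():
    if os.fork() > 0:
        sys.exit(0)   # first parent exits
    os.setsid()       # start a new session, detach from the controlling terminal
    os.umask(0)
    if os.fork() > 0:
        sys.exit(0)   # second parent exits; the grandchild keeps running as the daemon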
23,066 | def generateAPIRootBody(self):
s body text. The method calls
:func:`~exhale.graph.ExhaleRoot.gerrymanderNodeFilenames` first to enable proper
internal linkage between reStructuredText documents. Afterward, it calls
:func:`~exhale.graph.ExhaleRoot.generateViewHierarchies` followed by
:func:`~exhale.graph.ExhaleRoot.generateUnabridgedAPI` to generate both
hierarchies as well as the full API listing. As a result, three files will now
be ready:
1. ``self.class_hierarchy_file``
2. ``self.file_hierarchy_file``
3. ``self.unabridged_api_file``
These three files are then *included* into the root library file. The
consequence of using an ``include`` directive is that Sphinx will complain about
these three files never being included in any ``toctree`` directive. These
warnings are expected, and preferred to using a ``toctree`` because otherwise
the user would have to click on the class view link from the ``toctree`` in
order to see it. This behavior has been acceptable for me so far, but if it
is causing you problems please raise an issue on GitHub and I may be able to
conditionally use a ``toctree`` if you really need it.
.. raw:: html
<script type="text/javascript">
/* NOTE: if you are reading this, Exhale generated this directly. */
$(document).ready(function() {{
/* Inspired by very informative answer to get color of links:
https://stackoverflow.com/a/2707837/3814202 */
var $fake_link = $().hide().appendTo("body");
var linkColor = $fake_link.css("color");
$fake_link.remove();
var $fake_p = $().hide().appendTo("body");
var iconColor = $fake_p.css("color");
$fake_p.remove();
/* After much deliberation, using JavaScript directly to enforce that the
* link and glyphicon receive different colors is fruitless, because the
* bootstrap treeview library will overwrite the style every time. Instead,
* leaning on the library code itself to append some styling to the head,
* I choose to mix a couple of things:
*
* 1. Set the `color` property of bootstrap treeview globally, this would
* normally affect the color of both the link text and the icon.
* 2. Apply custom forced styling of the glyphicon itself in order to make
* it a little more clear to the user (via different colors) that the
* act of clicking the icon and the act of clicking the link text perform
* different actions. The icon expands, the text navigates to the page.
*/
// Part 1: use linkColor as a parameter to bootstrap treeview
// apply the class view hierarchy
$("
data: {class_func_name}(),
enableLinks: true,
color: linkColor,
showTags: {show_tags},
collapseIcon: "{collapse_icon}",
expandIcon: "{expand_icon}",
levels: {levels},
onhoverColor: "{onhover_color}"
}});
// apply the file view hierarchy
$("
data: {file_func_name}(),
enableLinks: true,
color: linkColor,
showTags: {show_tags},
collapseIcon: "{collapse_icon}",
expandIcon: "{expand_icon}",
levels: {levels},
onhoverColor: "{onhover_color}"
}});
// Part 2: override the style of the glyphicons by injecting some CSS
$( +
+
+ iconColor + +
+
).appendTo();
}});
</script>
'.format(
icon_mimic=configs.treeViewBootstrapIconMimicColor,
class_idx=configs._class_hierarchy_id,
class_func_name=configs._bstrap_class_hierarchy_fn_data_name,
file_idx=configs._file_hierarchy_id,
file_func_name=configs._bstrap_file_hierarchy_fn_data_name,
show_tags="true" if configs.treeViewBootstrapUseBadgeTags else "false",
collapse_icon=configs.treeViewBootstrapCollapseIcon,
expand_icon=configs.treeViewBootstrapExpandIcon,
levels=configs.treeViewBootstrapLevels,
onhover_color=configs.treeViewBootstrapOnhoverColor
)))
except:
utils.fancyError(
"Unable to create the root api body: [{0}]".format(self.full_root_file_path)
) | Generates the root library api file's body text. The method calls
:func:`~exhale.graph.ExhaleRoot.gerrymanderNodeFilenames` first to enable proper
internal linkage between reStructuredText documents. Afterward, it calls
:func:`~exhale.graph.ExhaleRoot.generateViewHierarchies` followed by
:func:`~exhale.graph.ExhaleRoot.generateUnabridgedAPI` to generate both
hierarchies as well as the full API listing. As a result, three files will now
be ready:
1. ``self.class_hierarchy_file``
2. ``self.file_hierarchy_file``
3. ``self.unabridged_api_file``
These three files are then *included* into the root library file. The
consequence of using an ``include`` directive is that Sphinx will complain about
these three files never being included in any ``toctree`` directive. These
warnings are expected, and preferred to using a ``toctree`` because otherwise
the user would have to click on the class view link from the ``toctree`` in
order to see it. This behavior has been acceptable for me so far, but if it
is causing you problems please raise an issue on GitHub and I may be able to
conditionally use a ``toctree`` if you really need it. |
23,067 | async def handler(event):
q1 = event.pattern_match.group(1)
q2 = urllib.parse.quote(q1)
await asyncio.wait([
event.delete(),
event.respond(DOCS.format(q1, q2), reply_to=event.reply_to_msg_id)
]) | #docs or #ref query: Like #search but shows the query. |
23,068 | def _ipopo_setup_callback(cls, context):
assert inspect.isclass(cls)
assert isinstance(context, FactoryContext)
if context.callbacks is not None:
callbacks = context.callbacks.copy()
else:
callbacks = {}
functions = inspect.getmembers(cls, inspect.isroutine)
for _, func in functions:
if not hasattr(func, constants.IPOPO_METHOD_CALLBACKS):
continue
method_callbacks = getattr(func, constants.IPOPO_METHOD_CALLBACKS)
if not isinstance(method_callbacks, list):
_logger.warning(
"Invalid callback information %s in %s",
constants.IPOPO_METHOD_CALLBACKS,
get_method_description(func),
)
continue
"\tPrevious callback : %s\n"
"\tNew callback : %s",
_callback,
cls.__name__,
get_method_description(callbacks[_callback]),
get_method_description(func),
)
callbacks[_callback] = func
context.callbacks.clear()
context.callbacks.update(callbacks) | Sets up the class _callback dictionary
:param cls: The class to handle
:param context: The factory class context |
23,069 | def cookie_name_check(cookie_name):
cookie_match = WHTTPCookie.cookie_name_non_compliance_re.match(cookie_name.encode())
return len(cookie_name) > 0 and cookie_match is None | Check cookie name for validity. Return True if name is valid
:param cookie_name: name to check
:return: bool |
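A standalone sketch of the same check, assuming cookie names must be HTTP tokens; it is independent of the WHTTPCookie class and its compiled regex above.
import re

_NON_TOKEN = re.compile(rb"[^!#$%&'*+\-.^_`|~0-9A-Za-z]")   # any byte outside the token set

def is_valid_cookie_name(name: str) -> bool:
    return len(name) > 0 and _NON_TOKEN.search(name.encode()) is None

print(is_valid_cookie_name('sessionid'))    # True
print(is_valid_cookie_name('bad name;'))    # False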
23,070 | def check_solution(self, tx_context, flags=None, traceback_f=None):
for t in self.puzzle_and_solution_iterator(tx_context, flags=flags, traceback_f=traceback_f):
puzzle_script, solution_stack, flags, sighash_f = t
vm = self.VM(puzzle_script, tx_context, sighash_f, flags=flags, initial_stack=solution_stack[:])
vm.is_solution_script = False
vm.traceback_f = traceback_f
stack = vm.eval_script()
if len(stack) == 0 or not vm.bool_from_script_bytes(stack[-1]):
raise self.ScriptError("eval false", errno.EVAL_FALSE)
if flags & VERIFY_CLEANSTACK and len(stack) != 1:
raise self.ScriptError("stack not clean after evaluation", errno.CLEANSTACK) | tx_context: information about the transaction that the VM may need
flags: gives the VM hints about which additional constraints to check |
23,071 | def add_positional_embedding_nd(x, max_length, name=None):
with tf.name_scope("add_positional_embedding_nd"):
x_shape = common_layers.shape_list(x)
num_dims = len(x_shape) - 2
depth = x_shape[-1]
base_shape = [1] * (num_dims + 1) + [depth]
base_start = [0] * (num_dims + 2)
base_size = [-1] + [1] * num_dims + [depth]
for i in range(num_dims):
shape = base_shape[:]
start = base_start[:]
size = base_size[:]
shape[i + 1] = max_length
size[i + 1] = x_shape[i + 1]
var = tf.get_variable(
name + "_%d" % i,
shape,
initializer=tf.random_normal_initializer(0, depth**-0.5))
var = var * depth**0.5
x += tf.slice(var, start, size)
return x | Adds n-dimensional positional embedding.
The embeddings add to all positional dimensions of the tensor.
Args:
x: Tensor with shape [batch, p1 ... pn, depth]. It has n positional
dimensions, i.e., 1 for text, 2 for images, 3 for video, etc.
max_length: int representing static maximum size of any dimension.
name: str representing name of the embedding tf.Variable.
Returns:
Tensor of same shape as x. |
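A rough usage sketch, assuming TF1-style graph mode (as in tensor2tensor, since the function relies on tf.get_variable): add learned positional embeddings to image-like features.
import tensorflow as tf   # assumes TensorFlow 1.x semantics

x = tf.zeros([8, 32, 32, 256])                                     # [batch, h, w, depth]
y = add_positional_embedding_nd(x, max_length=64, name="pos_emb")
print(y.shape)                                                     # (8, 32, 32, 256)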
23,072 | def modify_calendar_resource(self, calres, attrs):
attrs = [{: k, : v} for k, v in attrs.items()]
self.request(, {
: self._get_or_fetch_id(
calres, self.get_calendar_resource),
: attrs
}) | :param calres: a zobjects.CalendarResource
:param attrs: a dictionary of attributes to set ({key:value,...}) |
23,073 | def keypair_from_seed(seed, index=0):
h = blake2b(digest_size=32)
h.update(seed + struct.pack(">L", index))
priv_key = h.digest()
pub_key = private_to_public_key(priv_key)
return {: priv_key, : pub_key} | Generates a deterministic keypair from `seed` based on `index`
:param seed: bytes value of seed
:type seed: bytes
:param index: offset from seed
:type index: int
:return: dict of the form: {
'private': private_key
'public': public_key
} |
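A usage sketch, assuming the function above is importable from its module: derive two deterministic keypairs from the same 32-byte seed.
import os

seed = os.urandom(32)                  # in practice a persisted wallet seed, not a throwaway one
k0 = keypair_from_seed(seed, index=0)
k1 = keypair_from_seed(seed, index=1)
assert k0['public'] != k1['public']    # different indexes yield different keys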
23,074 | def add_edge(self, a, b):
neighbors_of_a = self.adjacency_lists.get(a)
if not neighbors_of_a:
neighbors_of_a = set()
self.adjacency_lists[a] = neighbors_of_a
neighbors_of_a.add(b)
neighbors_of_b = self.adjacency_lists.get(b)
if not neighbors_of_b:
neighbors_of_b = set()
self.adjacency_lists[b] = neighbors_of_b
neighbors_of_b.add(a) | Used to add edges to the graph. 'a' and 'b' are vertexes and
if 'a' or 'b' doesn't exisit then the vertex is created
Args:
a (hash): is one vertex of the edge
b (hash): is another vertext of the edge |
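A minimal standalone sketch of the same undirected adjacency-list idea, using defaultdict to create missing vertices implicitly.
from collections import defaultdict

adjacency = defaultdict(set)

def add_edge(a, b):
    adjacency[a].add(b)
    adjacency[b].add(a)

add_edge('v1', 'v2')
add_edge('v1', 'v3')
print(sorted(adjacency['v1']))   # ['v2', 'v3']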
23,075 | def query_requests(cls, admin, eager=False):
if hasattr(admin, 'is_superadmin') and admin.is_superadmin:
q1 = GroupAdmin.query.with_entities(
GroupAdmin.group_id)
else:
q1 = GroupAdmin.query_by_admin(admin).with_entities(
GroupAdmin.group_id)
q2 = Membership.query.filter(
Membership.state == MembershipState.PENDING_ADMIN,
Membership.id_group.in_(q1),
)
q3 = Membership.query_by_user(
user=admin, state=MembershipState.ACTIVE
).with_entities(Membership.id_group)
q4 = GroupAdmin.query.filter(
GroupAdmin.admin_type == , GroupAdmin.admin_id.in_(q3)
).with_entities(GroupAdmin.group_id)
q5 = Membership.query.filter(
Membership.state == MembershipState.PENDING_ADMIN,
Membership.id_group.in_(q4))
query = q2.union(q5)
return query | Get all pending group requests. |
23,076 | def xrange(self, start, stop=None, step=1):
self._assert_active()
if stop is None:
start, stop = 0, start
with self._queuelock:
pool_loop_reached = max(self._thread_loop_ids)
self._thread_loop_ids[self._thread_num] += 1
loop_id = self._thread_loop_ids[self._thread_num]
if pool_loop_reached < loop_id:
for idx in range(start, stop, step):
self._dynamic_queue.put(idx)
return _QueueIterator(self._dynamic_queue, loop_id, self) | Get an iterator for this thread's chunk of work.
This corresponds to using the OpenMP 'dynamic' schedule. |
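A rough standalone illustration of the 'dynamic' schedule, simplified from the class above: workers pull indexes from a shared queue instead of owning fixed chunks.
import queue
import threading

work = queue.Queue()
for i in range(100):
    work.put(i)

results = []

def worker():
    while True:
        try:
            i = work.get_nowait()
        except queue.Empty:
            return
        results.append(i * i)   # list.append is atomic under CPython's GIL

threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(len(results))   # 100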
23,077 | def signalize_extensions():
warnings.warn("DB-API extension cursor.rownumber used", SalesforceWarning)
warnings.warn("DB-API extension connection.<exception> used", SalesforceWarning)
warnings.warn("DB-API extension cursor.connection used", SalesforceWarning)
warnings.warn("DB-API extension cursor.messages used", SalesforceWarning)
warnings.warn("DB-API extension connection.messages used", SalesforceWarning)
warnings.warn("DB-API extension cursor.next(, SalesforceWarning) used")
warnings.warn("DB-API extension cursor.__iter__(, SalesforceWarning) used")
warnings.warn("DB-API extension cursor.lastrowid used", SalesforceWarning)
warnings.warn("DB-API extension .errorhandler used", SalesforceWarning) | DB API 2.0 extension are reported by warnings at run-time. |
23,078 | def _assert_struct_type(self, struct, name, types, path=None, extra_info=None):
wanted_yaml_typenames = set()
for t in types:
wanted_yaml_typenames.add(self._get_yaml_typename(t))
wanted_yaml_typenames = .join(wanted_yaml_typenames)
actual_yaml_typename = self._get_yaml_typename(type(struct))
if not isinstance(struct, types):
err = []
if path:
err.append(self._format_error_path(path + [name]))
err.append(.
format(w=wanted_yaml_typenames,
n=name,
a=actual_yaml_typename,
v=struct))
if extra_info:
err.append( + extra_info)
raise exceptions.YamlTypeError(.join(err)) | Asserts that given structure is of any of given types.
Args:
struct: structure to check
name: displayable name of the checked structure (e.g. "run_foo" for section run_foo)
types: list/tuple of types that are allowed for given struct
path: list with a source file as a first element and previous names
(as in name argument to this method) as other elements
extra_info: extra information to print if error is found (e.g. hint how to fix this)
Raises:
YamlTypeError: if given struct is not of any given type; error message contains
source file and a "path" (e.g. args -> somearg -> flags) specifying
where the problem is |
23,079 | def transform_field(instance, source_field_name, destination_field_name, transformation):
source_field = getattr(instance, source_field_name)
destination_field = getattr(instance, destination_field_name)
update_fields = [destination_field_name]
transformed_image = get_transformed_image(source_field, transformation)
if transformed_image:
destination_name = os.path.basename(source_field.name)
dimension_field_names = [
destination_field.field.height_field,
destination_field.field.width_field]
update_fields += filter(None, dimension_field_names)
destination_field.save(
destination_name,
transformed_image,
save=False
)
elif destination_field:
destination_field.delete()
else:
return
instance.save(update_fields=update_fields) | Does an image transformation on an instance. It will get the image
from the source field attribute of the instance, then call
the transformation function with that image, and finally
save that transformed image into the destination field attribute
of the instance.
.. note::
If the source field is blank or the transformation returns
a false value then the destination field image will be deleted, if it
exists.
.. warning::
When the model instance is saved with the new transformed image, it uses
the ``update_fields`` argument for
:py:meth:`~django.db.models.Model.save`, to tell the model to only update
the destination field and, if set in the destination field, the
:py:attr:`~django.db.models.ImageField.height_field` and
:py:attr:`~django.db.models.ImageField.width_field`. This means that
if the saving code for the model sets any other fields, in the saving
field process, it will not save those fields to the database. This would
only happen if you introduce custom logic to the saving process of
destination field, like the dimension fields do, that updates another field
on that model. In that case, when the model is saved for the
transformation, that other field will not be saved to the database.
:param instance: model instance to perform transformations on
:type instance: instance of :py:class:`django.db.models.Model`
:param source_field_name: field name on model to find source image
:type source_field_name: string
:param destination_field_name: field name on model to save the transformed image to
:type destination_field_name: string
:param transformation: function, such as :py:func:`~.transforms.scale`, that takes an image file and returns a transformed image
:type transformation: function |
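A hypothetical usage sketch; the photo instance, the field names, and the shrink transformation are assumptions for illustration, not part of the library.
def shrink(image_file):
    ...   # would return a transformed image file, e.g. produced with Pillow

transform_field(
    instance=photo,                      # an assumed model instance with both image fields
    source_field_name='original',
    destination_field_name='thumbnail',
    transformation=shrink,
)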
23,080 | def update(self, size):
if not isinstance(size, int):
raise ValueError(
.format(type(size)))
self.current_size += size
self.display_queue.put((self.current_size, self.total_length)) | Update the object size to be shown. This method is called while uploading.
:param size: Object size to be shown. The object size should be in bytes. |
23,081 | def assert_valid_schema(schema: GraphQLSchema) -> None:
errors = validate_schema(schema)
if errors:
raise TypeError("\n\n".join(error.message for error in errors)) | Utility function which asserts a schema is valid.
Throws a TypeError if the schema is invalid. |
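A usage sketch with graphql-core (assuming version 3): build a trivial schema and assert it is valid.
from graphql import GraphQLField, GraphQLObjectType, GraphQLSchema, GraphQLString

schema = GraphQLSchema(
    query=GraphQLObjectType('Query', {'hello': GraphQLField(GraphQLString)}))
assert_valid_schema(schema)   # would raise TypeError if the schema were invalid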
23,082 | def visit_Expr(self, node: ast.Expr) -> Optional[ast.Expr]:
if isinstance(
node.value,
(
ast.Constant,
ast.Name,
ast.NameConstant,
ast.Num,
ast.Str,
),
):
return None
return node | Eliminate no-op constant expressions which are in the tree
as standalone statements. |
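A standalone sketch of the same idea: an ast.NodeTransformer that drops expression statements consisting only of a constant or a bare name (on Python 3.8+, Num/Str/NameConstant all parse as ast.Constant).
import ast

class DropNoOpExprs(ast.NodeTransformer):
    def visit_Expr(self, node):
        if isinstance(node.value, (ast.Constant, ast.Name)):
            return None
        return node

tree = ast.parse("x = 1\n42\nx\nprint(x)\n")
tree = ast.fix_missing_locations(DropNoOpExprs().visit(tree))
print(ast.unparse(tree))   # needs Python 3.9+; prints "x = 1" and "print(x)"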
23,083 | def _write_local_schema_file(self, cursor):
schema = []
for field in cursor.description:
field_name = field[0]
field_type = self.type_map(field[1])
field_mode = 'REPEATED' if field[1] in (1009, 1005, 1007,
1016) else 'NULLABLE'
schema.append({
: field_name,
: field_type,
: field_mode,
})
self.log.info(, self.schema_filename, schema)
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
s = json.dumps(schema, sort_keys=True).encode()
tmp_schema_file_handle.write(s)
return {self.schema_filename: tmp_schema_file_handle} | Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format. |
23,084 | def visitor_show(self, visitor_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/visitors
api_path = "/api/v2/visitors/{visitor_id}"
api_path = api_path.format(visitor_id=visitor_id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/chat/visitors#get-a-visitor |
23,085 | def cli(self, *args, **kwargs):
kwargs[] = self.api
return cli(*args, **kwargs) | Defines a CLI function that should be routed by this API |
23,086 | def remove(self):
LOGGER.debug("Cluster.remove - " + self.name)
if self.id is None:
return None
else:
params = SessionService.complete_transactional_req({
: self.name
})
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params[] =
args = {: params}
else:
args = {: , : , : params}
response = ClusterService.requester.call(args)
if MappingService.driver_type != DriverFactory.DRIVER_REST:
response = response.get()
if response.rc != 0:
LOGGER.warning(
+ self.name +
+ str(response.response_content) + + str(response.error_message) +
" (" + str(response.rc) + ")"
)
if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
raise ArianeMappingOverloadError("Cluster.remove", ArianeMappingOverloadError.ERROR_MSG)
return self
else:
return None | remove this object from Ariane server
:return: null if successfully removed else self |
23,087 | def add_model_string(self, model_str, position=1, file_id=None):
if file_id is None:
file_id = self.make_unique_id()
ret_data = self.file_create(File.from_string(model_str, position,
file_id))
return ret_data | Add a kappa model given in a string to the project. |
23,088 | def get_info(self):
info = {
: self.name if self.name else ,
: self.name if self.name else ,
: type(self).__name__,
: {
: repr(self._current_value),
: {
: b64encode(self._current_rendered.tobytes()).decode(),
: len(self._current_rendered),
: len(self._current_rendered.tobytes()),
}
},
: {
: self._num_mutations,
: self._current_index,
: self._mutating(),
: self._fuzzable,
},
}
return info | :rtype: dictionary
:return: field information |
23,089 | def scale(self, image, size, crop, options):
original_size = self.get_image_size(image)
factor = self._calculate_scaling_factor(original_size, size, crop is not None)
if factor < 1 or options['scale_up']:
width = int(original_size[0] * factor)
height = int(original_size[1] * factor)
image = self.engine_scale(image, width, height)
return image | Wrapper for ``engine_scale``; checks that the scaling factor is below one or that the scale_up
option is set to True before calling ``engine_scale``.
:param image:
:param size:
:param crop:
:param options:
:return: |
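A minimal sketch of the guard described above: only downscale unless scale_up is requested (the names here are illustrative, not the engine's API).
def maybe_scale(original_size, factor, scale_up=False):
    if factor < 1 or scale_up:
        return (int(original_size[0] * factor), int(original_size[1] * factor))
    return original_size

print(maybe_scale((800, 600), 0.5))          # (400, 300)
print(maybe_scale((800, 600), 2.0))          # (800, 600) -- no upscaling by default
print(maybe_scale((800, 600), 2.0, True))    # (1600, 1200)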
23,090 | def reindex_all(self, batch_size=1000):
should_keep_synonyms = False
should_keep_rules = False
try:
if not self.settings:
self.settings = self.get_settings()
logger.debug(, self.index_name, self.settings)
else:
logger.debug("index %s already has settings: %s", self.index_name, self.settings)
except AlgoliaException as e:
if any("Index does not exist" in arg for arg in e.args):
pass
e) | Reindex all the records.
By default, this method uses Model.objects.all(), but you can implement
a method `get_queryset` in your subclass. This can be used to optimize
the performance (for example with select_related or prefetch_related). |
23,091 | def rbridge_id(self, **kwargs):
is_get_config = kwargs.pop(, False)
if not is_get_config:
rbridge_id = kwargs.pop()
else:
rbridge_id =
callback = kwargs.pop(, self._callback)
rid_args = dict(rbridge_id=rbridge_id)
rid = getattr(self._rbridge,
)
config = rid(**rid_args)
if is_get_config:
return callback(config, handler=)
return callback(config) | Configures device's rbridge ID. Setting this property will need
a switch reboot
Args:
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `rbridge_id` is not specified.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.system.rbridge_id(rbridge_id='225')
... output = dev.system.rbridge_id(rbridge_id='225', get=True)
... dev.system.rbridge_id() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError |
23,092 | def scrape_args(self, records, executable=, partition_files=None,
model=None, outfiles=None, threads=1, parsimony=False, fast_tree=False,
n_starts=1):
args = []
to_delete = []
if partition_files is None:
partition_files = [None for rec in records]
if outfiles is None:
outfiles = [None for rec in records]
for (rec, qfile, ofile) in zip(records, partition_files, outfiles):
if model is None:
model = 'GTRGAMMA' if rec.is_dna() else 'PROTGAMMALGX'
filename, delete = rec.get_alignment_file(as_phylip=True)
if delete:
to_delete.append(filename)
to_delete.append(filename + )
if qfile is None:
if filename.endswith():
likely_qfile = filename.replace(, )
else:
likely_qfile = filename +
if os.path.exists(likely_qfile):
qfile = likely_qfile
else:
with tempfile.NamedTemporaryFile(mode=, delete=False) as tmpfile:
qfile = tmpfile.name
to_delete.append(tmpfile.name)
mymodel = if rec.is_dna() else model.replace(, ).replace(, ).replace(, )
partition_string = .format(
model=mymodel,
name=rec.name, seqlen=len(rec))
tmpfile.write(partition_string)
args.append((executable, filename, model, qfile, ofile, threads, parsimony, fast_tree, n_starts))
return args, to_delete | Examine a list of records and generate RAxML command line arguments for tree inference.
:param records: list of `Alignment` records
:param executable: name of the RAxML executable on the system to use. Must be in the user's path.
:param partition_files: List of RAxML partition files used to describe any partitioning scheme
to be used (optional)
:param model: Choice of model to use. Defaults to GTRGAMMA for DNA, or PROTGAMMALGX for amino acid alignments.
:param outfiles: A list of output file locations to write results (required length = 1 per alignment)
:param threads: Number of threads for RAxML to use. This is independent of any threading used by the
`JobHandler`, and the user should be sure that their choice is appropriate for the number of threads
available to their system, and for the RAxML executable being used.
:param parsimony: Use RAxML's parsimony tree search only
:param fast_tree: Use RAxML's experimental fast tree search (-f E)
:return: (List of command line arguments, List of created temporary files) |
23,093 | def ParseFileObject(self, parser_mediator, file_object):
filename = parser_mediator.GetFilename()
if not self._CACHE_FILENAME_RE.match(filename):
raise errors.UnableToParseFile()
file_size = file_object.get_size()
if file_size < 36:
raise errors.UnableToParseFile(
)
file_offset = self._GetCacheFileMetadataHeaderOffset(file_object)
file_metadata_header_map = self._GetDataTypeMap(
)
try:
file_metadata_header, _ = self._ReadStructureFromFileObject(
file_object, file_offset, file_metadata_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile((
).format(exception))
if not self._ValidateCacheFileMetadataHeader(file_metadata_header):
raise errors.UnableToParseFile()
url = file_object.read(file_metadata_header.key_size)
header_data = file_object.read()
display_name = parser_mediator.GetDisplayName()
request_method, response_code = self._ParseHTTPHeaders(
header_data[:-4], file_offset, display_name)
event_data = FirefoxCacheEventData()
event_data.fetch_count = file_metadata_header.fetch_count
event_data.frequency = file_metadata_header.frequency
event_data.request_method = request_method
event_data.request_size = file_metadata_header.key_size
event_data.response_code = response_code
event_data.version = self._CACHE_VERSION
event_data.url = url.decode(, errors=)
date_time = dfdatetime_posix_time.PosixTime(
timestamp=file_metadata_header.last_fetched_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
if file_metadata_header.last_modified_time:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=file_metadata_header.last_modified_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
if file_metadata_header.expiration_time:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=file_metadata_header.expiration_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_EXPIRATION)
parser_mediator.ProduceEventWithEventData(event, event_data) | Parses a Firefox cache file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed. |
23,094 | def summary_data_from_transaction_data(
transactions,
customer_id_col,
datetime_col,
monetary_value_col=None,
datetime_format=None,
observation_period_end=None,
freq="D",
freq_multiplier=1,
):
if observation_period_end is None:
observation_period_end = (
pd.to_datetime(transactions[datetime_col].max(), format=datetime_format).to_period(freq).to_timestamp()
)
else:
observation_period_end = (
pd.to_datetime(observation_period_end, format=datetime_format).to_period(freq).to_timestamp()
)
repeated_transactions = _find_first_transactions(
transactions, customer_id_col, datetime_col, monetary_value_col, datetime_format, observation_period_end, freq
)
repeated_transactions[datetime_col] = pd.Index(repeated_transactions[datetime_col]).to_timestamp()
customers = repeated_transactions.groupby(customer_id_col, sort=False)[datetime_col].agg(["min", "max", "count"])
customers["frequency"] = customers["count"] - 1
customers["T"] = (observation_period_end - customers["min"]) / np.timedelta64(1, freq) / freq_multiplier
customers["recency"] = (customers["max"] - customers["min"]) / np.timedelta64(1, freq) / freq_multiplier
summary_columns = ["frequency", "recency", "T"]
if monetary_value_col:
first_purchases = repeated_transactions[repeated_transactions["first"]].index
repeated_transactions.loc[first_purchases, monetary_value_col] = np.nan
customers["monetary_value"] = (
repeated_transactions.groupby(customer_id_col)[monetary_value_col].mean().fillna(0)
)
summary_columns.append("monetary_value")
return customers[summary_columns].astype(float) | Return summary data from transactions.
This transforms a DataFrame of transaction data of the form:
customer_id, datetime [, monetary_value]
to a DataFrame of the form:
customer_id, frequency, recency, T [, monetary_value]
Parameters
----------
transactions: :obj: DataFrame
a Pandas DataFrame that contains the customer_id col and the datetime col.
customer_id_col: string
the column in transactions DataFrame that denotes the customer_id
datetime_col: string
the column in transactions that denotes the datetime the purchase was made.
monetary_value_col: string, optional
the columns in the transactions that denotes the monetary value of the transaction.
Optional, only needed for customer lifetime value estimation models.
observation_period_end: datetime, optional
a string or datetime to denote the final date of the study.
Events after this date are truncated. If not given, defaults to the max 'datetime_col'.
datetime_format: string, optional
a string that represents the timestamp format. Useful if Pandas can't understand
the provided format.
freq: string, optional
Default 'D' for days, 'W' for weeks, 'M' for months... etc. Full list here:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
freq_multiplier: int, optional
Default 1, could be used to get exact recency and T, i.e. with freq='W'
row for user id_sample=1 will be recency=30 and T=39 while data in
CDNOW summary are different. Exact values could be obtained with
freq='D' and freq_multiplier=7 which will lead to recency=30.43
and T=38.86
Returns
-------
:obj: DataFrame:
customer_id, frequency, recency, T [, monetary_value] |
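A usage sketch, assuming the function above (and its _find_first_transactions helper) is importable, e.g. from the lifetimes package: build an RFM summary from a small transaction log.
import pandas as pd

transactions = pd.DataFrame({
    'customer_id': [1, 1, 1, 2],
    'date': pd.to_datetime(['2024-01-01', '2024-01-15', '2024-02-01', '2024-01-20']),
    'amount': [10.0, 20.0, 15.0, 50.0],
})
summary = summary_data_from_transaction_data(
    transactions, 'customer_id', 'date',
    monetary_value_col='amount', observation_period_end='2024-03-01')
print(summary)   # columns: frequency, recency, T, monetary_value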
23,095 | def get(self, sid):
return ConnectAppContext(self._version, account_sid=self._solution[], sid=sid, ) | Constructs a ConnectAppContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.connect_app.ConnectAppContext
:rtype: twilio.rest.api.v2010.account.connect_app.ConnectAppContext |
23,096 | def delete_keys(d: Dict[Any, Any],
keys_to_delete: List[Any],
keys_to_keep: List[Any]) -> None:
for k in keys_to_delete:
if k in d and k not in keys_to_keep:
del d[k] | Deletes keys from a dictionary, in place.
Args:
d:
dictionary to modify
keys_to_delete:
if any keys are present in this list, they are deleted...
keys_to_keep:
... unless they are present in this list. |
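A usage sketch: strip sensitive keys in place while always keeping 'id' (the record contents are made up).
record = {'id': 7, 'password': 'hunter2', 'token': 'abc', 'name': 'Ada'}
delete_keys(record, keys_to_delete=['password', 'token', 'id'], keys_to_keep=['id'])
print(record)   # {'id': 7, 'name': 'Ada'}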
23,097 | def assert_no_selector(self, *args, **kwargs):
query = SelectorQuery(*args, **kwargs)
@self.synchronize(wait=query.wait)
def assert_no_selector():
result = query.resolve_for(self)
if result.matches_count and (
len(result) > 0 or expects_none(query.options)):
raise ExpectationNotMet(result.negative_failure_message)
return True
return assert_no_selector() | Asserts that a given selector is not on the page or a descendant of the current node. Usage
is identical to :meth:`assert_selector`.
Query options such as ``count``, ``minimum``, and ``between`` are considered to be an
integral part of the selector. This will return True, for example, if a page contains 4
anchors but the query expects 5::
page.assert_no_selector("a", minimum=1) # Found, raises ExpectationNotMet
page.assert_no_selector("a", count=4) # Found, raises ExpectationNotMet
page.assert_no_selector("a", count=5) # Not Found, returns True
Args:
*args: Variable length argument list for :class:`SelectorQuery`.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
True
Raises:
ExpectationNotMet: The given selector matched. |
23,098 | def data_filler_user_agent(self, number_of_rows, pipe):
try:
for i in range(number_of_rows):
pipe.hmset( % i, {
: rnd_id_generator(self),
: self.faker.ipv4(),
: self.faker.country_code(),
: self.faker.user_agent()
})
pipe.execute()
logger.warning(, extra=d)
except Exception as e:
logger.error(e, extra=d) | creates keys with user agent data |
23,099 | def _force_disconnect_action(self, action):
conn_key = action.data[]
if self._get_connection_state(conn_key) == self.Disconnected:
return
data = self._get_connection(conn_key)
if data[] == self.Connecting:
callback = data[].data[]
callback(data[], self.id, False, )
elif data[] == self.Disconnecting:
callback = data[].data[]
callback(data[], self.id, True, None)
elif data[] == self.InProgress:
callback = data[].data[]
if data[] == :
callback(False, , 0xFF, None)
elif data[] == :
callback(False, )
elif data[] == :
callback(False, )
connection_id = data[]
internal_id = data[]
del self._connections[connection_id]
del self._int_connections[internal_id] | Forcibly disconnect a device.
Args:
action (ConnectionAction): the action object describing what we are
forcibly disconnecting |