Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k)
---|---|---|
383,100 | def _process_counter_example(self, mma, w_string):
diff = len(w_string)
same = 0
membership_answer = self._membership_query(w_string)
while True:
i = (same + diff) // 2  # integer midpoint; float division would break the string indexing below
access_string = self._run_in_hypothesis(mma, w_string, i)
if membership_answer != self._membership_query(access_string + w_string[i:]):
diff = i
else:
same = i
if diff - same == 1:
break
exp = w_string[diff:]
self.observation_table.em_vector.append(exp)
for row in self.observation_table.sm_vector + self.observation_table.smi_vector:
self._fill_table_entry(row, exp)
return 0 | Process a counterexample in the Rivest-Schapire way.
Args:
mma (DFA): The hypothesis automaton
w_string (str): The examined string to be consumed
Returns:
None |
383,101 | def _set_options_headers(self, methods):
request = self.request
response = request.response
# Header names below were recovered from the docstring; the CORS request-header
# names and the join separator are standard but assumed after extraction loss.
response.headers['Allow'] = ', '.join(sorted(methods))
if 'Access-Control-Request-Method' in request.headers:
response.headers['Access-Control-Allow-Methods'] = \
', '.join(sorted(methods))
if 'Access-Control-Request-Headers' in request.headers:
response.headers['Access-Control-Allow-Headers'] = \
request.headers['Access-Control-Request-Headers']
return response | Set proper headers.
Sets following headers:
Allow
Access-Control-Allow-Methods
Access-Control-Allow-Headers
Arguments:
:methods: Sequence of HTTP method names that are value for
requested URI |
383,102 | def observed_data_to_xarray(self):
data = self.observed_data
if not isinstance(data, dict):
raise TypeError("DictConverter.observed_data is not a dictionary")
if self.dims is None:
dims = {}
else:
dims = self.dims
observed_data = dict()
for key, vals in data.items():
vals = np.atleast_1d(vals)
val_dims = dims.get(key)
val_dims, coords = generate_dims_coords(
vals.shape, key, dims=val_dims, coords=self.coords
)
observed_data[key] = xr.DataArray(vals, dims=val_dims, coords=coords)
return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=None)) | Convert observed_data to xarray. |
383,103 | def locale(self) -> tornado.locale.Locale:
if not hasattr(self, "_locale"):
loc = self.get_user_locale()
if loc is not None:
self._locale = loc
else:
self._locale = self.get_browser_locale()
assert self._locale
return self._locale | The locale for the current session.
Determined by either `get_user_locale`, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or `get_browser_locale`, which uses the ``Accept-Language``
header.
.. versionchanged:: 4.1
Added a property setter. |
383,104 | def _power_mismatch_dc(self, buses, generators, B, Pbusinj, base_mva):
nb, ng = len(buses), len(generators)
gen_bus = array([g.bus._i for g in generators])
neg_Cg = csr_matrix((-ones(ng), (gen_bus, range(ng))), (nb, ng))
Amis = hstack([B, neg_Cg], format="csr")
Pd = array([bus.p_demand for bus in buses])
Gs = array([bus.g_shunt for bus in buses])
bmis = -(Pd - Gs) / base_mva - Pbusinj
return LinearConstraint("Pmis", Amis, bmis, bmis, ["Va", "Pg"]) | Returns the power mismatch constraint (B*Va + Pg = Pd). |
383,105 | def parse_params(self,
n_samples=None,
dx_min=-0.1,
dx_max=0.1,
n_dxs=2,
dy_min=-0.1,
dy_max=0.1,
n_dys=2,
angle_min=-30,
angle_max=30,
n_angles=6,
black_border_size=0,
**kwargs):
self.n_samples = n_samples
self.dx_min = dx_min
self.dx_max = dx_max
self.n_dxs = n_dxs
self.dy_min = dy_min
self.dy_max = dy_max
self.n_dys = n_dys
self.angle_min = angle_min
self.angle_max = angle_max
self.n_angles = n_angles
self.black_border_size = black_border_size
if self.dx_min < -1 or self.dy_min < -1 or \
self.dx_max > 1 or self.dy_max > 1:
raise ValueError("The value of translation must be bounded "
"within [-1, 1]")
if len(kwargs.keys()) > 0:
warnings.warn("kwargs is unused and will be removed on or after "
"2019-04-26.")
return True | Takes in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
:param n_samples: (optional) The number of transformations sampled to
construct the attack. Set it to None to run
full grid attack.
:param dx_min: (optional float) Minimum translation ratio along x-axis.
:param dx_max: (optional float) Maximum translation ratio along x-axis.
:param n_dxs: (optional int) Number of discretized translation ratios
along x-axis.
:param dy_min: (optional float) Minimum translation ratio along y-axis.
:param dy_max: (optional float) Maximum translation ratio along y-axis.
:param n_dys: (optional int) Number of discretized translation ratios
along y-axis.
:param angle_min: (optional float) Largest counter-clockwise rotation
angle.
:param angle_max: (optional float) Largest clockwise rotation angle.
:param n_angles: (optional int) Number of discretized angles.
:param black_border_size: (optional int) size of the black border in pixels. |
383,106 | def p_text(self, text):
item = text[1]
text[0] = item if item[0] != "\n" else u""
if len(text) > 2:
text[0] += "\n" | text : TEXT PARBREAK
| TEXT
| PARBREAK |
383,107 | def participant_ids(self):
return [user.UserID(chat_id=id_.chat_id, gaia_id=id_.gaia_id)
for id_ in self._event.membership_change.participant_ids] | :class:`~hangups.user.UserID` of users involved (:class:`list`). |
383,108 | def get_log_entry_mdata():
# NOTE: the dictionary keys and several string values in this mapping were
# lost in extraction; only the structure and typed defaults survive.
return {
: {
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: False,
: False,
: False,
: False,
: [],
: ,
: [],
},
: {
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: False,
: False,
: False,
: False,
: [MIN_DATETIME],
: ,
: []
},
: {
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: False,
: False,
: False,
: False,
: [],
: ,
: [],
},
} | Return default mdata map for LogEntry |
383,109 | def center(self):
Cx = 0.0
Cy = 0.0
denom = 6.0 * self.area()
for segment in self.segments():
x = (segment.p.x + segment.q.x)
y = (segment.p.y + segment.q.y)
xy = (segment.p.x * segment.q.y) - (segment.q.x * segment.p.y)
Cx += (x * xy)
Cy += (y * xy)
Cx /= denom
Cy /= denom
return Point(Cx, Cy) | center() -> (x, y)
Returns the center (of mass) point of this Polygon.
See http://en.wikipedia.org/wiki/Polygon
Examples:
>>> p = Polygon()
>>> p.vertices = [ Point(3, 8), Point(6, 4), Point(0, 3) ]
>>> p.center()
Point(2.89285714286, 4.82142857143) |
383,110 | def _get_managed_files(self):
# The 'os_family' key and family names are best-effort recoveries
# (dpkg -> Debian, rpm -> Suse/redhat); the original literals were stripped.
if self.grains_core.os_data().get('os_family') == 'Debian':
return self.__get_managed_files_dpkg()
elif self.grains_core.os_data().get('os_family') in ['Suse', 'redhat']:
return self.__get_managed_files_rpm()
return list(), list(), list() | Build an in-memory map of all managed files. |
383,111 | def consume_texture_coordinates(self):
while True:
yield (
float(self.values[1]),
float(self.values[2]),
)
try:
self.next_line()
except StopIteration:
break
if not self.values:
break
if self.values[0] != "vt":
break | Consume all consecutive texture coordinates |
383,112 | def update_last_wm_layers(self, service_id, num_layers=10):
from hypermap.aggregator.models import Service
# Log-message literals, the service-type names, and the order_by() field were
# stripped in extraction; values below are best-effort reconstructions.
LOGGER.debug('Updating and indexing the last %s added and %s deleted layers' % (num_layers, num_layers))
service = Service.objects.get(id=service_id)
if service.type == 'Hypermap:WorldMapLegacy':  # assumed type name
from hypermap.aggregator.models import update_layers_wm_legacy as update_layers_wm
if service.type == 'Hypermap:WorldMap':  # assumed type name
from hypermap.aggregator.models import update_layers_geonode_wm as update_layers_wm
update_layers_wm(service, num_layers)
LOGGER.debug('Unindexing the last %s deleted layers' % num_layers)
layer_to_unindex = service.layer_set.filter(was_deleted=True).order_by('-last_updated')[0:num_layers]
for layer in layer_to_unindex:
if not settings.REGISTRY_SKIP_CELERY:
unindex_layer(layer.id, use_cache=True)
else:
unindex_layer(layer.id)
LOGGER.debug('Indexing the last %s added layers' % num_layers)
layer_to_index = service.layer_set.filter(was_deleted=False).order_by('-last_updated')[0:num_layers]
for layer in layer_to_index:
if not settings.REGISTRY_SKIP_CELERY:
index_layer(layer.id, use_cache=True)
else:
index_layer(layer.id) | Update and index the last added and deleted layers (num_layers) in WorldMap service. |
383,113 | def rmon_alarm_entry_alarm_rising_threshold(self, **kwargs):
config = ET.Element("config")
rmon = ET.SubElement(config, "rmon", xmlns="urn:brocade.com:mgmt:brocade-rmon")
alarm_entry = ET.SubElement(rmon, "alarm-entry")
alarm_index_key = ET.SubElement(alarm_entry, "alarm-index")
alarm_index_key.text = kwargs.pop('alarm_index')
alarm_rising_threshold = ET.SubElement(alarm_entry, "alarm-rising-threshold")
alarm_rising_threshold.text = kwargs.pop('alarm_rising_threshold')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
383,114 | def stringClade(taxrefs, name, at):
string = []
for ref in taxrefs:
d = float(at-ref.level)
ident = re.sub(r"\s", "_", ref.ident)
string.append('{0}:{1}'.format(ident, d))  # Newick leaf: name:branch_length (format literals reconstructed from the Newick convention)
string = ','.join(string)
string = '({0}){1}'.format(string, name)  # Newick clade: (children)name
return string | Return a Newick string from a list of TaxRefs |
383,115 | def recovery(self, using=None, **kwargs):
return self._get_connection(using).indices.recovery(index=self._name, **kwargs) | The indices recovery API provides insight into on-going shard
recoveries for the index.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.recovery`` unchanged. |
383,116 | def getObjectByPid(self, pid):
self._check_initialized()
opid = rdflib.term.Literal(pid)
res = [o for o in self.subjects(predicate=DCTERMS.identifier, object=opid)]
return res[0] | Args:
pid : str
Returns:
str : URIRef of the entry identified by ``pid``. |
383,117 | def vm_snapshot_create(vm_name, kwargs=None, call=None):
# String literals (error messages and dict keys) were stripped in extraction;
# reconstructed below following Salt Cloud driver conventions.
if call != 'action':
raise SaltCloudSystemExit(
'The vm_snapshot_create action must be called with -a or --action.'
)
if kwargs is None:
kwargs = {}
snapshot_name = kwargs.get('snapshot_name', None)
if snapshot_name is None:
raise SaltCloudSystemExit(
'The vm_snapshot_create function requires a \'snapshot_name\' to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
vm_id = int(get_vm_id(kwargs={'name': vm_name}))
response = server.one.vm.snapshotcreate(auth, vm_id, snapshot_name)
data = {
'action': 'vm.snapshotcreate',
'snapshot_created': response[0],
'snapshot_id': response[1],
'error_code': response[2],
}
return data | Creates a new virtual machine snapshot from the provided VM.
.. versionadded:: 2016.3.0
vm_name
The name of the VM from which to create the snapshot.
snapshot_name
The name of the snapshot to be created.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_snapshot_create my-vm snapshot_name=my-new-snapshot |
383,118 | def port(self, value=None):
if value is not None:
return URL._mutate(self, port=value)
return self._tuple.port | Return or set the port
:param string value: the new port to use
:returns: string or new :class:`URL` instance |
383,119 | def check_is_spam(content, content_object, request,
backends=None):
if backends is None:
backends = SPAM_CHECKER_BACKENDS
for backend_path in backends:
spam_checker = get_spam_checker(backend_path)
if spam_checker is not None:
is_spam = spam_checker(content, content_object, request)
if is_spam:
return True
return False | Return True if the content is a spam, else False. |
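A spam-checker backend here is any callable taking (content, content_object, request) and returning truthy for spam. A minimal sketch of a hypothetical backend (the keyword list and function name are illustrative, not part of the library):
import re

SPAM_WORDS = {'viagra', 'casino'}  # hypothetical blacklist

def keyword_spam_checker(content, content_object, request):
    # Flag as spam when any blacklisted word appears in the content.
    words = set(re.findall(r'\w+', content.lower()))
    return bool(words & SPAM_WORDS)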
383,120 | def module_ids(self, rev=False):
shutit_global.shutit_global_object.yield_to_draw()
ids = sorted(list(self.shutit_map.keys()),key=lambda module_id: self.shutit_map[module_id].run_order)
if rev:
return list(reversed(ids))
return ids | Gets a list of module ids guaranteed to be sorted by run_order, ignoring conn modules
(run order < 0). |
383,121 | def create_environment(component_config):
ret = os.environ.copy()
for env in component_config.get_list("dp.env_list"):
real_env = env.upper()
value = os.environ.get(real_env)
value = _prepend_env(component_config, env, value)
value = _append_env(component_config, env, value)
_apply_change(ret, real_env, value, component_config)
return ret | Create a modified environment.
Arguments
component_config - The configuration for a component. |
383,122 | def run(self, endpoint: str, loop: AbstractEventLoop = None):
if not loop:
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(self.run_async(endpoint))
except KeyboardInterrupt:
self._shutdown() | Run server main task.
:param endpoint: Socket endpoint to listen to, e.g. "tcp://*:1234"
:param loop: Event loop to run server in (alternatively just use run_async method) |
383,123 | def sorted_enums(self) -> List[Tuple[str, int]]:
return sorted(self.enum.items(), key=lambda x: x[1]) | Return list of enum items sorted by value. |
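The same sort-by-value idiom in isolation, with a hypothetical name-to-value mapping:
enum = {'low': 1, 'high': 3, 'mid': 2}  # hypothetical enum mapping
sorted(enum.items(), key=lambda x: x[1])
# -> [('low', 1), ('mid', 2), ('high', 3)]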
383,124 | def attribute_path(self, attribute, missing=None, visitor=None):
_parameters = {"node": self, "attribute": attribute}
if missing is not None:
_parameters["missing"] = missing
if visitor is not None:
_parameters["visitor"] = visitor
return self.__class__.objects.attribute_path(**_parameters) | Generates a list of values of the `attribute` of all ancestors of
this node (as well as the node itself). If a value is ``None``, then
the optional value of `missing` is used (by default ``None``).
By default, the ``getattr(node, attribute, None) or missing``
mechanism is used to obtain the value of the attribute for each
node. This can be overridden by supplying a custom `visitor`
function, which expects as arguments the node and the attribute, and
should return an appropriate value for the required attribute.
:param attribute: the name of the attribute.
:param missing: optional value to use when attribute value is None.
:param visitor: optional function responsible for obtaining the
attribute value from a node.
:return: a list of values of the required `attribute` of the
ancestor path of this node. |
383,125 | def create_comment(self, access_token, video_id, content,
reply_id=None, captcha_key=None, captcha_text=None):
# The URL literal and dict keys were stripped in extraction; keys are restored
# from the parameter names and the endpoint is assumed from the linked doc.
url = 'https://openapi.youku.com/v2/comments/create.json'
data = {
'client_id': self.client_id,
'access_token': access_token,
'video_id': video_id,
'content': content,
'reply_id': reply_id,
'captcha_key': captcha_key,
'captcha_text': captcha_text
}
data = remove_none_value(data)
r = requests.post(url, data=data)
check_error(r)
return r.json()['id'] | doc: http://open.youku.com/docs/doc?id=41 |
383,126 | def elcm_profile_set(irmc_info, input_data):
if isinstance(input_data, dict):
data = jsonutils.dumps(input_data)
else:
data = input_data
_irmc_info = dict(irmc_info)
# Literals below (timeout key, content types, method/path/header names, and
# the error message) were stripped in extraction; restored best-effort.
_irmc_info['irmc_client_timeout'] = PROFILE_SET_TIMEOUT
content_type = 'application/x-www-form-urlencoded'
if input_data['Server'].get('HTTPSConnection'):
content_type = 'application/json'
resp = elcm_request(_irmc_info,
method='POST',
path=URL_PATH_PROFILE_MGMT + 'set',
headers={'Content-type': content_type},
data=data)
if resp.status_code == 202:
return _parse_elcm_response_body_as_json(resp)
else:
raise scci.SCCIClientError((
'Failed to apply profile values. Return status code is %(status)s' %
{'status': resp.status_code})) | send an eLCM request to set param values
To apply param values, a new session is spawned with status 'running'.
When values are applied or error, the session ends.
:param irmc_info: node info
:param input_data: param values to apply, eg.
{
'Server':
{
'SystemConfig':
{
'BiosConfig':
{
'@Processing': 'execute',
-- config data --
}
}
}
}
:returns: dict object of session info if succeed
{
'Session':
{
'Id': id
'Status': 'activated'
...
}
}
:raises: SCCIClientError if SCCI failed |
383,127 | def open_buffer(self, location=None, show_in_current_window=False):
eb = self._get_or_create_editor_buffer(location)
if show_in_current_window:
self.show_editor_buffer(eb) | Open/create a file, load it, and show it in a new buffer. |
383,128 | def to_api_repr(self):
resource = {self.entity_type: self.entity_id}
if self.role is not None:
resource["role"] = self.role
return resource | Construct the API resource representation of this access entry
Returns:
Dict[str, object]: Access entry represented as an API resource |
383,129 | def copy_and_run(config, src_dir):
job = fake_fetch_job(config, src_dir)
if job:
job._run_validate()
return True
else:
return False | Local-only operation of the executor.
Intended for validation script developers,
and the test suite.
Please note that this function only works correctly
if the validator has one of the following names:
- validator.py
- validator.zip
Returns True when a job was prepared and executed.
Returns False when no job could be prepared. |
383,130 | def maxlike(self,nseeds=50):
m0,age0,feh0 = self.ic.random_points(nseeds)
d0 = 10**(rand.uniform(0,np.log10(self.max_distance),size=nseeds))
AV0 = rand.uniform(0,self.maxAV,size=nseeds)
costs = np.zeros(nseeds)
if self.fit_for_distance:
pfits = np.zeros((nseeds,5))
else:
pfits = np.zeros((nseeds,3))
def fn(p):
return -1*self.lnpost(p)
for i,m,age,feh,d,AV in zip(range(nseeds),
m0,age0,feh0,d0,AV0):
if self.fit_for_distance:
pfit = scipy.optimize.fmin(fn,[m,age,feh,d,AV],disp=False)
else:
pfit = scipy.optimize.fmin(fn,[m,age,feh],disp=False)
pfits[i,:] = pfit
costs[i] = self.lnpost(pfit)
return pfits[np.argmax(costs),:] | Returns the best-fit parameters, choosing the best of multiple starting guesses
:param nseeds: (optional)
Number of starting guesses, uniformly distributed throughout
allowed ranges. Default=50.
:return:
list of best-fit parameters: ``[m,age,feh,[distance,A_V]]``.
Note that distance and A_V values will be meaningless unless
magnitudes are present in ``self.properties``. |
383,131 | def get_repositories(self, digests):
if self.workflow.push_conf.pulp_registries:
registries = self.workflow.push_conf.pulp_registries
else:
registries = self.workflow.push_conf.all_registries
output_images = []
for registry in registries:
image = self.pullspec_image.copy()
image.registry = registry.uri
pullspec = image.to_str()
output_images.append(pullspec)
digest_list = digests.get(image.to_str(registry=False), ())
for digest in digest_list:
digest_pullspec = image.to_str(tag=False) + "@" + digest
output_images.append(digest_pullspec)
return output_images | Build the repositories metadata
:param digests: dict, image -> digests |
383,132 | def read_handshake(self):
msg = self.read_message()
pid_dir, _conf, _context = msg["pidDir"], msg["conf"], msg["context"]
open(join(pid_dir, str(self.pid)), "w").close()
self.send_message({"pid": self.pid})
return _conf, _context | Read and process an initial handshake message from Storm. |
383,133 | def _login(self, max_tries=2):
if not self.current_url.startswith(_KindleCloudReaderBrowser._SIGNIN_URL):
raise BrowserError('Login attempted from non-signin URL %s (expected prefix %s)' %
(self.current_url, _KindleCloudReaderBrowser._SIGNIN_URL))
# Element ids ('ap_email', 'ap_password', 'signInSubmit-input') are Amazon's
# sign-in form ids, restored best-effort after extraction stripped the literals.
email_field_loaded = lambda br: br.find_elements_by_id('ap_email')
self._wait().until(email_field_loaded)
tries = 0
while tries < max_tries:
email_elem = self.find_element_by_id('ap_email')
email_elem.clear()
email_elem.send_keys(self._uname)
pword_elem = self.find_element_by_id('ap_password')
pword_elem.clear()
pword_elem.send_keys(self._pword)
def creds_entered(_):
email_ok = email_elem.get_attribute('value') == self._uname
pword_ok = pword_elem.get_attribute('value') == self._pword
return email_ok and pword_ok
kcr_page_loaded = lambda br: br.title == u'Kindle Cloud Reader'
try:
self._wait(5).until(creds_entered)
self.find_element_by_id('signInSubmit-input').click()
self._wait(5).until(kcr_page_loaded)
except TimeoutException:
tries += 1
else:
return
raise LoginError | Logs in to Kindle Cloud Reader.
Args:
max_tries: The maximum number of login attempts that will be made.
Raises:
BrowserError: If method called when browser not at a signin URL.
LoginError: If login unsuccessful after `max_tries` attempts. |
383,134 | def sort_topologically(dag):
dag = copy.deepcopy(dag)
sorted_nodes = []
independent_nodes = deque(get_independent_nodes(dag))
while independent_nodes:
node = independent_nodes.popleft()
sorted_nodes.append(node)
downstream_nodes = dag[node]
while downstream_nodes:
downstream_node = downstream_nodes.pop(0)
if downstream_node not in dag:
continue
if not has_dependencies(downstream_node, dag):
independent_nodes.append(downstream_node)
if len(sorted_nodes) != len(dag.keys()):
raise ValueError('graph is not acyclic')
return sorted_nodes | Sort the dag breadth-first topologically.
Only the nodes inside the dag are returned, i.e. the nodes that are also keys.
Returns:
a topological ordering of the DAG.
Raises:
an error if this is not possible (graph is not valid). |
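A usage sketch, assuming `get_independent_nodes` yields nodes with no incoming edges and `has_dependencies` tests for remaining incoming edges (this is Kahn's algorithm):
dag = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d'], 'd': []}
sort_topologically(dag)
# -> ['a', 'b', 'c', 'd']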
383,135 | def connect(self):
future = concurrent.Future()
if self.connected:
raise exceptions.ConnectError('already connected')
LOGGER.debug('Connecting to %s', self.name)  # log literal lost in extraction; message assumed
self.io_loop.add_future(
self._client.connect(self.host, self.port),
lambda f: self._on_connected(f, future))
return future | Connect to the Redis server if necessary.
:rtype: :class:`~tornado.concurrent.Future`
:raises: :class:`~tredis.exceptions.ConnectError`
:class:`~tredis.exceptions.RedisError` |
383,136 | def _read_credential_file(self, cfg):
self.username = cfg.get("keystone", "username")
self.password = cfg.get("keystone", "password", raw=True)
self.tenant_id = cfg.get("keystone", "tenant_id") | Implements the default (keystone) behavior. |
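The credential file this parses is a plain INI file with a [keystone] section; a minimal sketch with placeholder values:
import configparser

cfg = configparser.ConfigParser()
cfg.read_string("""
[keystone]
username = alice
password = s3cret
tenant_id = 123456
""")
cfg.get("keystone", "username")  # 'alice'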
383,137 | def get_readwrite_instance(cls, working_dir, restore=False, restore_block_height=None):
log.warning("!!! Getting raw read/write DB instance !!!")
import virtualchain_hooks
db_path = virtualchain.get_db_filename(virtualchain_hooks, working_dir)
db = BlockstackDB(db_path, DISPOSITION_RW, working_dir, get_genesis_block())
rc = db.db_setup()
if not rc:
if restore:
log.debug("Restoring from unclean shutdown")
rc = db.db_restore(block_number=restore_block_height)
if rc:
return db
else:
log.error("Failed to restore from unclean shutdown")
db.close()
raise Exception("Failed to set up db")
return db | Get a read/write instance to the db, without the singleton check.
Used for low-level operations like db restore.
Not used in the steady state behavior of the system. |
383,138 | def purge_old_files(date_time, directory_path, custom_prefix="backup"):
for file_name in listdir(directory_path):
try:
file_date_time = get_backup_file_time_tag(file_name, custom_prefix=custom_prefix)
except ValueError as e:
if "does not match format" in e.message:
print("WARNING. file(s) in %s do not match naming convention."
% (directory_path))
continue
raise e
if file_date_time < date_time:
remove(directory_path + file_name) | Takes a datetime object and a directory path, runs through files in the
directory and deletes those tagged with a date from before the provided
datetime.
If your backups have a custom_prefix that is not the default ("backup"),
provide it with the "custom_prefix" kwarg. |
383,139 | def index(config):
client = Client()
client.prepare_connection()
group_api = API(client)
print(group_api.index()) | Display group info in raw format. |
383,140 | def afterqc_general_stats_table(self):
headers = OrderedDict()
# NOTE: the header-config keys and metric names below were lost in extraction;
# only the numeric defaults and modify-lambdas survive.
headers[] = {
: ,
: ,
: 100,
: 0,
: ,
: ,
}
headers[] = {
: .format(config.read_count_prefix),
: .format(config.read_count_desc),
: 0,
: lambda x: x * config.read_count_multiplier,
: ,
:
}
headers[] = {
: .format(config.read_count_prefix),
: .format(config.read_count_desc),
: 0,
: lambda x: x * config.read_count_multiplier,
: ,
:
}
headers[] = {
: ,
: ,
: 0,
: ,
: ,
:
}
self.general_stats_addcols(self.afterqc_data, headers) | Take the parsed stats from the Afterqc report and add it to the
General Statistics table at the top of the report |
383,141 | def _set_buttons(self, chat, bot):
if isinstance(self.reply_markup, (
types.ReplyInlineMarkup, types.ReplyKeyboardMarkup)):
self._buttons = [[
MessageButton(self._client, button, chat, bot, self.id)
for button in row.buttons
] for row in self.reply_markup.rows]
self._buttons_flat = [x for row in self._buttons for x in row] | Helper methods to set the buttons given the input sender and chat. |
383,142 | def get_place_tags(index_page, domain):
ip_address = get_ip_address(domain)
dom = dhtmlparser.parseString(index_page)
place_tags = [
get_html_geo_place_tags(dom),
get_whois_tags(ip_address),
]
return sum(place_tags, []) | Return list of `place` tags parsed from `meta` and `whois`.
Args:
index_page (str): HTML content of the page you wish to analyze.
domain (str): Domain of the web, without ``http://`` or other parts.
Returns:
list: List of :class:`.SourceString` objects. |
383,143 | def create_resource(output_model, rtype, unique, links, existing_ids=None, id_helper=None):
if isinstance(id_helper, str):
idg = idgen(id_helper)
elif isinstance(id_helper, GeneratorType):
idg = id_helper
elif id_helper is None:
idg = default_idgen(None)
else:
raise ValueError('id_helper must be a string, generator, or None')
ctx = context(None, None, output_model, base=None, idgen=idg, existing_ids=existing_ids, extras=None)
rid = I(materialize_entity(ctx, rtype, unique=unique))
if existing_ids is not None:
if rid in existing_ids:
return (False, rid)
existing_ids.add(rid)
output_model.add(rid, VTYPE_REL, rtype)
for r, t in links:
output_model.add(rid, r, t)
return (True, rid) | General-purpose routine to create a new resource in the output model, based on data provided
output_model - Versa connection to model to be updated
rtype - Type IRI for the new resource, set with Versa type
unique - list of key/value pairs for determining a unique hash for the new resource
links - list of key/value pairs for setting properties on the new resource
id_helper - If a string, a base URL for the generated ID. If callable, a function used to return the entity. If None, set a default good enough for testing.
existing_ids - set of existing IDs to not recreate, or None, in which case a new resource will always be created |
383,144 | def dec2dms(dec):
DEGREE = 360.
HOUR = 24.
MINUTE = 60.
SECOND = 3600.
dec = float(dec)
sign = np.copysign(1.0,dec)
fdeg = np.abs(dec)
deg = int(fdeg)
fminute = (fdeg - deg)*MINUTE
minute = int(fminute)
second = (fminute - minute)*MINUTE
deg = int(deg * sign)
return (deg, minute, second) | ADW: This should really be replaced by astropy |
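Two worked conversions (values chosen to be exact in binary floating point; the sign rides on the degrees):
dec2dms(-23.5)  # (-23, 30, 0.0)
dec2dms(12.25)  # (12, 15, 0.0)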
383,145 | def load_params_from_file(self, fname: str):
utils.check_condition(os.path.exists(fname), "No model parameter file found under %s. "
"This is either not a model directory or the first training "
"checkpoint has not happened yet." % fname)
self.params, self.aux_params = utils.load_params(fname)
utils.check_condition(all(name.startswith(self.prefix) for name in self.params.keys()),
"Not all parameter names start with model prefix " % self.prefix)
utils.check_condition(all(name.startswith(self.prefix) for name in self.aux_params.keys()),
"Not all auxiliary parameter names start with model prefix " % self.prefix)
logger.info(, fname) | Loads and sets model parameters from file.
:param fname: Path to load parameters from. |
383,146 | def __del_running_bp(self, tid, bp):
"Auxiliary method."
self.__runningBP[tid].remove(bp)
if not self.__runningBP[tid]:
del self.__runningBP[tid] | Auxiliary method. |
383,147 | def to_det_oid(self, det_id_or_det_oid):
try:
int(det_id_or_det_oid)
except ValueError:
return det_id_or_det_oid
else:
return self.get_det_oid(det_id_or_det_oid) | Convert det OID or ID to det OID |
383,148 | def install_sql(self, site=None, database='default', apps=None, stop_on_error=0, fn=None):
# Many string literals in this method (log messages, regexes, engine names)
# were stripped in extraction and are restored below on a best-effort basis.
stop_on_error = int(stop_on_error)
site = site or ALL
name = database
r = self.local_renderer
paths = glob.glob(r.format(r.env.install_sql_path_template))
apps = [_ for _ in (apps or '').split(',') if _.strip()]
if self.verbose:
print('apps:', apps)
def cmp_paths(d0, d1):
if d0[1] and d0[1] in d1[2]:
return -1
if d1[1] and d1[1] in d0[2]:
return +1
return cmp(d0[0], d1[0])
def get_paths(t):
data = []
for path in paths:
if fn and fn not in path:
continue
parts = path.split('.')
if len(parts) == 3 and parts[1] != t:
continue
if not path.lower().endswith('.sql'):
continue
content = open(path, 'r').read()
matches = re.findall(r'VIEW (\w+)', content, flags=re.IGNORECASE)  # original pattern lost; assumed view-name capture
view_name = ''
if matches:
view_name = matches[0]
print('found view %s' % view_name)
data.append((path, view_name, content))
for d in sorted(data, cmp=cmp_paths):
yield d[0]
def run_paths(paths, cmd_template, max_retries=3):
r = self.local_renderer
paths = list(paths)
error_counts = defaultdict(int)
terminal = set()
if self.verbose:
print( % len(paths))
while paths:
path = paths.pop(0)
if self.verbose:
print('path:', path)
app_name = re.findall(r'/([^/]+)/sql/', path)[0]  # original pattern lost; assumed app-dir capture
if apps and app_name not in apps:
self.vprint('skipping app %s' % app_name)
continue
with self.settings(warn_only=True):
if self.is_local:
r.env.sql_path = path
else:
r.env.sql_path = '/tmp/%s' % os.path.split(path)[-1]
r.put(local_path=path, remote_path=r.env.sql_path)
ret = r.run_or_local(cmd_template)
if ret and ret.return_code:
if stop_on_error:
raise Exception('Unable to execute %s' % path)
error_counts[path] += 1
if error_counts[path] < max_retries:
paths.append(path)
else:
terminal.add(path)
if terminal:
print('%i SQL files failed to run:' % len(terminal), file=sys.stderr)
for path in sorted(list(terminal)):
print(path, file=sys.stderr)
print(file=sys.stderr)
if self.verbose:
print('db_engine:', r.env.db_engine)
for _site, site_data in self.iter_sites(site=site, no_secure=True):
self.set_db(name=name, site=_site)
if 'postgres' in r.env.db_engine or 'postgis' in r.env.db_engine:
paths = list(get_paths('postgresql'))
run_paths(
paths=paths,
cmd_template="psql --host={db_host} --user={db_user} --no-password -d {db_name} -f {sql_path}")
elif 'mysql' in r.env.db_engine:
paths = list(get_paths('mysql'))
run_paths(
paths=paths,
cmd_template="mysql -v -h {db_host} -u {db_user} -p {db_name} < {sql_path}")
else:
raise NotImplementedError | Installs all custom SQL. |
383,149 | def updateTable(self, networkId, tableType, body, class_, verbose=None):
response = api(url=self.___url + 'networks/' + str(networkId) + '/tables/' + str(tableType), method="PUT", body=body, verbose=verbose)  # URL path segments restored best-effort after extraction loss
return response | Updates the table specified by the `tableType` and `networkId` parameters. New columns will be created if they do not exist in the target table.
Current limitations:
* Numbers are handled as Double
* List column is not supported in this version
:param networkId: SUID containing the table
:param tableType: Type of table
:param body: The data with which to update the table.
:param class_: None -- Not required, can be None
:param verbose: print more
:returns: default: successful operation |
383,150 | def _check_endings(self):
if self.slug.startswith("/") and self.slug.endswith("/"):
raise InvalidSlugError(
_("Invalid slug. Did you mean {}, without the leading and trailing slashes?".format(self.slug.strip("/"))))
elif self.slug.startswith("/"):
raise InvalidSlugError(
_("Invalid slug. Did you mean {}, without the leading slash?".format(self.slug.strip("/"))))
elif self.slug.endswith("/"):
raise InvalidSlugError(
_("Invalid slug. Did you mean {}, without the trailing slash?".format(self.slug.strip("/")))) | Check begin/end of slug, raises Error if malformed. |
383,151 | def _default_verify_function(instance, answer, result_host, atol, verbose):
if len(instance.arguments) != len(answer):
raise TypeError("The length of argument list and provided results do not match.")
for i, arg in enumerate(instance.arguments):
if answer[i] is not None:
if isinstance(answer[i], numpy.ndarray) and isinstance(arg, numpy.ndarray):
if answer[i].dtype != arg.dtype:
raise TypeError("Element " + str(i)
+ " of the expected results list is not of the same dtype as the kernel output: "
+ str(answer[i].dtype) + " != " + str(arg.dtype) + ".")
if answer[i].size != arg.size:
raise TypeError("Element " + str(i)
+ " of the expected results list has a size different from "
+ "the kernel argument: "
+ str(answer[i].size) + " != " + str(arg.size) + ".")
elif isinstance(answer[i], numpy.number) and isinstance(arg, numpy.number):
if answer[i].dtype != arg.dtype:
raise TypeError("Element " + str(i)
+ " of the expected results list is not the same as the kernel output: "
+ str(answer[i].dtype) + " != " + str(arg.dtype) + ".")
else:
if not isinstance(answer[i], numpy.ndarray) and not isinstance(answer[i], numpy.number):  # 'and' matches the error message; 'or' was always true
raise TypeError("Element " + str(i)
+ " of expected results list is not a numpy array or numpy scalar.")
else:
raise TypeError("Element " + str(i)
+ " of expected results list and kernel arguments have different types.")
def _ravel(a):
if hasattr(a, 'shape') and len(a.shape) > 1:
return a.ravel()
return a
def _flatten(a):
if hasattr(a, 'flatten'):
return a.flatten()
return a
correct = True
for i, arg in enumerate(instance.arguments):
expected = answer[i]
if expected is not None:
result = _ravel(result_host[i])
expected = _flatten(expected)
output_test = numpy.allclose(expected, result, atol=atol)
if not output_test and verbose:
print("Error: " + util.get_config_string(instance.params) + " detected during correctness check")
print("this error occured when checking value of the %oth kernel argument" % (i,))
print("Printing kernel output and expected result, set verbose=False to suppress this debug print")
numpy.set_printoptions(edgeitems=50)
print("Kernel output:")
print(result)
print("Expected:")
print(expected)
correct = correct and output_test
if not correct:
logging.debug('correctness check failed')  # original debug message lost in extraction
raise Exception("Error: " + util.get_config_string(instance.params) + " failed correctness check")
return correct | default verify function based on numpy.allclose |
383,152 | def Drop(self: dict, n):
n = len(self) - n
if n <= 0:
yield from self.items()
else:
for i, e in enumerate(self.items()):
if i == n:
break
yield e | [
{
'self': [1, 2, 3, 4, 5],
'n': 3,
'assert': lambda ret: list(ret) == [1, 2]
}
] |
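The docstring doubles as a spec-style test: dropping the last n items keeps the first len - n. The same behavior on a plain sequence, for comparison:
def drop_last(seq, n):
    # Keep everything except the final n elements.
    return list(seq)[:max(len(seq) - n, 0)]

drop_last([1, 2, 3, 4, 5], 3)  # [1, 2]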
383,153 | def ToPath(self):
if self.path_type == PathInfo.PathType.OS:
return os.path.join("fs", "os", *self.path_components)
elif self.path_type == PathInfo.PathType.TSK:
return os.path.join("fs", "tsk", *self.path_components)
elif self.path_type == PathInfo.PathType.REGISTRY:
return os.path.join("registry", *self.path_components)
elif self.path_type == PathInfo.PathType.TEMP:
return os.path.join("temp", *self.path_components)
raise ValueError("Unsupported path type: %s" % self.path_type) | Converts a reference into a VFS file path. |
383,154 | def run(self):
if self.stdout:
sys.stdout.write("extracted json data:\n" + json.dumps(
self.metadata, default=to_str) + "\n")
else:
extract_dist.class_metadata = self.metadata | Sends extracted metadata in json format to stdout if stdout
option is specified, assigns metadata dictionary to class_metadata
variable otherwise. |
383,155 | def query(ra, dec, rad=0.1, query=None):
if query is None:
# The ADQL query literal was lost in extraction; it interpolated ra, dec and
# rad (degrees) into a CADC TAP image search.
query = ''.format(ra, dec, rad)
tapURL = "http://TAPVizieR.u-strasbg.fr/TAPVizieR/tap/sync"
tapParams = {'REQUEST': 'doQuery',  # standard TAP sync parameters; literals restored from the protocol
'LANG': 'ADQL',
'FORMAT': 'votable',
'QUERY': query}
response = requests.get(tapURL, params=tapParams)
data = StringIO(response.text)
data.seek(0)
T = votable.parse_single_table(data).to_table()
return T | Query the CADC TAP service to determine the list of images for the
NewHorizons Search. Things to determine:
a- Images to have the reference subtracted from.
b- Image to use as the 'REFERENCE' image.
c- Images to be used for input into the reference image
Logic: Given a particular Image/CCD find all the CCDs of the same field that
overlap that CCD but are taken more than 7 days later or earlier than
that image. |
383,156 | def delete_guest(userid):
# Control flow was mangled in extraction; reconstructed per the zVM SDK sample
# client pattern (the API name and result key are assumptions).
guest_list_info = client.send_request('guest_delete', userid)
if guest_list_info.get('overallRC'):
print("\nFailed to delete guest %s!" % userid)
else:
print("\nSucceeded to delete guest %s!" % userid) | Destroy a virtual machine.
Input parameters:
:userid: USERID of the guest, last 8 if length > 8 |
383,157 | def add_key_filter(self, *args):
if self._input_mode == 'query':
raise ValueError('Key filters are not supported in query mode')
self._key_filters.append(args)
return self | Add a single key filter to the inputs.
:param args: a filter
:type args: list
:rtype: :class:`RiakMapReduce` |
383,158 | def GroupsUsersPost(self, parameters, group_id):
if self.__SenseApiCall__('/groups/{group_id}/users.json'.format(group_id=group_id), 'POST', parameters=parameters):  # URL and method literals restored best-effort
return True
else:
self.__error__ = "api call unsuccessful"
return False | Add users to a group in CommonSense.
@param parameters (dictonary) - Dictionary containing the users to add.
@return (bool) - Boolean indicating whether GroupsPost was successful. |
383,159 | def _run_incremental_transforms(self, si, transforms):
for transform in transforms:
try:
stream_id = si.stream_id
si_new = transform(si, context=self.context)
if si_new is None:
# Log/exception message literals were stripped in extraction; the messages
# below are generic reconstructions.
logger.warn('transform %r dropped stream item %s (%s)',
transform, stream_id, si and si.abs_url)
return None
si = si_new
except TransformGivingUp:
logger.info('transform %r gave up on %s',
transform, si.stream_id)
except Exception, exc:
logger.critical(
'transform %r failed on %s from i_str=%r url=%r',
transform, si and si.stream_id, self.context.get('i_str'),
si and si.abs_url, exc_info=True)
assert si is not None
if not si.stream_time:
raise InvalidStreamItem('stream item has no stream_time: %r' % si)
if si.stream_id is None:
raise InvalidStreamItem('stream item has no stream_id: %r' % si)
if type(si) != streamcorpus.StreamItem_v0_3_0:
raise InvalidStreamItem('unknown stream item type: %r' %
type(si))
self.t_chunk.add(si)
return si | Run transforms on stream item.
Item may be discarded by some transform.
Writes successful items out to current self.t_chunk
Returns transformed item or None. |
383,160 | def hour(self, value=None):
if value is not None:
try:
value = int(value)
except ValueError:
raise ValueError(
'value {} need to be of type int for field `hour`'.format(value))
if value < 1:
raise ValueError(
'value need to be greater or equal 1 for field `hour`')
if value > 24:
raise ValueError(
'value need to be smaller or equal 24 for field `hour`')
self._hour = value | Corresponds to IDD Field `hour`
Args:
value (int): value for IDD Field `hour`
value >= 1
value <= 24
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value |
383,161 | def write_config_value_to_file(key, value, config_path=None):
if config_path is None:
config_path = DEFAULT_CONFIG_PATH
config = _get_config_dict_from_file(config_path)
config[key] = value
mkdir_parents(os.path.dirname(config_path))
with open(config_path, "w") as fh:
json.dump(config, fh, sort_keys=True, indent=2)
os.chmod(config_path, 33216)  # 33216 == 0o100700: regular file, owner rwx only
return get_config_value_from_file(key, config_path) | Write key/value pair to config file. |
383,162 | def _client_builder(self):
# Config key names restored best-effort after extraction stripped the literals.
client_config = self.app.config.get('SEARCH_CLIENT_CONFIG') or {}
client_config.setdefault(
'hosts', self.app.config.get('SEARCH_ELASTIC_HOSTS'))
client_config.setdefault('connection_class', RequestsHttpConnection)
return Elasticsearch(**client_config) | Build Elasticsearch client. |
383,163 | def get_bool_value(self, section, option, default=True):
try:
return self.parser.getboolean(section, option)
except NoOptionError:
return bool(default) | Get the bool value of an option, if it exists. |
383,164 | def begin(self, **options):
if self.transaction is not None:
raise InvalidTransaction("A transaction is already begun.")
else:
self.transaction = Transaction(self, **options)
return self.transaction | Begin a new :class:`Transaction`. If this :class:`Session`
is already in a :ref:`transactional state <transactional-state>`,
an error will occur. It returns the :attr:`transaction` attribute.
This method is mostly used within a ``with`` statement block::
with session.begin() as t:
t.add(...)
...
which is equivalent to::
t = session.begin()
t.add(...)
...
session.commit()
``options`` parameters are passed to the :class:`Transaction` constructor. |
383,165 | def send_notification(self, method, *args):
message = self._version.create_request(method, args, notification=True)
self.send_message(message) | Send a JSON-RPC notification.
The notification *method* is sent with positional arguments *args*. |
383,166 | def _ReadFloatingPointDataTypeDefinition(
self, definitions_registry, definition_values, definition_name,
is_member=False):
return self._ReadFixedSizeDataTypeDefinition(
definitions_registry, definition_values,
data_types.FloatingPointDefinition, definition_name,
self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE,
is_member=is_member, supported_size_values=(4, 8)) | Reads a floating-point data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
FloatingPointDefinition: floating-point data type definition. |
383,167 | def longitude(self, value=0.0):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float for field `longitude`'.format(value))
if value < -180.0:
raise ValueError(
'value need to be greater or equal -180.0 for field `longitude`')
if value > 180.0:
raise ValueError(
'value need to be smaller or equal 180.0 for field `longitude`')
self._longitude = value | Corresponds to IDD Field `longitude`
- is West, + is East, degree minutes represented in decimal (i.e. 30 minutes is .5)
Args:
value (float): value for IDD Field `longitude`
Unit: deg
Default value: 0.0
value >= -180.0
value <= 180.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value |
383,168 | def parser_factory(fake_args=None):
parser = ArgumentParser(description='aomi')  # description literal lost; tool name assumed
subparsers = parser.add_subparsers(dest='operation',  # dest literal lost; 'operation' assumed
help='desired operation'
)
extract_file_args(subparsers)
environment_args(subparsers)
aws_env_args(subparsers)
seed_args(subparsers)
render_args(subparsers)
diff_args(subparsers)
freeze_args(subparsers)
thaw_args(subparsers)
template_args(subparsers)
password_args(subparsers)
token_args(subparsers)
help_args(subparsers)
export_args(subparsers)
if fake_args is None:
return parser, parser.parse_args()
return parser, parser.parse_args(fake_args) | Return a proper contextual OptionParser |
383,169 | def read_mda(attribute):
lines = attribute.split('\n')
mda = {}
current_dict = mda
path = []
prev_line = None
for line in lines:
if not line:
continue
if line == 'END':
break
if prev_line:
line = prev_line + line
key, val = line.split('=')
key = key.strip()
val = val.strip()
try:
val = eval(val)
except NameError:
pass
except SyntaxError:
prev_line = line
continue
prev_line = None
if key in ['GROUP', 'OBJECT']:  # ODL keywords restored from the HDF-EOS convention
new_dict = {}
path.append(val)
current_dict[val] = new_dict
current_dict = new_dict
elif key in ['END_GROUP', 'END_OBJECT']:
if val != path[-1]:
raise SyntaxError
path = path[:-1]
current_dict = mda
for item in path:
current_dict = current_dict[item]
elif key in ['CLASS', 'NUM_VAL']:
pass
else:
current_dict[key] = val
return mda | Read HDFEOS metadata and return a dict with all the key/value pairs. |
383,170 | def get_all_subdomains(offset=0, count=100, proxy=None, hostport=None):
assert proxy or hostport, 'Need either proxy handle or hostport string'
if proxy is None:
proxy = connect_hostport(hostport)
offset = int(offset)
count = int(count)
# Schema field names and the error messages in this function were stripped in
# extraction; 'names' and the message texts are best-effort reconstructions.
page_schema = {
'type': 'object',
'properties': {
'names': {
'type': 'array',
'items': {
'type': 'string',
'uniqueItems': True
},
},
},
'required': [
'names',
],
}
schema = json_response_schema(page_schema)
try:
resp = proxy.get_all_subdomains(offset, count)
resp = json_validate(schema, resp)
if json_is_error(resp):
return resp
for name in resp['names']:
if not is_subdomain(str(name)):
raise ValidationError('Not a valid subdomain: {}'.format(str(name)))
except ValidationError as ve:
if BLOCKSTACK_DEBUG:
log.exception(ve)
resp = {'error': 'Server response did not match expected schema', 'http_status': 502}
return resp
except socket.timeout:
log.error("Connection timed out")
resp = {'error': 'Connection to remote host timed out', 'http_status': 503}
return resp
except socket.error as se:
log.error("Connection error {}".format(se.errno))
resp = {'error': 'Connection to remote host failed', 'http_status': 502}
return resp
except Exception as ee:
if BLOCKSTACK_DEBUG:
log.exception(ee)
log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
resp = {'error': 'Failed to contact Blockstack node', 'http_status': 500}
return resp
return resp['names'] | Get all subdomains within the given range.
Return the list of names on success
Return {'error': ...} on failure |
383,171 | def check_subdomain_transition(cls, existing_subrec, new_subrec):
if existing_subrec.get_fqn() != new_subrec.get_fqn():
return False
if existing_subrec.n + 1 != new_subrec.n:
return False
if not new_subrec.verify_signature(existing_subrec.address):
log.debug("Invalid signature from {}".format(existing_subrec.address))
return False
if virtualchain.address_reencode(existing_subrec.address) != virtualchain.address_reencode(new_subrec.address):
if new_subrec.independent:
log.debug("Transfer is independent of domain: {}".format(new_subrec))
return False
return True | Given an existing subdomain record and a (newly-discovered) new subdomain record,
determine if we can use the new subdomain record (i.e. is its signature valid? is it in the right sequence?)
Return True if so
Return False if not |
383,172 | def _strict_match(self, struct1, struct2, fu, s1_supercell=True,
use_rms=False, break_on_match=False):
if fu < 1:
raise ValueError("fu cannot be less than 1")
mask, s1_t_inds, s2_t_ind = self._get_mask(struct1, struct2,
fu, s1_supercell)
if mask.shape[0] > mask.shape[1]:
raise ValueError('after supercell creation, struct1 must '
'have more sites than struct2')
if (not self._subset) and mask.shape[1] != mask.shape[0]:
return None
if LinearAssignment(mask).min_cost > 0:
return None
best_match = None
for s1fc, s2fc, avg_l, sc_m in \
self._get_supercells(struct1, struct2, fu, s1_supercell):
normalization = (len(s1fc) / avg_l.volume) ** (1/3)
inv_abc = np.array(avg_l.reciprocal_lattice.abc)
frac_tol = inv_abc * self.stol / (np.pi * normalization)
for s1i in s1_t_inds:
t = s1fc[s1i] - s2fc[s2_t_ind]
t_s2fc = s2fc + t
if self._cmp_fstruct(s1fc, t_s2fc, frac_tol, mask):
inv_lll_abc = np.array(avg_l.get_lll_reduced_lattice().reciprocal_lattice.abc)
lll_frac_tol = inv_lll_abc * self.stol / (np.pi * normalization)
dist, t_adj, mapping = self._cart_dists(
s1fc, t_s2fc, avg_l, mask, normalization, lll_frac_tol)
if use_rms:
val = np.linalg.norm(dist) / len(dist) ** 0.5
else:
val = max(dist)
if best_match is None or val < best_match[0]:
total_t = t + t_adj
total_t -= np.round(total_t)
best_match = val, dist, sc_m, total_t, mapping
if (break_on_match or val < 1e-5) and val < self.stol:
return best_match
if best_match and best_match[0] < self.stol:
return best_match | Matches struct2 onto struct1 (which should contain all sites in
struct2).
Args:
struct1, struct2 (Structure): structures to be matched
fu (int): size of supercell to create
s1_supercell (bool): whether to create the supercell of
struct1 (vs struct2)
use_rms (bool): whether to minimize the rms of the matching
break_on_match (bool): whether to stop search at first
valid match |
383,173 | def authenticate_nova_user(self, keystone, user, password, tenant):
self.log.debug('Authenticating nova user ({})'.format(user))
ep = keystone.service_catalog.url_for(service_type='compute',
interface='publicURL')
if keystone.session:
return nova_client.Client(NOVA_CLIENT_VERSION,
session=keystone.session,
auth_url=ep)
elif novaclient.__version__[0] >= "7":
return nova_client.Client(NOVA_CLIENT_VERSION,
username=user, password=password,
project_name=tenant, auth_url=ep)
else:
return nova_client.Client(NOVA_CLIENT_VERSION,
username=user, api_key=password,
project_id=tenant, auth_url=ep) | Authenticates a regular user with nova-api. |
383,174 | def _setup(self):
"Resets the state and prepares for running the example."
self.example.error = None
self.example.traceback = ''
c = Context(parent=self.context)
self.context = c
if self.is_root_runner:
run.before_all.execute(self.context)
self.example.before(self.context) | Resets the state and prepares for running the example. |
383,175 | def chartbeat_top(parser, token):
bits = token.split_contents()
if len(bits) > 1:
raise TemplateSyntaxError(" takes no arguments" % bits[0])
return ChartbeatTopNode() | Top Chartbeat template tag.
Render the top Javascript code for Chartbeat. |
383,176 | def _is_executable_file(path):
fpath = os.path.realpath(path)
if not os.path.isfile(fpath):
return False
return os.access(fpath, os.X_OK) | Checks that path is an executable regular file, or a symlink towards one.
This is roughly ``os.path.isfile(path) and os.access(path, os.X_OK)``.
This function was forked from pexpect originally:
Copyright (c) 2013-2014, Pexpect development team
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
383,177 | def render(directory, opt):
if not os.path.exists(directory) and not os.path.isdir(directory):
os.mkdir(directory)
a_secretfile = render_secretfile(opt)
s_path = "%s/Secretfile" % directory
LOG.debug("writing Secretfile to %s", s_path)
open(s_path, 'w').write(a_secretfile)
ctx = Context.load(yaml.safe_load(a_secretfile), opt)
for resource in ctx.resources():
if not resource.present:
continue
if issubclass(type(resource), Policy):
if not os.path.isdir("%s/policy" % directory):
os.mkdir("%s/policy" % directory)
filename = "%s/policy/%s" % (directory, resource.path)
open(filename, 'w').write(resource.obj())
LOG.debug("writing %s to %s", resource, filename)
elif issubclass(type(resource), AWSRole):
if not os.path.isdir("%s/aws" % directory):
os.mkdir("%s/aws" % directory)
if 'policy' in resource.obj():  # key name lost in extraction; assumed
filename = "%s/aws/%s" % (directory,
os.path.basename(resource.path))
r_obj = resource.obj()
if 'policy' in r_obj:
LOG.debug("writing %s to %s", resource, filename)
open(filename, 'w').write(r_obj['policy']) | Render any provided template. This includes the Secretfile,
Vault policies, and inline AWS roles |
383,178 | def xform_key(self, key):
# Body garbled in extraction; restored from the docstring and the surviving
# fragment ("...utf-8'))"): hash the key with sha1.
newkey = hashlib.sha1(key.encode('utf-8'))
return newkey.hexdigest() | we transform cache keys by taking their sha1 hash so that
we don't need to worry about cache keys containing invalid
characters |
383,179 | def is_classmethod(meth):
if inspect.ismethoddescriptor(meth):
return isinstance(meth, classmethod)
if not inspect.ismethod(meth):
return False
if not inspect.isclass(meth.__self__):
return False
if not hasattr(meth.__self__, meth.__name__):
return False
return meth == getattr(meth.__self__, meth.__name__) | Detects if the given callable is a classmethod. |
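A quick check of the detector (class and method names are illustrative):
class C:
    @classmethod
    def make(cls):
        pass
    def inst(self):
        pass

is_classmethod(C.make)    # True
is_classmethod(C().inst)  # False: bound, but __self__ is an instance
is_classmethod(len)       # False: not a method at all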
383,180 | def get(self, key, timeout=None):
key = self.pre_identifier + key
unpickled_entry = self.client.get(key)
if not unpickled_entry:
return None
entry = pickle.loads(unpickled_entry)
if timeout is None:
timeout = self.timeout
if self._is_expired(entry, timeout):
self.delete_entry(key)
return None
return entry[1] | Given a key, returns an element from the redis table |
383,181 | def find_connected_atoms(struct, tolerance=0.45, ldict=JmolNN().el_radius):
n_atoms = len(struct.species)
fc = np.array(struct.frac_coords)
species = list(map(str, struct.species))
for i,item in enumerate(species):
if item not in ldict:
species[i] = str(Specie.from_string(item).element)
latmat = struct.lattice.matrix
connected_list = []
for i in range(n_atoms):
for j in range(i + 1, n_atoms):
max_bond_length = ldict[species[i]] + ldict[species[j]] + tolerance
add_ij = False
for move_cell in itertools.product([0, 1, -1], [0, 1, -1], [0, 1, -1]):
if not add_ij:
frac_diff = fc[j] + move_cell - fc[i]
distance_ij = np.dot(latmat.T, frac_diff)
if np.linalg.norm(distance_ij) < max_bond_length:
add_ij = True
if add_ij:
connected_list.append([i, j])
return np.array(connected_list) | Finds the list of bonded atoms.
Args:
struct (Structure): Input structure
tolerance: length in angstroms used in finding bonded atoms. Two atoms are considered bonded if (radius of atom 1) + (radius of atom 2) + (tolerance) < (distance between atoms 1 and 2). Default value = 0.45, the value used by JMol and Cheon et al.
ldict: dictionary of bond lengths used in finding bonded atoms. Values from JMol are used as default
standardize: works with conventional standard structures if True. It is recommended to keep this as True.
Returns:
connected_list: A numpy array of shape (number of bonded pairs, 2); each row of is of the form [atomi, atomj].
atomi and atomj are the indices of the atoms in the input structure.
If any image of atomj is bonded to atomi with periodic boundary conditions, [atomi, atomj] is included in the list.
If atomi is bonded to multiple images of atomj, it is only counted once. |
383,182 | def cmd(send, msg, args):
parser = arguments.ArgParser(args['config'])  # key name restored best-effort
parser.add_argument('date', nargs='*', action=arguments.DateParser)
try:
cmdargs = parser.parse_args(msg)
except arguments.ArgumentException as e:
send(str(e))
return
if not cmdargs.date:
send("Time until when?")
return
delta = dateutil.relativedelta.relativedelta(cmdargs.date, datetime.datetime.now())
diff = "%s is " % cmdargs.date.strftime("%x")
if delta.years:
diff += "%d years " % (delta.years)
if delta.months:
diff += "%d months " % (delta.months)
if delta.days:
diff += "%d days " % (delta.days)
if delta.hours:
diff += "%d hours " % (delta.hours)
if delta.minutes:
diff += "%d minutes " % (delta.minutes)
if delta.seconds:
diff += "%d seconds " % (delta.seconds)
diff += "away"
send(diff) | Reports the difference between now and some specified time.
Syntax: {command} <time> |
383,183 | def _normalize(value):
if hasattr(value, 'value'):
value = value.value
if value is not None:
value = long(value)
return value | Normalize handle values. |
383,184 | def frictional_resistance_coef(length, speed, **kwargs):
Cf = 0.075 / (np.log10(reynolds_number(length, speed, **kwargs)) - 2) ** 2
return Cf | Flat plate frictional resistance of the ship according to ITTC formula.
ref: https://ittc.info/media/2021/75-02-02-02.pdf
:param length: metres length of the vehicle
:param speed: m/s speed of the vehicle
:param kwargs: optional could take in temperature to take account change of water property
:return: Frictional resistance coefficient of the vehicle |
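A worked instance of the ITTC-57 line, evaluated directly for a given Reynolds number:
import numpy as np

Re = 1e8
Cf = 0.075 / (np.log10(Re) - 2) ** 2  # 0.075 / 36 ≈ 2.08e-3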
383,185 | def check(self, instance):
istio_mesh_endpoint = instance.get('istio_mesh_endpoint')  # config key names restored best-effort
istio_mesh_config = self.config_map[istio_mesh_endpoint]
self.process(istio_mesh_config)
process_mixer_endpoint = instance.get('mixer_endpoint')
process_mixer_config = self.config_map[process_mixer_endpoint]
self.process(process_mixer_config) | Process both the istio_mesh instance and process_mixer instance associated with this instance |
383,186 | def is_success(self):
for _session in self._sessions.values():
if not _session.is_success():
return False
return True | check all sessions to see if they have completed successfully |
383,187 | def store_vdp_vsi(self, port_uuid, mgrid, typeid, typeid_ver,
vsiid_frmt, vsiid, filter_frmt, gid, mac, vlan,
new_network, reply, oui_id, oui_data, vsw_cb_fn,
vsw_cb_data, reason):
if port_uuid in self.vdp_vif_map:
LOG.debug("Not Storing VDP VSI MAC %(mac)s UUID %(uuid)s",
{'mac': mac, 'uuid': vsiid})
if new_network:
vdp_vlan = reply
else:
vdp_vlan = vlan
vdp_dict = {'vdp_vlan': vdp_vlan,  # dict keys mirror the parameter names; originals stripped in extraction
'mgrid': mgrid,
'typeid': typeid,
'typeid_ver': typeid_ver,
'vsiid_frmt': vsiid_frmt,
'vsiid': vsiid,
'filter_frmt': filter_frmt,
'mac': mac,
'gid': gid,
'vsw_cb_fn': vsw_cb_fn,
'vsw_cb_data': vsw_cb_data,
'fail_reason': reason,
'fail_cnt': 0}
self.vdp_vif_map[port_uuid] = vdp_dict
LOG.debug("Storing VDP VSI MAC %(mac)s UUID %(uuid)s VDP VLAN "
"%(vlan)s", {: mac, : vsiid, : vdp_vlan})
if oui_id:
self.store_oui(port_uuid, oui_id, oui_data) | Stores the vNIC specific info for VDP Refresh.
:param uuid: vNIC UUID
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param vsiid: VSI value
:param filter_frmt: Filter Format
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param new_network: Is this the first vNIC of this network
:param reply: Response from the switch
:param oui_id: OUI Type
:param oui_data: OUI Data
:param vsw_cb_fn: Callback function from the app.
:param vsw_cb_data: Callback data for the app.
:param reason: Failure Reason |
383,188 | async def whowas(self, nickname):
if protocol.ARGUMENT_SEPARATOR.search(nickname) is not None:
result = self.eventloop.create_future()
result.set_result(None)
return result
if nickname not in self._pending['whowas']:
await self.rawmsg('WHOWAS', nickname)
self._whowas_info[nickname] = {}
self._pending['whowas'][nickname] = self.eventloop.create_future()
return await self._pending['whowas'][nickname] | Return information about offline user.
This is an blocking asynchronous method: it has to be called from a coroutine, as follows:
info = await self.whowas('Nick') |
383,189 | def output_paas(gandi, paas, datacenters, vhosts, output_keys, justify=11):
output_generic(gandi, paas, output_keys, justify)
# Key-name literals in this function were stripped in extraction; the names
# below are best-effort reconstructions and may differ from the original.
if 'console' in output_keys:
output_line(gandi, 'console', paas['console'], justify)
if 'vhost' in output_keys:
for entry in vhosts:
output_line(gandi, 'vhost', entry, justify)
if 'dc' in output_keys:
dc_name = paas['datacenter'].get('dc_name',
paas['datacenter'].get('iso', ''))
output_line(gandi, 'datacenter', dc_name, justify)
if 'df' in paas:
df = paas['df']
total = df['free'] + df['used']
if total:
disk_used = '%.1f%%' % (df['used'] * 100 / total)
output_line(gandi, 'disk used', disk_used, justify)
if 'snapshot' in output_keys:
val = None
if paas['snapshot_profile']:
val = paas['snapshot_profile']['name']
output_line(gandi, 'snapshot', val, justify)
if 'cache' in paas:
cache = paas['cache']
total = cache['hit'] + cache['miss'] + cache['not'] + cache['pass']
if total:
output_line(gandi, 'cache', None, justify)
for key in sorted(cache):
str_value = '%.1f%%' % (cache[key] * 100 / total)
output_sub_line(gandi, key, str_value, 5)
383,190 | def rot90(img):
s = img.shape
if len(s) == 3:
if s[2] in (3, 4):
out = np.empty((s[1], s[0], s[2]), dtype=img.dtype)
for i in range(s[2]):
out[:, :, i] = np.rot90(img[:, :, i])
else:
out = np.empty((s[0], s[2], s[1]), dtype=img.dtype)
for i in range(s[0]):
out[i] = np.rot90(img[i])
elif len(s) == 2:
out = np.rot90(img)
elif len(s) == 4 and s[3] in (3, 4):
out = np.empty((s[0], s[2], s[1], s[3]), dtype=img.dtype)
for i in range(s[0]):
for j in range(s[3]):
out[i, :, :, j] = np.rot90(img[i, :, :, j])
else:
raise NotImplementedError('unsupported image shape %s' % (s,))
return out | rotate one or multiple grayscale or color images 90 degrees |
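Shape behavior of the helper on numpy arrays (the channel axis stays put; spatial axes swap):
import numpy as np

img = np.zeros((480, 640, 3))     # one RGB image
rot90(img).shape                  # (640, 480, 3)
stack = np.zeros((10, 480, 640))  # ten grayscale images
rot90(stack).shape                # (10, 640, 480)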
383,191 | async def inline_query(self, bot, query, *, offset=None, geo_point=None):
bot = await self.get_input_entity(bot)
result = await self(functions.messages.GetInlineBotResultsRequest(
bot=bot,
peer=types.InputPeerEmpty(),
query=query,
offset=offset or '',
geo_point=geo_point
))
return custom.InlineResults(self, result) | Makes the given inline query to the specified bot
i.e. ``@vote My New Poll`` would be as follows:
>>> client = ...
>>> client.inline_query('vote', 'My New Poll')
Args:
bot (`entity`):
The bot entity to which the inline query should be made.
query (`str`):
The query that should be made to the bot.
offset (`str`, optional):
The string offset to use for the bot.
geo_point (:tl:`GeoPoint`, optional)
The geo point location information to send to the bot
for localised results. Available under some bots.
Returns:
A list of `custom.InlineResult
<telethon.tl.custom.inlineresult.InlineResult>`. |
383,192 | def GetInput(self):
"Build the INPUT structure for the action"
actions = 1
if self.up and self.down:
actions = 2
inputs = (INPUT * actions)()
vk, scan, flags = self._get_key_info()
for inp in inputs:
inp.type = INPUT_KEYBOARD
inp._.ki.wVk = vk
inp._.ki.wScan = scan
inp._.ki.dwFlags |= flags
if self.up:
inputs[-1]._.ki.dwFlags |= KEYEVENTF_KEYUP
return inputs | Build the INPUT structure for the action |
383,193 | def _reconnect_delay(self):
if self.RECONNECT_ON_ERROR and self.RECONNECT_DELAYED:
if self._reconnect_attempts >= len(self.RECONNECT_DELAYS):
return self.RECONNECT_DELAYS[-1]
else:
return self.RECONNECT_DELAYS[self._reconnect_attempts]
else:
return 0 | Calculate reconnection delay. |
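The same clamped-table lookup in isolation (the delay schedule is hypothetical):
RECONNECT_DELAYS = (0, 5, 10, 30, 60)  # hypothetical schedule, in seconds

def reconnect_delay(attempts):
    # Reuse the last entry once attempts run past the end of the table.
    return RECONNECT_DELAYS[min(attempts, len(RECONNECT_DELAYS) - 1)]

reconnect_delay(0)   # 0
reconnect_delay(99)  # 60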
383,194 | def hash(value, arg):
arg = str(arg).lower()
if sys.version_info >= (3,0):
value = value.encode("utf-8")
if arg not in get_available_hashes():
raise TemplateSyntaxError("The %s hash algorithm does not exist. Supported algorithms are: %s" % (arg, get_available_hashes()))
try:
f = getattr(hashlib, arg)
hashed = f(value).hexdigest()
except Exception:
raise ValueError("The %s hash algorithm cannot produce a hex digest. Ensure that OpenSSL is properly installed." % arg)
return hashed | Returns a hex-digest of the passed in value for the hash algorithm given. |
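Calling the filter directly shows the behavior; the digest below is the well-known md5 of "hello". In a Django template it would read {{ value|hash:"md5" }}:
hash("hello", "md5")  # '5d41402abc4b2a76b9719d911017c592'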
383,195 | def stem(self, text):
stemmed_text = ''
for word in text.split():
if word not in self.stops:
word, in_que_pass_list = self._checkremove_que(word)
if not in_que_pass_list:
word, was_stemmed = self._matchremove_simple_endings(word)
stemmed_text += word + ' '  # accumulation step lost in extraction; restored so the result is non-empty
return stemmed_text | Stem each word of the Latin text. |
383,196 | def query(usr, pwd, *hpo_terms):
raw_result = query_phenomizer(usr, pwd, *hpo_terms)
for line in raw_result.text.split():
if len(line) > 1:
if not line.startswith():
yield parse_result(line) | Query the phenomizer web tool
Arguments:
usr (str): A username for phenomizer
pwd (str): A password for phenomizer
hpo_terms (list): A list with hpo terms
yields:
parsed_term (dict): A dictionary with the parsed information
from phenomizer |
383,197 | def _bss_decomp_mtifilt_images(reference_sources, estimated_source, j, flen,
Gj=None, G=None):
nsampl = np.shape(estimated_source)[0]
nchan = np.shape(estimated_source)[1]
saveg = Gj is not None and G is not None
s_true = np.hstack((np.reshape(reference_sources[j],
(nsampl, nchan),
order="F").transpose(),
np.zeros((nchan, flen - 1))))
if saveg:
e_spat, Gj = _project_images(reference_sources[j, np.newaxis, :],
estimated_source, flen, Gj)
else:
e_spat = _project_images(reference_sources[j, np.newaxis, :],
estimated_source, flen)
e_spat = e_spat - s_true
if saveg:
e_interf, G = _project_images(reference_sources,
estimated_source, flen, G)
else:
e_interf = _project_images(reference_sources,
estimated_source, flen)
e_interf = e_interf - s_true - e_spat
e_artif = -s_true - e_spat - e_interf
e_artif[:, :nsampl] += estimated_source.transpose()
if saveg:
return (s_true, e_spat, e_interf, e_artif, Gj, G)
else:
return (s_true, e_spat, e_interf, e_artif) | Decomposition of an estimated source image into four components
representing respectively the true source image, spatial (or filtering)
distortion, interference and artifacts, derived from the true source
images using multichannel time-invariant filters.
Adapted version to work with multichannel sources.
Improved performance can be gained by passing Gj and G parameters initially
as all zeros. These parameters store the results from the computation of
the G matrix in _project_images and then return them for subsequent calls
to this function. This only works when not computing permuations. |
383,198 | def _close_holding(self, trade):
left_quantity = trade.last_quantity
delta = 0
if trade.side == SIDE.BUY:
if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._sell_old_holding_list) != 0:
old_price, old_quantity = self._sell_old_holding_list.pop()
if old_quantity > left_quantity:
consumed_quantity = left_quantity
self._sell_old_holding_list = [(old_price, old_quantity - left_quantity)]
else:
consumed_quantity = old_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity)
while True:
if left_quantity <= 0:
break
oldest_price, oldest_quantity = self._sell_today_holding_list.pop()
if oldest_quantity > left_quantity:
consumed_quantity = left_quantity
self._sell_today_holding_list.append((oldest_price, oldest_quantity - left_quantity))
else:
consumed_quantity = oldest_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity)
else:
if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._buy_old_holding_list) != 0:
old_price, old_quantity = self._buy_old_holding_list.pop()
if old_quantity > left_quantity:
consumed_quantity = left_quantity
self._buy_old_holding_list = [(old_price, old_quantity - left_quantity)]
else:
consumed_quantity = old_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity)
while True:
if left_quantity <= 0:
break
oldest_price, oldest_quantity = self._buy_today_holding_list.pop()
if oldest_quantity > left_quantity:
consumed_quantity = left_quantity
self._buy_today_holding_list.append((oldest_price, oldest_quantity - left_quantity))
left_quantity = 0
else:
consumed_quantity = oldest_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity)
return delta | Applies a closing trade and computes the realized PnL of the close.
Buy to close:
delta_realized_pnl = sum of ((trade_price - cost_price) * quantity) of closed trades * contract_multiplier
Sell to close:
delta_realized_pnl = sum of ((cost_price - trade_price) * quantity) of closed trades * contract_multiplier
:param trade: rqalpha.model.trade.Trade
:return: float |
383,199 | def domain_delete(domain, logger):
if domain is not None:
try:
if domain.isActive():
domain.destroy()
except libvirt.libvirtError:
logger.exception("Unable to destroy the domain.")
try:
domain.undefine()
except libvirt.libvirtError:
try:
domain.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA)
except libvirt.libvirtError:
logger.exception("Unable to undefine the domain.") | libvirt domain undefinition.
@raise: libvirt.libvirtError. |