Unnamed: 0 (int64, 0 to 389k) | code (string, lengths 26 to 79.6k) | docstring (string, lengths 1 to 46.9k) |
---|---|---|
13,800 | def __return_json(url):
with try_URL():
response = requests.get(url)
if response.status_code == 200:
return response.json()
else:
return False | Returns the JSON data obtained by querying the API service
Called by
- meaning()
- synonym()
:param url: the complete, formatted URL that is queried using requests
:returns: the JSON content returned by the API |
13,801 | def _get_warped_array(
input_file=None,
indexes=None,
dst_bounds=None,
dst_shape=None,
dst_crs=None,
resampling=None,
src_nodata=None,
dst_nodata=None
):
try:
return _rasterio_read(
input_file=input_file,
indexes=indexes,
dst_bounds=dst_bounds,
dst_shape=dst_shape,
dst_crs=dst_crs,
resampling=resampling,
src_nodata=src_nodata,
dst_nodata=dst_nodata
)
except Exception as e:
logger.exception("error while reading file %s: %s", input_file, e)
raise | Extract a numpy array from a raster file. |
13,802 | def from_xmldict(cls, xml_dict):
name = xml_dict[]
kwargs = {}
if in xml_dict:
kwargs[] = xml_dict[]
return cls(name, **kwargs) | Create an `Author` from datacite3 metadata converted by
`xmltodict`.
Parameters
----------
xml_dict : :class:`collections.OrderedDict`
A `dict`-like object mapping XML content for a single record (i.e.,
the contents of the ``record`` tag in OAI-PMH XML). This dict is
typically generated from :mod:`xmltodict`. |
13,803 | def estimate_size_in_bytes(cls, key, value, headers):
return (
cls.HEADER_STRUCT.size + cls.MAX_RECORD_OVERHEAD +
cls.size_of(key, value, headers)
) | Get the upper-bound estimate of the size of a record |
13,804 | def load(self, context):
try:
import tensorflow
except ImportError:
return
from tensorboard.plugins.beholder.beholder_plugin import BeholderPlugin
return BeholderPlugin(context) | Returns the plugin, if possible.
Args:
context: The TBContext flags.
Returns:
A BeholderPlugin instance or None if it couldn't be loaded. |
13,805 | def _http_get_json(self, url):
response = self._http_get(url)
content_type = response.headers['content-type']
parsed_mimetype = mimeparse.parse_mime_type(content_type)
if parsed_mimetype[1] not in (, ):
raise PythonKCMeetupsNotJson(content_type)
try:
return json.loads(response.content)
except ValueError as e:
raise PythonKCMeetupsBadJson(e) | Make an HTTP GET request to the specified URL, check that it returned a
JSON response, and return the data parsed from that response.
Parameters
----------
url
The URL to GET.
Returns
-------
Dictionary of data parsed from a JSON HTTP response.
Exceptions
----------
* PythonKCMeetupsBadJson
* PythonKCMeetupsBadResponse
* PythonKCMeetupsMeetupDown
* PythonKCMeetupsNotJson
* PythonKCMeetupsRateLimitExceeded |
13,806 | def list_(runas=None):
rubies = []
output = _rvm([], runas=runas)
if output:
regex = re.compile(r)
for line in output.splitlines():
match = regex.match(line)
if match:
rubies.append([
match.group(2), match.group(3), match.group(1) ==
])
return rubies | List all rvm-installed rubies
runas
The user under which to run rvm. If not specified, then rvm will be run
as the user under which Salt is running.
CLI Example:
.. code-block:: bash
salt '*' rvm.list |
13,807 | def hello(self):
for env in self.args.environment:
juicer.utils.Log.log_info("Trying to open a connection to %s, %s ...",
env, self.connectors[env].base_url)
try:
_r = self.connectors[env].get()
juicer.utils.Log.log_info("OK")
except JuicerError:
juicer.utils.Log.log_info("FAILED")
continue
juicer.utils.Log.log_info("Attempting to authenticate as %s",
self.connectors[env].auth[0])
_r = self.connectors[env].get()
if _r.status_code == Constants.PULP_GET_OK:
juicer.utils.Log.log_info("OK")
else:
juicer.utils.Log.log_info("FAILED")
juicer.utils.Log.log_info("Server said: %s", _r.content)
continue
return True | Test pulp server connections defined in ~/.config/juicer/config. |
13,808 | def delete_mutating_webhook_configuration(self, name, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_mutating_webhook_configuration_with_http_info(name, **kwargs)
else:
(data) = self.delete_mutating_webhook_configuration_with_http_info(name, **kwargs)
return data | delete a MutatingWebhookConfiguration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_mutating_webhook_configuration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the MutatingWebhookConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread. |
13,809 | def add_instruction (self, instr):
assert(isinstance(instr, Instruction))
self.instruction_list.append(instr)
if instr.lhs not in self.defined_variables:
if isinstance(instr.lhs, Variable):
self.defined_variables.append(instr.lhs)
if isinstance(instr, EqInstruction):
if isinstance(instr.rhs, Variable):
if instr.rhs not in self.used_variables:
self.used_variables.append(instr.rhs)
else:
if isinstance(instr.rhs_1, Variable):
if instr.rhs_1 not in self.used_variables:
self.used_variables.append(instr.rhs_1)
if isinstance(instr.rhs_2, Variable):
if instr.rhs_2 not in self.used_variables:
self.used_variables.append(instr.rhs_2) | Adds the given instruction to the list of instructions of this basic block.
Also updates the variable lists (used_variables, defined_variables) |
13,810 | def varvalu(self, varn=None):
self.ignore(whitespace)
if varn is None:
varn = self.varname()
varv = s_ast.VarValue(kids=[varn])
while self.more():
if self.nextstr():
varv = self.varderef(varv)
continue
if self.nextstr():
varv = self.varcall(varv)
continue
break
return varv | $foo
$foo.bar
$foo.bar()
$foo[0]
$foo.bar(10) |
13,811 | def processRequest(cls, ps, **kw):
resource = kw['resource']
method = resource.getOperation(ps, None)
rsp = method(ps, **kw)[1]
return rsp | invokes callback that should return a (request, response) tuple
representing the SOAP request and response respectively.
ps -- ParsedSoap instance representing HTTP Body.
request -- twisted.web.server.Request |
13,812 | def remove(self,
package,
shutit_pexpect_child=None,
options=None,
echo=None,
timeout=shutit_global.shutit_global_object.default_timeout,
note=None):
shutit_global.shutit_global_object.yield_to_draw()
if package.find() != -1:
for p in package.split():
self.install(p,shutit_pexpect_child=shutit_pexpect_child,options=options,timeout=timeout,note=note)
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.remove(package,
echo=echo,
options=options,
timeout=timeout,
note=note) | Distro-independent remove function.
Takes a package name and runs relevant remove function.
@param package: Package to remove, which is run through package_map.
@param shutit_pexpect_child: See send()
@param options: Dict of options to pass to the remove command,
mapped by install_type.
@param timeout: See send(). Default: 3600
@param note: See send()
@return: True if all ok (i.e. the package was successfully removed),
False otherwise.
@rtype: boolean |
13,813 | def get_json_files(files, recursive=False):
json_files = []
if not files:
return json_files
for fn in files:
if os.path.isdir(fn):
children = list_json_files(fn, recursive)
json_files.extend(children)
elif is_json(fn):
json_files.append(fn)
else:
continue
if not json_files:
raise NoJSONFileFoundError("No JSON files found!")
return json_files | Return a list of files to validate from `files`. If a member of `files`
is a directory, its children with a ``.json`` extension will be added to
the return value.
Args:
files: A list of file paths and/or directory paths.
recursive: If ``true``, this will descend into any subdirectories
of input directories.
Returns:
A list of file paths to validate. |
13,814 | def persist(filename):
dem_projected = obtain_to(filename)
with nc.loader(filename) as root:
data = nc.getvar(root, )
dem = nc.getvar(root, , , source=data)
stack = [dim for dim in dem.shape if dim not in dem_projected.shape]
stack = stack[0] if stack else 1
dem[:] = np.vstack(map(lambda x: [dem_projected], range(stack))) | Append the digital elevation map projected (using lat lon) as variables of
the netcdf file.
Keyword arguments:
filename -- the name of a netcdf file. |
13,815 | def _recover_public_key(G, order, r, s, i, e):
c = G.curve()
x = r + (i // 2) * order
alpha = (x * x * x + c.a() * x + c.b()) % c.p()
beta = pycoin.ecdsa.numbertheory.modular_sqrt(alpha, c.p())
y = beta if (beta - i) % 2 == 0 else c.p() - beta
R = pycoin.ecdsa.ellipticcurve.Point(c, x, y, order)
rInv = pycoin.ecdsa.numbertheory.inverse_mod(r, order)
eNeg = -e % order
Q = rInv * (s * R + eNeg * G)
return Q | Recover a public key from a signature.
See SEC 1: Elliptic Curve Cryptography, section 4.1.6, "Public
Key Recovery Operation".
http://www.secg.org/sec1-v2.pdf |
13,816 | def update_endpoint(self, endpoint_name, endpoint_config_name):
if not _deployment_entity_exists(lambda: self.sagemaker_client.describe_endpoint(EndpointName=endpoint_name)):
raise ValueError(
.format(endpoint_name))
self.sagemaker_client.update_endpoint(EndpointName=endpoint_name,
EndpointConfigName=endpoint_config_name)
return endpoint_name | Update an Amazon SageMaker ``Endpoint`` according to the endpoint configuration specified in the request
Raise an error if endpoint with endpoint_name does not exist.
Args:
endpoint_name (str): Name of the Amazon SageMaker ``Endpoint`` to update.
endpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to deploy.
Returns:
str: Name of the Amazon SageMaker ``Endpoint`` being updated. |
13,817 | def program_global_reg(self):
self._clear_strobes()
gr_size = len(self[][:])
self[][][0:gr_size] = self[][:]
self[][][0:gr_size] = bitarray(gr_size * )
self[][][gr_size + 1:gr_size + 2] = bitarray("1")
self[][][gr_size + 1:gr_size + 2] = bitarray("1")
self._run_seq(gr_size + 3) | Send the global register to the chip.
Loads the values of self['GLOBAL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers. |
13,818 | def find_connected_atoms(struct, tolerance=0.45, ldict=JmolNN().el_radius):
n_atoms = len(struct.species)
fc = np.array(struct.frac_coords)
fc_copy = np.repeat(fc[:, :, np.newaxis], 27, axis=2)
neighbors = np.array(list(itertools.product([0, 1, -1], [0, 1, -1], [0, 1, -1]))).T
neighbors = np.repeat(neighbors[np.newaxis, :, :], 1, axis=0)
fc_diff = fc_copy - neighbors
species = list(map(str, struct.species))
for i, item in enumerate(species):
if not item in ldict.keys():
species[i] = str(Specie.from_string(item).element)
latmat = struct.lattice.matrix
connected_matrix = np.zeros((n_atoms,n_atoms))
for i in range(n_atoms):
for j in range(i + 1, n_atoms):
max_bond_length = ldict[species[i]] + ldict[species[j]] + tolerance
frac_diff = fc_diff[j] - fc_copy[i]
distance_ij = np.dot(latmat.T, frac_diff)
if sum(np.linalg.norm(distance_ij, axis=0) < max_bond_length) > 0:
connected_matrix[i, j] = 1
connected_matrix[j, i] = 1
return connected_matrix | Finds bonded atoms and returns an adjacency matrix of bonded atoms.
Author: "Gowoon Cheon"
Email: "[email protected]"
Args:
struct (Structure): Input structure
tolerance: length in angstroms used in finding bonded atoms. Two atoms
are considered bonded if (radius of atom 1) + (radius of atom 2) +
(tolerance) < (distance between atoms 1 and 2). Default
value = 0.45, the value used by JMol and Cheon et al.
ldict: dictionary of bond lengths used in finding bonded atoms. Values
from JMol are used as default
Returns:
(np.ndarray): A numpy array of shape (number of atoms, number of atoms);
If any image of atom j is bonded to atom i with periodic boundary
conditions, the matrix element [atom i, atom j] is 1. |
13,819 | def move(self, dst, **kwargs):
_fs, filename = opener.parse(self.uri)
_fs_dst, filename_dst = opener.parse(dst)
movefile(_fs, filename, _fs_dst, filename_dst, **kwargs)
self.uri = dst | Move file to a new destination and update ``uri``. |
13,820 | def system_monitor_sfp_alert_state(self, **kwargs):
config = ET.Element("config")
system_monitor = ET.SubElement(config, "system-monitor", xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
sfp = ET.SubElement(system_monitor, "sfp")
alert = ET.SubElement(sfp, "alert")
state = ET.SubElement(alert, "state")
state.text = kwargs.pop('state')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
13,821 | def get_layout(self, object):
layout = self.create_layout(object)
if isinstance(layout, Component):
layout = Layout(layout)
if isinstance(layout, list):
layout = Layout(*layout)
for update_layout in self.layout_updates:
update_layout(layout, object)
layout.set_object(object)
return layout | Get complete layout for given object |
13,822 | def cancel():
cancel = threading.Event()
def cancel_execution(signum, frame):
signame = SIGNAL_NAMES.get(signum, signum)
logger.info("Signal %s received, quitting "
"(this can take some time)...", signame)
cancel.set()
signal.signal(signal.SIGINT, cancel_execution)
signal.signal(signal.SIGTERM, cancel_execution)
return cancel | Returns a threading.Event() that will get set when SIGTERM or
SIGINT is triggered. This can be used to cancel execution of threads. |
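A minimal usage sketch for the event returned by `cancel()` above; the worker body is a placeholder:
```python
import time

cancel_event = cancel()          # installs the SIGINT/SIGTERM handlers
while not cancel_event.is_set():
    time.sleep(1)                # placeholder for a real unit of work
print("shutting down cleanly")
```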
13,823 | def redact_image(
self,
parent,
inspect_config=None,
image_redaction_configs=None,
include_findings=None,
byte_item=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "redact_image" not in self._inner_api_calls:
self._inner_api_calls[
"redact_image"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.redact_image,
default_retry=self._method_configs["RedactImage"].retry,
default_timeout=self._method_configs["RedactImage"].timeout,
client_info=self._client_info,
)
request = dlp_pb2.RedactImageRequest(
parent=parent,
inspect_config=inspect_config,
image_redaction_configs=image_redaction_configs,
include_findings=include_findings,
byte_item=byte_item,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["redact_image"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Redacts potentially sensitive info from an image.
This method has limits on input size, processing time, and output size.
See https://cloud.google.com/dlp/docs/redacting-sensitive-data-images to
learn more.
When no InfoTypes or CustomInfoTypes are specified in this request, the
system will automatically choose what detectors to run. By default this may
be all types, but may change over time as detectors are updated.
Example:
>>> from google.cloud import dlp_v2
>>>
>>> client = dlp_v2.DlpServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.redact_image(parent)
Args:
parent (str): The parent resource name, for example projects/my-project-id.
inspect_config (Union[dict, ~google.cloud.dlp_v2.types.InspectConfig]): Configuration for the inspector.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dlp_v2.types.InspectConfig`
image_redaction_configs (list[Union[dict, ~google.cloud.dlp_v2.types.ImageRedactionConfig]]): The configuration for specifying what content to redact from images.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dlp_v2.types.ImageRedactionConfig`
include_findings (bool): Whether the response should include findings along with the redacted
image.
byte_item (Union[dict, ~google.cloud.dlp_v2.types.ByteContentItem]): The content must be PNG, JPEG, SVG or BMP.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dlp_v2.types.ByteContentItem`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dlp_v2.types.RedactImageResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
13,824 | def search(filter,
dn=None,
scope=None,
attrs=None,
**kwargs):
if not dn:
dn = _config(, )
if not scope:
scope = _config()
if attrs == :
attrs = None
elif attrs is None:
attrs = _config()
_ldap = _connect(**kwargs)
start = time.time()
log.debug(
, filter, dn, scope, attrs
)
results = _ldap.search_s(dn, int(scope), filter, attrs)
elapsed = (time.time() - start)
if elapsed < 0.200:
elapsed_h = six.text_type(round(elapsed * 1000, 1)) +
else:
elapsed_h = six.text_type(round(elapsed, 2)) +
ret = {
: results,
: len(results),
: {: elapsed_h, : six.text_type(round(elapsed, 5))},
}
return ret | Run an arbitrary LDAP query and return the results.
CLI Example:
.. code-block:: bash
salt 'ldaphost' ldap.search "filter=cn=myhost"
Return data:
.. code-block:: python
{'myhost': {'count': 1,
'results': [['cn=myhost,ou=hosts,o=acme,c=gb',
{'saltKeyValue': ['ntpserver=ntp.acme.local',
'foo=myfoo'],
'saltState': ['foo', 'bar']}]],
'time': {'human': '1.2ms', 'raw': '0.00123'}}}
Search and connection options can be overridden by specifying the relevant
option as key=value pairs, for example:
.. code-block:: bash
salt 'ldaphost' ldap.search filter=cn=myhost dn=ou=hosts,o=acme,c=gb
scope=1 attrs='' server='localhost' port='7393' tls=True bindpw='ssh' |
13,825 | def validate_v_rgb(value):
if len(value) != 6:
raise vol.Invalid(
.format(value))
return validate_hex(value) | Validate a V_RGB value. |
13,826 | def create_locks(context, network_ids, addresses):
for address in addresses:
address_model = None
try:
address_model = _find_or_create_address(
context, network_ids, address)
lock_holder = None
if address_model.lock_id:
lock_holder = db_api.lock_holder_find(
context,
lock_id=address_model.lock_id, name=LOCK_NAME,
scope=db_api.ONE)
if not lock_holder:
LOG.info("Creating lock holder on IPAddress %s with id %s",
address_model.address_readable,
address_model.id)
db_api.lock_holder_create(
context, address_model, name=LOCK_NAME, type="ip_address")
except Exception:
LOG.exception("Failed to create lock holder on IPAddress %s",
address_model)
continue
context.session.flush() | Creates locks for each IP address that is null-routed.
The function creates the IP address if it is not present in the database. |
13,827 | def _index_verify(index_file, **extra_kwargs):
side_effect = extra_kwargs.pop("side_effect", None)
with open(TEMPLATE_FILE, "r") as file_obj:
template = file_obj.read()
template_kwargs = {
"code_block1": SPHINX_CODE_BLOCK1,
"code_block2": SPHINX_CODE_BLOCK2,
"code_block3": SPHINX_CODE_BLOCK3,
"testcleanup": TEST_CLEANUP,
"toctree": TOCTREE,
"bernstein_basis": BERNSTEIN_BASIS_SPHINX,
"bezier_defn": BEZIER_DEFN_SPHINX,
"sum_to_unity": SUM_TO_UNITY_SPHINX,
"img_prefix": "",
"extra_links": "",
"docs": "",
"docs_img": "",
"pypi": "\n\n|pypi| ",
"pypi_img": PYPI_IMG,
"versions": "|versions|\n\n",
"versions_img": VERSIONS_IMG,
"rtd_version": RTD_VERSION,
"revision": REVISION,
"circleci_badge": CIRCLECI_BADGE,
"circleci_path": "",
"travis_badge": TRAVIS_BADGE,
"travis_path": "",
"appveyor_badge": APPVEYOR_BADGE,
"appveyor_path": "",
"coveralls_badge": COVERALLS_BADGE,
"coveralls_path": COVERALLS_PATH,
"zenodo": "|zenodo|",
"zenodo_img": ZENODO_IMG,
"joss": " |JOSS|",
"joss_img": JOSS_IMG,
}
template_kwargs.update(**extra_kwargs)
expected = template.format(**template_kwargs)
if side_effect is not None:
expected = side_effect(expected)
with open(index_file, "r") as file_obj:
contents = file_obj.read()
if contents != expected:
err_msg = "\n" + get_diff(
contents,
expected,
index_file + ".actual",
index_file + ".expected",
)
raise ValueError(err_msg)
else:
rel_name = os.path.relpath(index_file, _ROOT_DIR)
msg = "{} contents are as expected.".format(rel_name)
print(msg) | Populate the template and compare to documentation index file.
Used for both ``docs/index.rst`` and ``docs/index.rst.release.template``.
Args:
index_file (str): Filename to compare against.
extra_kwargs (Dict[str, str]): Over-ride for template arguments.
One **special** keyword is ``side_effect``, which can be used
to update the template output after the fact.
Raises:
ValueError: If the current ``index.rst`` doesn't agree with the
expected value computed from the template. |
13,828 | def unmark_featured(self, request, queryset):
queryset.update(featured=False)
self.message_user(
request, _()) | Un-Mark selected featured posts. |
13,829 | def get_complex_and_node_state(self, hosts, services):
states = [s.get_state(hosts, services) for s in self.sons]
if 2 in states:
worst_state = 2
else:
worst_state = max(states)
if self.not_value:
return self.get_reverse_state(worst_state)
return worst_state | Get state, handle AND aggregation ::
* Get the worst state. 2 or max of sons (3 <=> UNKNOWN < CRITICAL <=> 2)
* Revert if it's a not node
:param hosts: host objects
:param services: service objects
:return: 0, 1 or 2
:rtype: int |
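The AND aggregation rule described above, illustrated on plain integers (a standalone sketch, not the class's API):
```python
def worst_state(states):
    # CRITICAL (2) always dominates; otherwise take the max (3/UNKNOWN outranks 1/WARNING)
    return 2 if 2 in states else max(states)

print(worst_state([0, 1, 3]))  # 3
print(worst_state([0, 3, 2]))  # 2
```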
13,830 | def getFaxResultRN(self, CorpNum, RequestNum, UserID=None):
if RequestNum == None or RequestNum == "":
raise PopbillException(-99999999, "The request number was not entered.")
return self._httpget( + RequestNum, CorpNum, UserID) | Query the result of a fax transmission.
args
CorpNum : Popbill member's business registration number
RequestNum : the request number assigned when the fax transmission was requested
UserID : Popbill member user ID
return
fax transmission information as a list
raise
PopbillException |
13,831 | def addSplits(self, login, tableName, splits):
self.send_addSplits(login, tableName, splits)
self.recv_addSplits() | Parameters:
- login
- tableName
- splits |
13,832 | def _get_types_from_sample(result_vars, sparql_results_json):
total_bindings = len(sparql_results_json['results']['bindings'])
homogeneous_types = {}
for result_var in result_vars:
var_types = set()
var_datatypes = set()
for i in range(0, min(total_bindings, 10)):
binding = sparql_results_json['results']['bindings'][i]
rdf_term = binding.get(result_var)
if rdf_term is not None:
var_types.add(rdf_term.get('type'))
var_datatypes.add(rdf_term.get('datatype'))
if len(var_types) > 1 or len(var_datatypes) > 1:
return None
else:
homogeneous_types[result_var] = {
'type': var_types.pop() if var_types else None,
'datatype': var_datatypes.pop() if var_datatypes else None
}
return homogeneous_types | Return types if homogeneous within sample
Compare up to 10 rows of results to determine homogeneity.
DESCRIBE and CONSTRUCT queries, for example,
:param result_vars:
:param sparql_results_json: |
13,833 | def to_key(literal_or_identifier):
if literal_or_identifier[] == :
return literal_or_identifier[]
elif literal_or_identifier[] == :
k = literal_or_identifier[]
if isinstance(k, float):
return unicode(float_repr(k))
elif in literal_or_identifier:
return compose_regex(k)
elif isinstance(k, bool):
return u if k else u
elif k is None:
return u
else:
return unicode(k) | returns string representation of this object |
13,834 | def fetch_and_parse(method, uri, params_prefix=None, **params):
doc = ElementTree.parse(fetch(method, uri, params_prefix, **params))
return _parse(doc.getroot()) | Fetch the given uri and return the root Element of the response. |
13,835 | def _report_disk_stats(self):
stats = {
: None,
: None,
: None,
: None,
: None,
: None
}
info = self.docker_util.client.info()
driver_status = info.get(, [])
if not driver_status:
self.log.warning(
)
return
for metric in driver_status:
if len(metric) == 2 and in metric[0]:
mtype =
if in metric[0]:
mtype =
if in metric[0]:
stats[.format(mtype)] = metric[1]
elif in metric[0]:
stats[.format(mtype)] = metric[1]
elif in metric[0]:
stats[.format(mtype)] = metric[1]
stats = self._format_disk_metrics(stats)
stats.update(self._calc_percent_disk_stats(stats))
tags = self._get_tags()
for name, val in stats.iteritems():
if val is not None:
self.gauge(name, val, tags) | Report metrics about the volume space usage |
13,836 | def calculate_path(self, remote_relative_path, input_type):
directory, allow_nested_files = self._directory_for_file_type(input_type)
return self.path_helper.remote_join(directory, remote_relative_path) | Only for use by the Pulsar client; managers should override this to
enforce security and make the directory if needed. |
13,837 | def _draw_text(self, pos, text, font, **kw):
self.drawables.append((pos, text, font, kw)) | Remember a single drawable tuple to paint later. |
13,838 | def check_index(self, key, *, index):
self.append({
"Verb": "check-index",
"Key": key,
"Index": extract_attr(index, keys=["ModifyIndex", "Index"])
})
return self | Fails the transaction if Key does not have a modify index equal to
Index
Parameters:
key (str): Key to check
index (ObjectIndex): Index ID |
13,839 | def _raise_unrecoverable_error_client(self, exception):
message = (
+ self.DEPENDENCY +
+ repr(exception) +
)
raise exceptions.ClientError(message, client_exception=exception) | Raises an exceptions.ClientError with a message telling that the error probably comes from the client
configuration.
:param exception: Exception that caused the ClientError
:type exception: Exception
:raise exceptions.ClientError |
13,840 | def get_arp_output_arp_entry_ip_address(self, **kwargs):
config = ET.Element("config")
get_arp = ET.Element("get_arp")
config = get_arp
output = ET.SubElement(get_arp, "output")
arp_entry = ET.SubElement(output, "arp-entry")
ip_address = ET.SubElement(arp_entry, "ip-address")
ip_address.text = kwargs.pop('ip_address')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
13,841 | def _spelling_pipeline(self, sources, options, personal_dict):
for source in self._pipeline_step(sources, options, personal_dict):
if encoding.startswith((, )):
encoding =
text = source.text.encode(encoding)
self.log(, 3)
self.log(text, 3)
cmd = self.setup_command(encoding, options, personal_dict)
self.log("Command: " + str(cmd), 4)
try:
wordlist = util.call_spellchecker(cmd, input_text=text, encoding=encoding)
yield Results(
[w for w in sorted(set(wordlist.replace(, ).split())) if w],
source.context,
source.category
)
except Exception as e:
err = self.get_error(e)
yield Results([], source.context, source.category, err) | Check spelling pipeline. |
13,842 | def generate_sky_catalog(image, refwcs, **kwargs):
source_cats = generate_source_catalog(image, **kwargs)
master_cat = None
numSci = countExtn(image, extname=)
if refwcs is None:
refwcs = build_reference_wcs([image])
for chip in range(numSci):
chip += 1
seg_tab_phot = source_cats[chip]
if seg_tab_phot is None:
continue
chip_wcs = wcsutil.HSTWCS(image, ext=(, chip))
seg_ra, seg_dec = chip_wcs.all_pix2world(seg_tab_phot[], seg_tab_phot[], 1)
seg_xy_out = refwcs.all_world2pix(seg_ra, seg_dec, 1)
seg_tab_phot[] = seg_xy_out[0]
seg_tab_phot[] = seg_xy_out[1]
if master_cat is None:
master_cat = seg_tab_phot
else:
master_cat = vstack([master_cat, seg_tab_phot])
return master_cat | Build source catalog from input image using photutils.
This script borrows heavily from build_source_catalog.
The catalog returned by this function includes sources found in all chips
of the input image with the positions translated to the coordinate frame
defined by the reference WCS `refwcs`. The sources will be
- identified using photutils segmentation-based source finding code
- ignore any input pixel which has been flagged as 'bad' in the DQ
array, should a DQ array be found in the input HDUList.
- classified as probable cosmic-rays (if enabled) using central_moments
properties of each source, with these sources being removed from the
catalog.
Parameters
----------
image : `~astropy.io.fits.HDUList`
Input image.
refwcs : `~stwcs.wcsutils.HSTWCS`
Definition of the reference frame WCS.
dqname : str
EXTNAME for the DQ array, if present, in the input image.
output : bool
Specify whether or not to write out a separate catalog file for all the
sources found in each chip. Default: None (False)
threshold : float, optional
This parameter controls the S/N threshold used for identifying sources in
the image relative to the background RMS in much the same way that
the 'threshold' parameter in 'tweakreg' works.
fwhm : float, optional
FWHM (in pixels) of the expected sources from the image, comparable to the
'conv_width' parameter from 'tweakreg'. Objects with FWHM closest to
this value will be identified as sources in the catalog.
Returns
--------
master_cat : `~astropy.table.Table`
Source catalog for all 'valid' sources identified from all chips of the
input image with positions translated to the reference WCS coordinate
frame. |
13,843 | def string2json(self, string):
kwargs = {
'cls': BytesEncoder,
'indent': 1,
'sort_keys': True,
'separators': (',', ': '),
}
return cast_unicode(json.dumps(string, **kwargs), 'utf-8') | Convert json into its string representation.
Used for writing outputs to markdown. |
13,844 | def RIBSystemRouteLimitExceeded_originator_switch_info_switchIpV6Address(self, **kwargs):
config = ET.Element("config")
RIBSystemRouteLimitExceeded = ET.SubElement(config, "RIBSystemRouteLimitExceeded", xmlns="http://brocade.com/ns/brocade-notification-stream")
originator_switch_info = ET.SubElement(RIBSystemRouteLimitExceeded, "originator-switch-info")
switchIpV6Address = ET.SubElement(originator_switch_info, "switchIpV6Address")
switchIpV6Address.text = kwargs.pop('switchIpV6Address')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
13,845 | def _load_cell(args, cell_body):
env = google.datalab.utils.commands.notebook_environment()
config = google.datalab.utils.commands.parse_config(cell_body, env, False) or {}
parameters = config.get() or []
if parameters:
jsonschema.validate({: parameters}, BigQuerySchema.QUERY_PARAMS_SCHEMA)
name = google.datalab.bigquery.Query.resolve_parameters(args[], parameters)
table = _get_table(name)
if not table:
table = bigquery.Table(name)
if args[] == :
if table.exists():
raise Exception( % name)
if not cell_body or not in cell_body:
raise Exception()
schema = config[]
if not isinstance(schema, bigquery.Schema):
jsonschema.validate({: schema}, BigQuerySchema.TABLE_SCHEMA_SCHEMA)
schema = bigquery.Schema(schema)
table.create(schema=schema)
elif not table.exists():
raise Exception( % name)
csv_options = bigquery.CSVOptions(delimiter=args[], skip_leading_rows=args[],
allow_jagged_rows=not args[], quote=args[])
path = google.datalab.bigquery.Query.resolve_parameters(args[], parameters)
job = table.load(path, mode=args[], source_format=args[], csv_options=csv_options,
ignore_unknown_values=not args[])
if job.failed:
raise Exception( % str(job.fatal_error))
elif job.errors:
raise Exception( % str(job.errors)) | Implements the BigQuery load magic used to load data from GCS to a table.
The supported syntax is:
%bq load <optional args>
Args:
args: the arguments following '%bq load'.
cell_body: optional contents of the cell interpreted as YAML or JSON.
Returns:
A message about whether the load succeeded or failed. |
13,846 | def _parseAttrs(self, attrsStr):
attributes = dict()
for attrStr in self.SPLIT_ATTR_COL_RE.split(attrsStr):
name, vals = self._parseAttrVal(attrStr)
if name in attributes:
raise GFF3Exception(
"duplicated attribute name: {}".format(name),
self.fileName, self.lineNumber)
attributes[name] = vals
return attributes | Parse the attributes and values |
13,847 | def currentView(cls, parent=None):
if parent is None:
parent = projexui.topWindow()
for inst in parent.findChildren(cls):
if inst.isCurrent():
return inst
return None | Returns the current view for the given class within a viewWidget. If
no view widget is supplied, then a blank view is returned.
:param viewWidget | <projexui.widgets.xviewwidget.XViewWidget> || None
:return <XView> || None |
13,848 | def retrieve_activity_profile(self, activity, profile_id):
if not isinstance(activity, Activity):
activity = Activity(activity)
request = HTTPRequest(
method="GET",
resource="activities/profile",
ignore404=True
)
request.query_params = {
"profileId": profile_id,
"activityId": activity.id
}
lrs_response = self._send_request(request)
if lrs_response.success:
doc = ActivityProfileDocument(
id=profile_id,
content=lrs_response.data,
activity=activity
)
headers = lrs_response.response.getheaders()
if "lastModified" in headers and headers["lastModified"] is not None:
doc.timestamp = headers["lastModified"]
if "contentType" in headers and headers["contentType"] is not None:
doc.content_type = headers["contentType"]
if "etag" in headers and headers["etag"] is not None:
doc.etag = headers["etag"]
lrs_response.content = doc
return lrs_response | Retrieve activity profile with the specified parameters
:param activity: Activity object of the desired activity profile
:type activity: :class:`tincan.activity.Activity`
:param profile_id: UUID of the desired profile
:type profile_id: str | unicode
:return: LRS Response object with an activity profile doc as content
:rtype: :class:`tincan.lrs_response.LRSResponse` |
13,849 | def parse_setup(raw_frames, destination_frame=None, header=0, separator=None, column_names=None,
column_types=None, na_strings=None, skipped_columns=None, custom_non_data_line_markers=None):
coltype = U(None, "unknown", "uuid", "string", "float", "real", "double", "int", "numeric",
"categorical", "factor", "enum", "time")
natype = U(str, [str])
assert_is_type(raw_frames, str, [str])
assert_is_type(destination_frame, None, str)
assert_is_type(header, -1, 0, 1)
assert_is_type(separator, None, I(str, lambda s: len(s) == 1))
assert_is_type(column_names, [str], None)
assert_is_type(column_types, [coltype], {str: coltype}, None)
assert_is_type(na_strings, [natype], {str: natype}, None)
check_frame_id(destination_frame)
if is_type(raw_frames, str): raw_frames = [raw_frames]
kwargs = {"check_header": header, "source_frames": [quoted(frame_id) for frame_id in raw_frames]}
if separator:
kwargs["separator"] = ord(separator)
if custom_non_data_line_markers is not None:
kwargs["custom_non_data_line_markers"] = custom_non_data_line_markers;
j = api("POST /3/ParseSetup", data=kwargs)
if "warnings" in j and j["warnings"]:
for w in j["warnings"]:
warnings.warn(w)
if destination_frame:
j["destination_frame"] = destination_frame
parse_column_len = len(j["column_types"]) if skipped_columns is None else (len(j["column_types"])-len(skipped_columns))
tempColumnNames = j["column_names"] if j["column_names"] is not None else gen_header(j["number_columns"])
useType = [True]*len(tempColumnNames)
if skipped_columns is not None:
useType = [True]*len(tempColumnNames)
for ind in range(len(tempColumnNames)):
if ind in skipped_columns:
useType[ind]=False
if column_names is not None:
if not isinstance(column_names, list): raise ValueError("col_names should be a list")
if (skipped_columns is not None) and len(skipped_columns)>0:
if (len(column_names)) != parse_column_len:
raise ValueError(
"length of col_names should be equal to the number of columns parsed: %d vs %d"
% (len(column_names), parse_column_len))
else:
if len(column_names) != len(j["column_types"]): raise ValueError(
"length of col_names should be equal to the number of columns: %d vs %d"
% (len(column_names), len(j["column_types"])))
j["column_names"] = column_names
counter = 0
for ind in range(len(tempColumnNames)):
if useType[ind]:
tempColumnNames[ind]=column_names[counter]
counter=counter+1
if (column_types is not None):
if isinstance(column_types, dict):
if j["column_names"] is None:
j["column_names"] = gen_header(j["number_columns"])
if not set(column_types.keys()).issubset(set(j["column_names"])): raise ValueError(
"names specified in col_types is not a subset of the column names")
idx = 0
column_types_list = []
for name in tempColumnNames:
if name in column_types:
column_types_list.append(column_types[name])
else:
column_types_list.append(j["column_types"][idx])
idx += 1
column_types = column_types_list
elif isinstance(column_types, list):
if len(column_types) != parse_column_len: raise ValueError(
"length of col_types should be equal to the number of parsed columns")
column_types_list = j["column_types"]
counter = 0
for ind in range(len(j["column_types"])):
if useType[ind] and (column_types[counter]!=None):
column_types_list[ind]=column_types[counter]
counter=counter+1
column_types = column_types_list
else:
raise ValueError("col_types should be a list of types or a dictionary of column names to types")
j["column_types"] = column_types
if na_strings is not None:
if isinstance(na_strings, dict):
if not j["column_names"]: raise ValueError("column names should be specified")
if not set(na_strings.keys()).issubset(set(j["column_names"])): raise ValueError(
"names specified in na_strings is not a subset of the column names")
j["na_strings"] = [[] for _ in range(len(j["column_names"]))]
for name, na in na_strings.items():
idx = j["column_names"].index(name)
if is_type(na, str): na = [na]
for n in na: j["na_strings"][idx].append(quoted(n))
elif is_type(na_strings, [[str]]):
if len(na_strings) != len(j["column_types"]):
raise ValueError("length of na_strings should be equal to the number of columns")
j["na_strings"] = [[quoted(na) for na in col] if col is not None else [] for col in na_strings]
elif isinstance(na_strings, list):
j["na_strings"] = [[quoted(na) for na in na_strings]] * len(j["column_types"])
else:
raise ValueError(
"na_strings should be a list, a list of lists (one list per column), or a dictionary of column "
"names to strings which are to be interpreted as missing values")
if skipped_columns is not None:
if isinstance(skipped_columns, list):
j["skipped_columns"] = []
for colidx in skipped_columns:
if (colidx < 0): raise ValueError("skipped column index cannot be negative")
j["skipped_columns"].append(colidx)
if j["column_names"]: j["column_names"] = list(map(quoted, j["column_names"]))
j["column_types"] = list(map(quoted, j["column_types"]))
return j | Retrieve H2O's best guess as to what the structure of the data file is.
During parse setup, the H2O cluster will make several guesses about the attributes of
the data. This method allows a user to perform corrective measures by updating the
returning dictionary from this method. This dictionary is then fed into `parse_raw` to
produce the H2OFrame instance.
:param raw_frames: a collection of imported file frames
:param destination_frame: The unique hex key assigned to the imported file. If none is given, a key will
automatically be generated.
:param header: -1 means the first line is data, 0 means guess, 1 means first line is header.
:param separator: The field separator character. Values on each line of the file are separated by
this character. If not provided, the parser will automatically detect the separator.
:param column_names: A list of column names for the file. If skipped_columns are specified, only list column names
of columns that are not skipped.
:param column_types: A list of types or a dictionary of column names to types to specify whether columns
should be forced to a certain type upon import parsing. If a list, the types for elements that are
None will be guessed. If skipped_columns are specified, only list column types of columns that are not skipped.
The possible types a column may have are:
- "unknown" - this will force the column to be parsed as all NA
- "uuid" - the values in the column must be true UUID or will be parsed as NA
- "string" - force the column to be parsed as a string
- "numeric" - force the column to be parsed as numeric. H2O will handle the compression of the numeric
data in the optimal manner.
- "enum" - force the column to be parsed as a categorical column.
- "time" - force the column to be parsed as a time column. H2O will attempt to parse the following
list of date time formats: (date) "yyyy-MM-dd", "yyyy MM dd", "dd-MMM-yy", "dd MMM yy", (time)
"HH:mm:ss", "HH:mm:ss:SSS", "HH:mm:ss:SSSnnnnnn", "HH.mm.ss" "HH.mm.ss.SSS", "HH.mm.ss.SSSnnnnnn".
Times can also contain "AM" or "PM".
:param na_strings: A list of strings, or a list of lists of strings (one list per column), or a dictionary
of column names to strings which are to be interpreted as missing values.
:param skipped_columns: an integer lists of column indices to skip and not parsed into the final frame from the import file.
:param custom_non_data_line_markers: If a line in imported file starts with any character in given string it will NOT be imported. Empty string means all lines are imported, None means that default behaviour for given format will be used
:returns: a dictionary containing parse parameters guessed by the H2O backend. |
13,850 | def getTrackedDeviceIndexForControllerRole(self, unDeviceType):
fn = self.function_table.getTrackedDeviceIndexForControllerRole
result = fn(unDeviceType)
return result | Returns the device index associated with a specific role, for example the left hand or the right hand. This function is deprecated in favor of the new IVRInput system. |
13,851 | def start(self):
Global.LOGGER.info("starting the flow manager")
self._start_actions()
self._start_message_fetcher()
Global.LOGGER.debug("flow manager started") | Start all the processes |
13,852 | def header_canonical(self, header_name):
header_name = header_name.lower()
if header_name == 'content-type':
return 'CONTENT_TYPE'
elif header_name == 'content-length':
return 'CONTENT_LENGTH'
return 'HTTP_%s' % header_name.replace('-', '_').upper() | Translate HTTP headers to Django header names. |
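For illustration, the WSGI-style mapping this produces (`handler` is a made-up instance of the class above):
```python
handler.header_canonical('Content-Type')      # -> 'CONTENT_TYPE'
handler.header_canonical('Content-Length')    # -> 'CONTENT_LENGTH'
handler.header_canonical('X-Requested-With')  # -> 'HTTP_X_REQUESTED_WITH'
```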
13,853 | def escape(s, quote=False):
if s is None:
return ''
if not isinstance(s, (str, bytes)):
s = str(s)
if isinstance(s, bytes):
try:
s = s.decode('utf-8')
except UnicodeDecodeError:
s = s.decode('utf-8', 'replace')
s = s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
if quote:
s = s.replace('"', '&quot;')
return s | Replace special characters "&", "<" and ">" with HTML-safe sequences. If
the optional flag `quote` is `True`, the quotation mark character is
also translated.
There is a special handling for `None` which escapes to an empty string.
:param s: the string to escape.
:param quote: set to true to also escape double quotes. |
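A quick usage sketch of `escape` with a made-up input:
```python
print(escape('<a href="x">Fish & Chips</a>'))
# &lt;a href="x"&gt;Fish &amp; Chips&lt;/a&gt;
print(escape('<a href="x">Fish & Chips</a>', quote=True))
# &lt;a href=&quot;x&quot;&gt;Fish &amp; Chips&lt;/a&gt;
```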
13,854 | def read_function(data, window, ij, g_args):
output = (data[0] > numpy.mean(data[0])).astype(data[0].dtype) * data[0].max()
return output | Takes an array, and sets any value above the mean to the max, the rest to 0 |
13,855 | def download_album_by_id(self, album_id, album_name):
try:
songs = self.crawler.get_album_songs(album_id)
except RequestException as exception:
click.echo(exception)
else:
folder = os.path.join(self.folder, album_name)
for song in songs:
self.download_song_by_id(song.song_id, song.song_name, folder) | Download an album by its id.
:params album_id: album id.
:params album_name: album name. |
13,856 | def granularity_to_time(s):
mfact = {
'S': 1,
'M': 60,
'H': 3600,
'D': 86400,
'W': 604800,
}
try:
f, n = re.match("(?P<f>[SMHDW])(?:(?P<n>\d+)|)", s).groups()
n = n if n else 1
return mfact[f] * int(n)
except Exception as e:
raise ValueError(e) | convert a named granularity into seconds.
get value in seconds for named granularities: M1, M5 ... H1 etc.
>>> print(granularity_to_time("M5"))
300 |
13,857 | def add_inverse_query(self, key_val={}):
q = Q("match", **key_val)
self.search = self.search.query(~q)
return self | Add an es_dsl inverse query object to the es_dsl Search object
:param key_val: a key-value pair(dict) containing the query to be added to the search object
:returns: self, which allows the method to be chainable with the other methods |
13,858 | def route_election(self, election):
if (
election.election_type.slug == ElectionType.GENERAL
or ElectionType.GENERAL_RUNOFF
):
self.bootstrap_general_election(election)
elif election.race.special:
self.bootstrap_special_election(election)
if election.race.office.is_executive:
self.bootstrap_executive_office(election)
else:
self.bootstrap_legislative_office(election) | Legislative or executive office? |
13,859 | def get_network_attributegroup_items(network_id, **kwargs):
user_id=kwargs.get()
net_i = _get_network(network_id)
net_i.check_read_permission(user_id)
group_items_i = db.DBSession.query(AttrGroupItem).filter(
AttrGroupItem.network_id==network_id).all()
return group_items_i | Get all the group items in a network |
13,860 | def list_names():
names = get_all_names()
nameslen = len(names)
print(.format(nameslen))
namewidth = 20
swatch = * 9
third = nameslen // 3
lastthird = third * 2
cols = (
names[0: third],
names[third: lastthird],
names[lastthird:],
)
blankitem = * (namewidth + len(swatch) + 2)
for i in range(third):
nameset = []
for colset in cols:
try:
nameset.append(colset[i])
except IndexError:
nameset.append(None)
continue
line = C().join(
C().join(
C(name.rjust(namewidth)),
C(swatch, back=name),
) if name else blankitem
for name in nameset
)
print(line)
return 0 | List all known color names. |
13,861 | def _set_ospf(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("vrf",ospf.ospf, yang_name="ospf", rest_name="ospf", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: u, u: u, u: None, u: None, u: u, u: u}}), is_container=, yang_name="ospf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: u, u: None, u: None, u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "list",
: ,
})
self.__ospf = t
if hasattr(self, ):
self._set() | Setter method for ospf, mapped from YANG variable /rbridge_id/router/ospf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ospf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ospf() directly. |
13,862 | def system_find_users(input_params={}, always_retry=True, **kwargs):
return DXHTTPRequest('/system/findUsers', input_params, always_retry=always_retry, **kwargs) | Invokes the /system/findUsers API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Search#API-method%3A-%2Fsystem%2FfindUsers |
13,863 | def from_dict(cls, d, ignore=()):
filtered = {}
for k, v in d.items():
if k == "typeid":
assert v == cls.typeid, \
"Dict has typeid %s but %s has typeid %s" % \
(v, cls, cls.typeid)
elif k not in ignore:
filtered[k] = v
try:
inst = cls(**filtered)
except TypeError as e:
raise TypeError("%s raised error: %s" % (cls.typeid, str(e)))
return inst | Create an instance from a serialized version of cls
Args:
d(dict): Endpoints of cls to set
ignore(tuple): Keys to ignore
Returns:
Instance of this class |
13,864 | def try_checkpoint_metadata(self, trial):
if trial._checkpoint.storage == Checkpoint.MEMORY:
logger.debug("Not saving data for trial w/ memory checkpoint.")
return
try:
logger.debug("Saving trial metadata.")
self._cached_trial_state[trial.trial_id] = trial.__getstate__()
except Exception:
logger.exception("Error checkpointing trial metadata.") | Checkpoints metadata.
Args:
trial (Trial): Trial to checkpoint. |
13,865 | def parse_url(self) -> RequestUrl:
if self._URL is None:
current_url = b"%s://%s%s" % (
encode_str(self.schema),
encode_str(self.host),
self._current_url
)
self._URL = RequestUrl(current_url)
return cast(RequestUrl, self._URL) | Get the parsed URL object |
13,866 | def _internal_network_removed(self, ri, port, ex_gw_port):
itfc_deleted = False
driver = self.driver_manager.get_driver(ri.id)
vrf_name = driver._get_vrf_name(ri)
network_name = ex_gw_port[].get()
if self._router_ids_by_vrf_and_ext_net.get(
vrf_name, {}).get(network_name) and (
ri.router[] in
self._router_ids_by_vrf_and_ext_net[vrf_name][network_name]):
if len(ri.internal_ports) == 1 and port in ri.internal_ports:
self._router_ids_by_vrf_and_ext_net[
vrf_name][network_name].remove(ri.router[])
if not self._router_ids_by_vrf_and_ext_net[vrf_name].get(
network_name):
LOG.debug("++ REMOVING NETWORK %s" % network_name)
itfc_deleted = True
del self._router_ids_by_vrf_and_ext_net[
vrf_name][network_name]
if not self._router_ids_by_vrf_and_ext_net.get(vrf_name):
del self._router_ids_by_vrf_and_ext_net[vrf_name]
driver.internal_network_removed(ri, port,
itfc_deleted=itfc_deleted)
if ri.snat_enabled and ex_gw_port:
driver.disable_internal_network_NAT(ri, port, ex_gw_port,
itfc_deleted=itfc_deleted) | Remove an internal router port
Check to see if this is the last port to be removed for
a given network scoped by a VRF (note: there can be
different mappings between VRFs and networks -- 1-to-1,
1-to-n, n-to-1, n-to-n -- depending on the configuration
and workflow used). If it is the last port, set the flag
indicating that the internal sub-interface for that network
on the ASR should be deleted |
13,867 | def pvwatts_ac(pdc, pdc0, eta_inv_nom=0.96, eta_inv_ref=0.9637):
pac0 = eta_inv_nom * pdc0
zeta = pdc / pdc0
eta = np.zeros_like(pdc, dtype=float)
pdc_neq_0 = ~np.equal(pdc, 0)
eta = eta_inv_nom / eta_inv_ref * (
- 0.0162*zeta
- np.divide(0.0059, zeta, out=eta, where=pdc_neq_0)
+ 0.9858)
pac = eta * pdc
pac = np.minimum(pac0, pac)
pac = np.maximum(0, pac)
return pac | Implements NREL's PVWatts inverter model [1]_.
.. math::
\eta = \frac{\eta_{nom}}{\eta_{ref}} (-0.0162\zeta - \frac{0.0059}{\zeta} + 0.9858)
.. math::
P_{ac} = \min(\eta P_{dc}, P_{ac0})
where :math:`\zeta=P_{dc}/P_{dc0}` and :math:`P_{dc0}=P_{ac0}/\eta_{nom}`.
Parameters
----------
pdc: numeric
DC power.
pdc0: numeric
Nameplate DC rating.
eta_inv_nom: numeric, default 0.96
Nominal inverter efficiency.
eta_inv_ref: numeric, default 0.9637
Reference inverter efficiency. PVWatts defines it to be 0.9637
and is included here for flexibility.
Returns
-------
pac: numeric
AC power.
References
----------
.. [1] A. P. Dobos, "PVWatts Version 5 Manual,"
http://pvwatts.nrel.gov/downloads/pvwattsv5.pdf
(2014). |
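A short usage sketch with made-up numbers, assuming the function above is available alongside numpy:
```python
import numpy as np

pdc = np.array([0.0, 1000.0, 2500.0, 4000.0])   # DC power in W
pac = pvwatts_ac(pdc, pdc0=4000.0)              # AC output, clipped at pac0 = 0.96 * 4000 = 3840 W
print(np.round(pac, 1))
```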
13,868 | def find_elb_dns_zone_id(name=, env=, region=):
LOG.info(, name, env, region)
client = boto3.Session(profile_name=env).client('elb', region_name=region)
elbs = client.describe_load_balancers(LoadBalancerNames=[name])
return elbs['LoadBalancerDescriptions'][0]['CanonicalHostedZoneNameID'] | Get an application's AWS elb dns zone id.
Args:
name (str): ELB name
env (str): Environment/account of ELB
region (str): AWS Region
Returns:
str: elb DNS zone ID |
13,869 | def zeroize():
device_name
conn = __proxy__[]()
ret = {}
ret[] = True
try:
conn.cli()
ret[] =
except Exception as exception:
ret[] = .format(exception)
ret[] = False
return ret | Resets the device to default factory settings
CLI Example:
.. code-block:: bash
salt 'device_name' junos.zeroize |
13,870 | def get_private_name(self, f):
f = self.__swagger_rename__[f] if f in self.__swagger_rename__.keys() else f
return + self.__class__.__name__ + + f | get private protected name of an attribute
:param str f: name of the private attribute to be accessed. |
13,871 | def _str(obj):
values = []
for name in obj._attribs:
val = getattr(obj, name)
if isinstance(val, str):
val = repr(val)
val = str(val) if len(str(val)) < 10 else "(...)"
values.append((name, val))
values = ", ".join("{}={}".format(k, v) for k, v in values)
return "{}({})".format(obj.__class__.__name__, values) | Show nicely the generic object received. |
13,872 | def get_earth_radii(self):
earth_model = self.prologue[][]
a = earth_model[] * 1000
b = (earth_model[] +
earth_model[]) / 2.0 * 1000
return a, b | Get earth radii from prologue
Returns:
Equatorial radius, polar radius [m] |
13,873 | def has(self, url, xpath=None):
if not path.exists(self.db_path):
return False
return self._query(url, xpath).count() > 0 | Check if a URL (and xpath) exists in the cache
If DB has not been initialized yet, returns ``False`` for any URL.
Args:
url (str): URL to look up in the cache.
xpath (str): xpath to search (may be ``None``)
Returns:
bool: ``True`` if URL exists, ``False`` otherwise |
13,874 | def _values_of_same_type(self, val1, val2):
if self._is_supported_matrix(val1) and self._is_supported_matrix(val2):
return True
else:
return super(SparseParameter, self)._values_of_same_type(val1, val2) | Checks if two values agree in type.
The sparse parameter is less restrictive than the parameter. If both values
are sparse matrices they are considered to be of same type
regardless of their size and values they contain. |
13,875 | def db_open(cls, impl, working_dir):
path = config.get_snapshots_filename(impl, working_dir)
return cls.db_connect(path) | Open a connection to our chainstate db |
13,876 | def tf_loss_per_instance(self, states, internals, actions, terminal, reward,
next_states, next_internals, update, reference=None):
raise NotImplementedError | Creates the TensorFlow operations for calculating the loss per batch instance.
Args:
states: Dict of state tensors.
internals: Dict of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
update: Boolean tensor indicating whether this call happens during an update.
reference: Optional reference tensor(s), in case of a comparative loss.
Returns:
Loss per instance tensor. |
13,877 | def track(self, tracking_number):
"Track a UPS package by number. Returns just a delivery date."
resp = self.send_request(tracking_number)
return self.parse_response(resp) | Track a UPS package by number. Returns just a delivery date. |
13,878 | def add_quasi_dipole_coordinates(inst, glat_label=, glong_label=,
alt_label=):
import apexpy
ap = apexpy.Apex(date=inst.date)
qd_lat = []; qd_lon = []; mlt = []
for lat, lon, alt, time in zip(inst[glat_label], inst[glong_label], inst[alt_label],
inst.data.index):
tlat, tlon = ap.geo2qd(lat, lon, alt)
qd_lat.append(tlat)
qd_lon.append(tlon)
mlt.append(ap.mlon2mlt(tlon, time))
inst['qd_lat'] = qd_lat
inst['qd_long'] = qd_lon
inst['mlt'] = mlt
inst.meta[] = {:,:}
inst.meta[] = {:,:}
inst.meta[] = {:,:}
return | Uses Apexpy package to add quasi-dipole coordinates to instrument object.
The Quasi-Dipole coordinate system includes both the tilt and offset of the
geomagnetic field to calculate the latitude, longitude, and local time
of the spacecraft with respect to the geomagnetic field.
This system is preferred over AACGM near the equator for LEO satellites.
Example
-------
# function added below modifies the inst object upon every inst.load call
inst.custom.add(add_quasi_dipole_coordinates, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include quasi-dipole coordinates, 'qd_lat'
for magnetic latitude, 'qd_long' for longitude, and 'mlt' for magnetic local time. |
13,879 | def string_to_sign(self):
return (AWS4_HMAC_SHA256 + "\n" +
self.request_timestamp + "\n" +
self.credential_scope + "\n" +
sha256(self.canonical_request.encode("utf-8")).hexdigest()) | The AWS SigV4 string being signed. |
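A standalone sketch of how that string-to-sign is assembled; the timestamp, credential scope, and canonical request below are made-up placeholders, not values from a real request.

from hashlib import sha256

AWS4_HMAC_SHA256 = "AWS4-HMAC-SHA256"
request_timestamp = "20180527T093358Z"                   # example timestamp
credential_scope = "20180527/us-east-1/s3/aws4_request"  # example scope
canonical_request = "GET\n/\n\nhost:example.amazonaws.com\n\nhost\n" + sha256(b"").hexdigest()

string_to_sign = (AWS4_HMAC_SHA256 + "\n" +
                  request_timestamp + "\n" +
                  credential_scope + "\n" +
                  sha256(canonical_request.encode("utf-8")).hexdigest())
print(string_to_sign)  # four lines: algorithm, timestamp, scope, hashed canonical request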
13,880 | def null(alphabet):
return fsm(
alphabet = alphabet,
states = {0},
initial = 0,
finals = set(),
map = {
0: dict([(symbol, 0) for symbol in alphabet]),
},
) | An FSM accepting nothing (not even the empty string). This
demonstrates that this is possible, and is also extremely useful
in some situations |
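A minimal hand-rolled simulation (not the fsm class API) showing why this machine accepts nothing: every symbol loops back to state 0 and no state is final.

alphabet = {"a", "b"}
finals = set()
transition = {0: {symbol: 0 for symbol in alphabet}}

def accepts(word):
    state = 0  # initial state
    for symbol in word:
        state = transition[state][symbol]
    return state in finals

print(accepts(""))    # False - even the empty string is rejected
print(accepts("ab"))  # False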
13,881 | def _get_number_of_slices(self, slice_type):
if slice_type == SliceType.AXIAL:
return self.dimensions[self.axial_orientation.normal_component]
elif slice_type == SliceType.SAGITTAL:
return self.dimensions[self.sagittal_orientation.normal_component]
elif slice_type == SliceType.CORONAL:
return self.dimensions[self.coronal_orientation.normal_component] | Get the number of slices in a certain direction |
13,882 | def create_pth():
if prefix == real_prefix:
print("Not creating PTH in real prefix: %s" % prefix)
return False
with open(vext_pth, 'w') as f:
f.write(DEFAULT_PTH_CONTENT)
return True | Create the default PTH file
:return: |
13,883 | def delist(values):
assert isinstance(values, list)
if not values:
return None
elif len(values) == 1:
return values[0]
return values | Reduce lists of zero or one elements to individual values. |
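Quick usage sketch, assuming the delist above is in scope:

print(delist([]))         # None
print(delist(["only"]))   # 'only'
print(delist([1, 2, 3]))  # [1, 2, 3]
# delist("not a list") would trip the assert, since only lists are accepted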
13,884 | def bootstrap_vi(version=None, venvargs=None):
if not version:
version = get_latest_virtualenv_version()
tarball = download_virtualenv(version)
p = subprocess.Popen('tar xzf {}'.format(tarball), shell=True)
p.wait()
p = 'virtualenv-{}/virtualenv.py'.format(version)
create_virtualenv(p, venvargs) | Bootstrap virtualenv into current directory
:param str version: Virtualenv version like 13.1.0 or None for latest version
:param list venvargs: argv list for virtualenv.py or None for default |
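Usage sketch; the version string and argv list are made-up examples, and the call downloads and unpacks a virtualenv release into the current directory.

bootstrap_vi("16.7.9", venvargs=["--python=python3", "venv"])  # pin a version
bootstrap_vi()  # or take the latest release with default arguments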
13,885 | def rgb_color_picker(obj, min_luminance=None, max_luminance=None):
color_value = int.from_bytes(
hashlib.md5(str(obj).encode()).digest(),
'little',
) % 0xffffff
color = Color(f'#{color_value:06x}')
if min_luminance and color.get_luminance() < min_luminance:
color.set_luminance(min_luminance)
elif max_luminance and color.get_luminance() > max_luminance:
color.set_luminance(max_luminance)
return color | Modified version of colour.RGB_color_picker |
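A standalone sketch of the deterministic part, using only hashlib: any object hashes to the same 24-bit value on every run. The '#rrggbb' formatting is an assumption about how that value becomes a colour string.

import hashlib

def stable_color_value(obj):
    digest = hashlib.md5(str(obj).encode()).digest()
    return int.from_bytes(digest, 'little') % 0xffffff

print(f"#{stable_color_value('my-service'):06x}")  # deterministic hex colour
print(f"#{stable_color_value('my-service'):06x}")  # identical to the line above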
13,886 | def pending_items(self) -> Iterable[Tuple[bytes, bytes]]:
for key, value in self._changes.items():
if value is not DELETED:
yield key, value | A tuple of (key, value) pairs for every key that has been updated.
Like :meth:`pending_keys()`, this does not return any deleted keys. |
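A minimal sketch of the surrounding class shape this generator assumes: a _changes dict plus a DELETED sentinel marking removed keys (only those two names come from the row above; the rest is assumed).

from typing import Iterable, Tuple

DELETED = object()  # assumed sentinel for deleted keys

class ChangeTracker:
    def __init__(self):
        self._changes = {}

    def set(self, key: bytes, value: bytes) -> None:
        self._changes[key] = value

    def delete(self, key: bytes) -> None:
        self._changes[key] = DELETED

    def pending_items(self) -> Iterable[Tuple[bytes, bytes]]:
        for key, value in self._changes.items():
            if value is not DELETED:
                yield key, value

tracker = ChangeTracker()
tracker.set(b"a", b"1")
tracker.delete(b"b")
print(list(tracker.pending_items()))  # [(b'a', b'1')] - deleted keys are skipped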
13,887 | def cmp_ast(node1, node2):
if type(node1) != type(node2):
return False
if isinstance(node1, (list, tuple)):
if len(node1) != len(node2):
return False
for left, right in zip(node1, node2):
if not cmp_ast(left, right):
return False
elif isinstance(node1, ast.AST):
for field in node1._fields:
left = getattr(node1, field, Undedined)
right = getattr(node2, field, Undedined)
if not cmp_ast(left, right):
return False
else:
return node1 == node2
return True | Compare if two nodes are equal. |
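Usage sketch: whitespace-only differences compare equal because only _fields are walked (line and column numbers live in _attributes). This assumes the sentinel spelled Undedined above is defined at module level, e.g. as a plain object.

import ast

Undedined = object()  # assumed module-level sentinel used by cmp_ast

print(cmp_ast(ast.parse("x+1"), ast.parse("x + 1")))  # True
print(cmp_ast(ast.parse("x+1"), ast.parse("x + 2")))  # False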
13,888 | def createResourceMapFromStream(in_stream, base_url=d1_common.const.URL_DATAONE_ROOT):
pids = []
for line in in_stream:
pid = line.strip()
if pid == "
continue
if len(pids) < 2:
raise ValueError("Insufficient numbers of identifiers provided.")
logging.info("Read {} identifiers".format(len(pids)))
ore = ResourceMap(base_url=base_url)
logging.info("ORE PID = {}".format(pids[0]))
ore.initialize(pids[0])
logging.info("Metadata PID = {}".format(pids[1]))
ore.addMetadataDocument(pids[1])
ore.addDataDocuments(pids[2:], pids[1])
return ore | Create a simple OAI-ORE Resource Map with one Science Metadata document and any
number of Science Data objects, using a stream of PIDs.
Args:
in_stream:
The first non-blank line is the PID of the resource map itself. Second line is
the science metadata PID and remaining lines are science data PIDs.
Example stream contents:
::
PID_ORE_value
sci_meta_pid_value
data_pid_1
data_pid_2
data_pid_3
base_url : str
Root of the DataONE environment in which the Resource Map will be used.
Returns:
ResourceMap : OAI-ORE Resource Map |
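Usage sketch with an in-memory stream; the PIDs are made-up examples and the d1_common package (which provides ResourceMap) is assumed to be installed.

import io

pid_stream = io.StringIO(
    "ore_pid_001\n"       # PID of the resource map itself
    "sci_meta_pid_001\n"  # science metadata PID
    "data_pid_001\n"      # science data PIDs
    "data_pid_002\n"
)
resource_map = createResourceMapFromStream(pid_stream)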
13,889 | def tab(tab_name, element_list=None, section_list=None):
_tab = {
: ,
: tab_name,
}
if element_list is not None:
if isinstance(element_list, list):
_tab[] = element_list
else:
_tab[] = [element_list]
if section_list is not None:
if isinstance(section_list, list):
_tab[] = section_list
else:
if not in section_list:
_tab[] = element_list
else:
_tab[].append(element_list)
return _tab | Returns a dictionary representing a new tab to display elements.
This can be thought of as a simple container for displaying multiple
types of information.
Args:
tab_name: The title to display
element_list: The list of elements to display. If a single element is
given it will be wrapped in a list.
section_list: A list of sections to display.
Returns:
A dictionary with metadata specifying that it is to be rendered
as a page containing multiple elements and/or tabs. |
13,890 | def output_datacenter(gandi, datacenter, output_keys, justify=14):
output_generic(gandi, datacenter, output_keys, justify)
if 'dc_name' in output_keys:
output_line(gandi, 'datacenter', datacenter['dc_name'], justify)
if 'status' in output_keys:
deactivate_at = datacenter.get('deactivate_at')
if deactivate_at:
output_line(gandi, 'closing on',
deactivate_at.strftime('%d/%m/%Y'), justify)
closing = []
iaas_closed_for = datacenter.get('iaas_closed_for')
if iaas_closed_for == 'ALL':
closing.append('vm')
paas_closed_for = datacenter.get('paas_closed_for')
if paas_closed_for == 'ALL':
closing.append('paas')
if closing:
output_line(gandi, 'closed for', ', '.join(closing), justify) | Helper to output datacenter information. |
13,891 | def delete_wallet(self, wallet_name):
return make_request(
'{}/wallet/{}'.format(self.url, wallet_name),
method='DELETE',
timeout=self.timeout,
client=self._client) | Delete a wallet.
@param the name of the wallet.
@return a success string from the plans server.
@raise ServerError via make_request. |
13,892 | def get_element_by_name(self, el_name, el_idx=0):
el_list = self.get_element_list_by_name(el_name)
try:
return el_list[el_idx]
except IndexError:
raise SimpleXMLWrapperException(
'Element "{}" not found at index {}. Number of matching elements: {}'.format(el_name, el_idx, len(el_list))
) | Args:
el_name : str
Name of element to get.
el_idx : int
Index of element to use as base in the event that there are multiple sibling
elements with the same name.
Returns:
element : The selected element. |
13,893 | def p_string_literal(self, p):
p[0] = self.asttypes.String(p[1])
p[0].setpos(p) | string_literal : STRING |
13,894 | def format_csv(self, delim=',', qu='"'):
res = qu + self.name + qu + delim
if self.data:
for d in self.data:
res += qu + str(d) + qu + delim
return res + '\n' | Prepares the data in CSV format |
13,895 | def set_config(self, config):
with self._conn:
self._conn.execute("DELETE FROM config")
self._conn.execute('INSERT INTO config VALUES (?)',
(serialize_config(config),)) | Set (replace) the configuration for the session.
Args:
config: Configuration object |
13,896 | def joint_sfs(dac1, dac2, n1=None, n2=None):
dac1, n1 = _check_dac_n(dac1, n1)
dac2, n2 = _check_dac_n(dac2, n2)
x = n1 + 1
y = n2 + 1
tmp = (dac1 * y + dac2).astype(int, copy=False)
s = np.bincount(tmp)
s.resize(x, y)
return s | Compute the joint site frequency spectrum between two populations.
Parameters
----------
dac1 : array_like, int, shape (n_variants,)
Derived allele counts for the first population.
dac2 : array_like, int, shape (n_variants,)
Derived allele counts for the second population.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs : ndarray, int, shape (m_chromosomes, n_chromosomes)
Array where the (i, j)th element is the number of variant sites with i
derived alleles in the first population and j derived alleles in the
second population. |
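A small worked example, assuming a joint_sfs with this signature is importable (scikit-allel exposes one as allel.joint_sfs): five variants scored in two populations of four chromosomes each, so counts run 0..4 and the output is 5 x 5.

import numpy as np

dac1 = np.array([0, 1, 2, 4, 1])
dac2 = np.array([1, 1, 0, 4, 1])
s = joint_sfs(dac1, dac2, n1=4, n2=4)
print(s.shape)  # (5, 5)
print(s[1, 1])  # 2 - two variants have one derived allele in each population
print(s[4, 4])  # 1 - one variant carries the derived allele on every chromosome in both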
13,897 | def store_disorder(self, sc=None, force_rerun=False):
log.info()
from random import shuffle
g_ids = [g.id for g in self.reference_gempro.functional_genes]
shuffle(g_ids)
def _store_disorder_sc(g_id, outdir=self.sequences_by_gene_dir,
g_to_pickle=self.gene_protein_pickles, force_rerun=force_rerun):
import ssbio.utils
import ssbio.io
import os.path as op
protein_seqs_pickle_path = op.join(outdir, .format(g_id))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=protein_seqs_pickle_path):
protein_pickle_path = g_to_pickle[g_id]
protein_pickle = ssbio.io.load_pickle(protein_pickle_path)
protein_pickle.get_all_disorder_predictions(representative_only=False)
protein_pickle.save_pickle(outfile=protein_seqs_pickle_path)
return g_id, protein_seqs_pickle_path
if sc:
genes_rdd = sc.parallelize(g_ids)
result = genes_rdd.map(_store_disorder_sc).collect()
else:
result = []
for g in tqdm(g_ids):
result.append(self._load_sequences_to_reference_gene(g, force_rerun))
log.info()
for g_id, protein_pickle in result:
self.gene_protein_pickles[g_id] = protein_pickle | Wrapper for _store_disorder |
13,898 | def MultimodeCombine(pupils):
fluxes=[np.vdot(pupils[i],pupils[i]).real for i in range(len(pupils))]
coherentFluxes=[np.vdot(pupils[i],pupils[j])
for i in range(1,len(pupils))
for j in range(i)]
return fluxes,coherentFluxes | Return the instantaneous coherent fluxes and photometric fluxes for a
multiway multimode combiner (no spatial filtering) |
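A worked example with two three-sample complex "pupils", assuming the function above and numpy are in scope; with N pupils it returns N photometric fluxes and N*(N-1)/2 pairwise coherent fluxes.

import numpy as np

pupils = [np.array([1 + 0j, 1j, 0.5 + 0.5j]),
          np.array([1 + 0j, 1 + 0j, 1 + 0j])]
fluxes, coherent_fluxes = MultimodeCombine(pupils)
print(fluxes)           # [2.5, 3.0] - |amplitude|^2 summed over each pupil
print(coherent_fluxes)  # [(1.5+1.5j)] - vdot of pupil 1 against pupil 0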
13,899 | def parse_val(cfg,section,option):
vals = parse_vals(cfg,section,option)
if len(vals)==0:
return
else:
assert len(vals)==1, (section, option, vals, type(vals))
return vals[0] | extract a single value from .cfg |