Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k) |
---|---|---|
18,400 | def start(self):
log.debug("starting")  # log message lost in extraction; placeholder text
if ':' in self.address:
self.skt = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.reuse_port:
self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(socket, 'SO_REUSEPORT'):
self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
else:
log.error("SO_REUSEPORT is not supported on this platform")  # message reconstructed
try:
self.skt.bind((self.address, int(self.port)))
except socket.error as msg:
error_string = "Unable to bind to port {} on {}: {}".format(self.port, self.address, msg)
log.error(error_string, exc_info=True)
raise BindException(error_string)
log.debug("Listening for a maximum of %s clients", self.max_clients)
self.skt.listen(self.max_clients)
self.thread_serve = threading.Thread(target=self._serve_clients)
self.thread_serve.start() | Start listening for messages. |
18,401 | def cursor_batch(self,
table_name,
start_timeperiod,
end_timeperiod):
raise NotImplementedError("{0}.cursor_batch is not implemented".format(self.__class__.__name__)) | method returns batched DB cursor |
18,402 | def login(self, email=None, password=None):
if email is None:
if self._parent_class.email:
email = self._parent_class.email
else:
email = compat_input("login: ")
if password is None:
if self._parent_class._password:
password = self._parent_class._password
else:
password = getpass.getpass()
self._parent_class.add_headers({'Referer': "{}/v2.0/api/login".format(self._parent_class.controller)})
response = self._parent_class.post.login({"email": email, "password": password})
if response.cgx_status:
if not response.cgx_content.get('auth_token'):
urlpath = response.cgx_content.get("urlpath", "")
request_id = response.cgx_content.get("requestId", "")
if urlpath and request_id:
print("SAML 2.0 login required. Please open the following URL in a browser to log in:\n{}\n".format(urlpath))  # prompt text reconstructed
found_auth_token = False
for i in range(20):
print("Waiting for SAML login to complete ({} seconds remaining)...".format((20 - i) * 5))  # countdown text reconstructed
saml_response = self.check_sso_login(email, request_id)
if saml_response.cgx_status and saml_response.cgx_content.get('auth_token'):
found_auth_token = True
break
time.sleep(5)
if not found_auth_token:
print("Login time expired! Please re-login.\n")
try:
api_logger.debug("LOGIN_FAIL_RESPONSE = %s", json.dumps(response, indent=4))
except (TypeError, ValueError):
return False | Interactive login using the `cloudgenix.API` object. This function is more robust and handles SAML and MSP accounts.
Expects interactive capability. If this is not available, use `cloudgenix.API.post.login` directly.
**Parameters:**:
- **email**: Email to log in for, will prompt if not entered.
- **password**: Password to log in with, will prompt if not entered. Ignored for SAML v2.0 users.
**Returns:** Bool. In addition the function will mutate the `cloudgenix.API` constructor items as needed. |
18,403 | def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
while True:
chunk = file.read(size)
if not chunk:
break
yield chunk | Yield pieces of data from a file-like object until EOF. |
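A minimal usage sketch for the chunk reader above; the file name "example.bin" and the 4096-byte chunk size are illustrative assumptions:
with open("example.bin", "rb") as fh:  # stream the file without loading it whole
    total_bytes = sum(len(chunk) for chunk in read_chunks(fh, size=4096))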
18,404 | def get_discrete_grid(self):
sets_grid = []
for d in self.space:
if d.type == 'discrete':  # type name reconstructed; the original literal was lost in extraction
sets_grid.extend([d.domain]*d.dimensionality)
return np.array(list(itertools.product(*sets_grid))) | Computes a Numpy array with the grid of points that results after crossing the possible outputs of the discrete
variables |
18,405 | def setbit(self, name, offset, val):
val = int(get_boolean('val', val))
offset = get_positive_integer('offset', offset)
return self.execute_command('setbit', name, offset, val) | Flag the ``offset`` in ``name`` as ``value``. Returns a boolean
indicating the previous value of ``offset``.
Like **Redis.SETBIT**
:param string name: the key name
:param int offset: the bit position
:param bool val: the bit value
:return: the previous bit (False or True) at the ``offset``
:rtype: bool
>>> ssdb.set('bit_test', 1)
True
>>> ssdb.setbit('bit_test', 1, 1)
False
>>> ssdb.get('bit_test')
3
>>> ssdb.setbit('bit_test', 2, 1)
False
>>> ssdb.get('bit_test')
7 |
18,406 | def add_class(self, node):
self.linker.visit(node)
self.classdiagram.add_object(self.get_title(node), node) | visit one class and add it to diagram |
18,407 | def parse(
idp_metadata,
required_sso_binding=OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT,
required_slo_binding=OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT,
entity_id=None):
data = {}
dom = fromstring(idp_metadata, forbid_dtd=True)
# XPath expressions and dict keys below were lost in extraction; restored following the upstream python3-saml metadata parser.
entity_desc_path = '//md:EntityDescriptor'
if entity_id:
entity_desc_path += "[@entityID=\"%s\"]" % entity_id
entity_descriptor_nodes = OneLogin_Saml2_Utils.query(dom, entity_desc_path)
idp_entity_id = want_authn_requests_signed = idp_name_id_format = idp_sso_url = idp_slo_url = certs = None
if len(entity_descriptor_nodes) > 0:
entity_descriptor_node = entity_descriptor_nodes[0]
idp_descriptor_nodes = OneLogin_Saml2_Utils.query(entity_descriptor_node, './md:IDPSSODescriptor')
if len(idp_descriptor_nodes) > 0:
idp_descriptor_node = idp_descriptor_nodes[0]
idp_entity_id = entity_descriptor_node.get('entityID', None)
want_authn_requests_signed = entity_descriptor_node.get('WantAuthnRequestsSigned', None)
name_id_format_nodes = OneLogin_Saml2_Utils.query(idp_descriptor_node, './md:NameIDFormat')
if len(name_id_format_nodes) > 0:
idp_name_id_format = OneLogin_Saml2_Utils.element_text(name_id_format_nodes[0])
sso_nodes = OneLogin_Saml2_Utils.query(
idp_descriptor_node,
"./md:SingleSignOnService[@Binding='%s']" % required_sso_binding
)
if len(sso_nodes) > 0:
idp_sso_url = sso_nodes[0].get('Location', None)
slo_nodes = OneLogin_Saml2_Utils.query(
idp_descriptor_node,
"./md:SingleLogoutService[@Binding='%s']" % required_slo_binding
)
if len(slo_nodes) > 0:
idp_slo_url = slo_nodes[0].get('Location', None)
signing_nodes = OneLogin_Saml2_Utils.query(idp_descriptor_node, "./md:KeyDescriptor[not(contains(@use, 'encryption'))]/ds:KeyInfo/ds:X509Data/ds:X509Certificate")
encryption_nodes = OneLogin_Saml2_Utils.query(idp_descriptor_node, "./md:KeyDescriptor[not(contains(@use, 'signing'))]/ds:KeyInfo/ds:X509Data/ds:X509Certificate")
if len(signing_nodes) > 0 or len(encryption_nodes) > 0:
certs = {}
if len(signing_nodes) > 0:
certs['signing'] = []
for cert_node in signing_nodes:
certs['signing'].append(''.join(OneLogin_Saml2_Utils.element_text(cert_node).split()))
if len(encryption_nodes) > 0:
certs['encryption'] = []
for cert_node in encryption_nodes:
certs['encryption'].append(''.join(OneLogin_Saml2_Utils.element_text(cert_node).split()))
data['idp'] = {}
if idp_entity_id is not None:
data['idp']['entityId'] = idp_entity_id
if idp_sso_url is not None:
data['idp']['singleSignOnService'] = {}
data['idp']['singleSignOnService']['url'] = idp_sso_url
data['idp']['singleSignOnService']['binding'] = required_sso_binding
if idp_slo_url is not None:
data['idp']['singleLogoutService'] = {}
data['idp']['singleLogoutService']['url'] = idp_slo_url
data['idp']['singleLogoutService']['binding'] = required_slo_binding
if certs is not None:
if (len(certs) == 1 and
(('signing' in certs and len(certs['signing']) == 1) or
('encryption' in certs and len(certs['encryption']) == 1))) or \
(('signing' in certs and len(certs['signing']) == 1) and
('encryption' in certs and len(certs['encryption']) == 1 and
certs['signing'][0] == certs['encryption'][0])):
if 'signing' in certs:
data['idp']['x509cert'] = certs['signing'][0]
else:
data['idp']['x509cert'] = certs['encryption'][0]
else:
data['idp']['x509certMulti'] = certs
if want_authn_requests_signed is not None:
data['security'] = {}
data['security']['authnRequestsSigned'] = want_authn_requests_signed
if idp_name_id_format:
data['sp'] = {}
data['sp']['NameIDFormat'] = idp_name_id_format
return data | Parse the Identity Provider metadata and return a dict with extracted data.
If there are multiple <IDPSSODescriptor> tags, parse only the first.
Parse only those SSO endpoints with the same binding as given by
the `required_sso_binding` parameter.
Parse only those SLO endpoints with the same binding as given by
the `required_slo_binding` parameter.
If the metadata specifies multiple SSO endpoints with the required
binding, extract only the first (the same holds true for SLO
endpoints).
:param idp_metadata: XML of the Identity Provider Metadata.
:type idp_metadata: string
:param required_sso_binding: Parse only POST or REDIRECT SSO endpoints.
:type required_sso_binding: one of OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT
or OneLogin_Saml2_Constants.BINDING_HTTP_POST
:param required_slo_binding: Parse only POST or REDIRECT SLO endpoints.
:type required_slo_binding: one of OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT
or OneLogin_Saml2_Constants.BINDING_HTTP_POST
:param entity_id: Specify the entity_id of the EntityDescriptor that you want to parse from an XML
that contains multiple EntityDescriptors.
:type entity_id: string
:returns: settings dict with extracted data
:rtype: dict |
18,408 | def create(self, request):
self._site_login(request.repo)
self.prefix = "{}_Pull_Request_{}".format(request.repo.name, request.pull.number)
self._edit_main(request)
return self._create_new(request) | Creates a new wiki page for the specified PullRequest instance. The page
gets initialized with basic information about the pull request, the tests
that will be run, etc. Returns the URL on the wiki.
:arg request: the PullRequest instance with testing information. |
18,409 | def split_comp_info(self, catalog_name, split_ver, split_key):
return self._split_comp_info_dicts["%s_%s" % (catalog_name, split_ver)][split_key] | Return the info for a particular split key |
18,410 | def _identity_stmt(self, stmt: Statement, sctx: SchemaContext) -> None:
if not sctx.schema_data.if_features(stmt, sctx.text_mid):
return
id = (stmt.argument, sctx.schema_data.namespace(sctx.text_mid))
adj = sctx.schema_data.identity_adjs.setdefault(id, IdentityAdjacency())
for bst in stmt.find_all("base"):
bid = sctx.schema_data.translate_pname(bst.argument, sctx.text_mid)
adj.bases.add(bid)
badj = sctx.schema_data.identity_adjs.setdefault(
bid, IdentityAdjacency())
badj.derivs.add(id)
sctx.schema_data.identity_adjs[id] = adj | Handle identity statement. |
18,411 | def build_tqdm_outer(self, desc, total):
return self.tqdm(desc=desc, total=total, leave=self.leave_outer, initial=self.initial) | Extension point. Override to provide custom options to outer progress bars (Epoch loop)
:param desc: Description
:param total: Number of epochs
:return: new progress bar |
18,412 | def render_view(parser, token):
bits = token.split_contents()
n = len(bits)
if n < 2:
raise TemplateSyntaxError(" takes at least one view as argument")
viewname = bits[1]
kwargs = {}
if n > 2:
for bit in bits[2:]:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to render_view tag")
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
return StringNode(viewname, kwargs) | Return a string version of a View with as_string method.
First argument is the name of the view. Any other arguments
should be keyword arguments and will be passed to the view.
Example:
{% render_view viewname var1=xx var2=yy %} |
18,413 | def _set_attribute(self, name, value):
setattr(self, name, value)
self.namespace.update({name: getattr(self, name)}) | Make sure namespace gets updated when setting attributes. |
18,414 | def _cont_norm(fluxes, ivars, cont):
nstars = fluxes.shape[0]
npixels = fluxes.shape[1]
norm_fluxes = np.ones(fluxes.shape)
norm_ivars = np.zeros(ivars.shape)
bad = cont == 0.
norm_fluxes = np.ones(fluxes.shape)
norm_fluxes[~bad] = fluxes[~bad] / cont[~bad]
norm_ivars = cont**2 * ivars
return norm_fluxes, norm_ivars | Continuum-normalize a continuous segment of spectra.
Parameters
----------
fluxes: numpy ndarray
pixel intensities
ivars: numpy ndarray
inverse variances, parallel to fluxes
cont: numpy ndarray
continuum values, parallel to fluxes
Returns
-------
norm_fluxes: numpy ndarray
normalized pixel intensities
norm_ivars: numpy ndarray
rescaled inverse variances |
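A small worked example of the continuum normalization above, with hand-picked values; pixels where the continuum is zero keep a flux of 1.0 and get an inverse variance of 0:
import numpy as np
fluxes = np.array([[2.0, 4.0]])
ivars = np.ones((1, 2))
cont = np.array([[2.0, 0.0]])   # second pixel has no continuum estimate
norm_fluxes, norm_ivars = _cont_norm(fluxes, ivars, cont)
# norm_fluxes -> [[1.0, 1.0]], norm_ivars -> [[4.0, 0.0]]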
18,415 | def upload_plugin(self, plugin_path):
files = {
'plugin': open(plugin_path, 'rb')
}
headers = {
'X-Atlassian-Token': 'nocheck'  # header value reconstructed; original literal was lost in extraction
}
upm_token = self.request(method='GET', path='rest/plugins/1.0/', headers=headers, trailing=True).headers[
'upm-token']
url = 'rest/plugins/1.0/?token={upm_token}'.format(upm_token=upm_token)
return self.post(url, files=files, headers=headers) | Provide plugin path for upload into Jira e.g. useful for auto deploy
:param plugin_path:
:return: |
18,416 | def infer(self, sensationList, reset=True, objectName=None):
self._unsetLearningMode()
statistics = collections.defaultdict(list)
for sensations in sensationList:
for col in xrange(self.numColumns):
location, feature = sensations[col]
self.sensorInputs[col].addDataToQueue(list(feature), 0, 0)
self.externalInputs[col].addDataToQueue(list(location), 0, 0)
self.network.run(1)
self._updateInferenceStats(statistics, objectName)
if reset:
self._sendReset()
statistics["numSteps"] = len(sensationList)
statistics["object"] = objectName if objectName is not None else "Unknown"
self.statistics.append(statistics) | Infer on given sensations.
The provided sensationList is a list of sensations, and each sensation is
a mapping from cortical column to a tuple of two SDR's respectively
corresponding to the location in object space and the feature.
For example, the input can look as follows, if we are inferring a simple
object with two sensations (with very few active bits for simplicity):
sensationList = [
{
0: (set([1, 5, 10]), set([6, 12, 52])), # location, feature for CC0
1: (set([6, 2, 15]), set([64, 1, 5])), # location, feature for CC1
},
{
0: (set([5, 46, 50]), set([8, 10, 11])), # location, feature for CC0
1: (set([1, 6, 45]), set([12, 17, 23])), # location, feature for CC1
},
]
In many uses cases, this object can be created by implementations of
ObjectMachines (cf htm.research.object_machine_factory), through their
method providedObjectsToInfer.
If the object is known by the caller, an object name can be specified
as an optional argument, and must match the objects given while learning.
Parameters:
----------------------------
@param sensationList (list)
List of sensations, in the canonical format specified above
@param reset (bool)
If set to True (which is the default value), the network will
be reset after learning.
@param objectName (str)
Name of the objects (must match the names given during learning). |
18,417 | def is_authorization_expired(self):
if not self.auth.token_expiration:
return True
return (datetime.datetime.utcnow() > self.auth.token_expiration) | Checks if the authorization token (access_token) has expired.
:return: If expired.
:rtype: ``bool`` |
18,418 | def purgeCache(self, *args, **kwargs):
return self._makeApiCall(self.funcinfo["purgeCache"], *args, **kwargs) | Purge Worker Cache
Publish a purge-cache message to purge caches named `cacheName` with
`provisionerId` and `workerType` in the routing-key. Workers should
be listening for this message and purge caches when they see it.
This method takes input: ``v1/purge-cache-request.json#``
This method is ``stable`` |
18,419 | def safe_main():
try:
main()
except KeyboardInterrupt:
logger.info("Cancelled by user")
sys.exit(0)
except ProgramError as e:
logger.error(e.message)
parser.error(e.message) | A safe version of the main function (that catches ProgramError). |
18,420 | async def stdout_writer():
if sys.stdout.seekable():
return sys.stdout.buffer.raw
if os.isatty(sys.stdin.fileno()):
fd_to_use = 0
else:
fd_to_use = 1
twrite, pwrite = await loop.connect_write_pipe(
asyncio.streams.FlowControlMixin,
os.fdopen(fd_to_use, "wb"),
)
swrite = asyncio.StreamWriter(
twrite,
pwrite,
None,
loop,
)
return swrite | This is a bit complex, as stdout can be a pipe or a file.
If it is a file, we cannot use
:meth:`asycnio.BaseEventLoop.connect_write_pipe`. |
18,421 | def generate_shared_access_signature(self, services, resource_types,
permission, expiry, start=None,
ip=None, protocol=None):
_validate_not_none('self.account_name', self.account_name)
_validate_not_none('self.account_key', self.account_key)
sas = SharedAccessSignature(self.account_name, self.account_key)
return sas.generate_account(services, resource_types, permission,
expiry, start=start, ip=ip, protocol=protocol) | Generates a shared access signature for the account.
Use the returned signature with the sas_token parameter of the service
or to create a new account object.
:param Services services:
Specifies the services accessible with the account SAS. You can
combine values to provide access to more than one service.
:param ResourceTypes resource_types:
Specifies the resource types that are accessible with the account
SAS. You can combine values to provide access to more than one
resource type.
:param AccountPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy. You can combine
values to provide more than one permission.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: datetime or str
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. Possible values are
both HTTPS and HTTP (https,http) or HTTPS only (https). The default value
is https,http. Note that HTTP only is not a permitted value. |
18,422 | def _handle_backend_error(self, exception, idp):
loaded_state = self.load_state(exception.state)
relay_state = loaded_state["relay_state"]
resp_args = loaded_state["resp_args"]
error_resp = idp.create_error_response(resp_args["in_response_to"],
resp_args["destination"],
Exception(exception.message))
http_args = idp.apply_binding(resp_args["binding"], str(error_resp), resp_args["destination"], relay_state,
response=True)
satosa_logging(logger, logging.DEBUG, "HTTPargs: %s" % http_args, exception.state)
return make_saml_response(resp_args["binding"], http_args) | See super class satosa.frontends.base.FrontendModule
:type exception: satosa.exception.SATOSAAuthenticationError
:type idp: saml.server.Server
:rtype: satosa.response.Response
:param exception: The SATOSAAuthenticationError
:param idp: The saml frontend idp server
:return: A response |
18,423 | def prehook(self, **kwargs):
cmd = ['smpd', '-s']  # exact command arguments were lost in extraction; '-s' (run as daemon) is an assumption
logger.info("Starting smpd: "+" ".join(cmd))
rc = subprocess.call(cmd)
return rc | Launch local smpd. |
18,424 | def _preprocess(self, valid_features=["pcp", "tonnetz", "mfcc",
"cqt", "tempogram"]):
if self.feature_str not in valid_features:
raise RuntimeError("Feature %s in not valid for algorithm: %s "
"(valid features are %s)." %
(self.feature_str, __name__, valid_features))
else:
try:
F = self.features.features
except KeyError:
raise RuntimeError("Feature %s in not supported by MSAF" %
(self.feature_str))
return F | This method obtains the actual features. |
18,425 | def load_file_contents(cls, file_contents, seed_values=None):
@contextmanager
def opener(file_content):
with io.BytesIO(file_content.content) as fh:
yield fh
return cls._meta_load(opener, file_contents, seed_values) | Loads config from the given string payloads.
A handful of seed values will be set to act as if specified in the loaded config file's DEFAULT
section, and be available for use in substitutions. The caller may override some of these
seed values.
:param list[FileContents] file_contents: Load from these FileContents. Later instances take
precedence over earlier ones. If empty, returns an
empty config.
:param seed_values: A dict with optional override seed values for buildroot, pants_workdir,
pants_supportdir and pants_distdir. |
18,426 | def init_map(self):
d = self.declaration
if d.show_location:
self.set_show_location(d.show_location)
if d.show_traffic:
self.set_show_traffic(d.show_traffic)
if d.show_indoors:
self.set_show_indoors(d.show_indoors)
if d.show_buildings:
self.set_show_buildings(d.show_buildings)
mapview = self.map
mid = mapview.getId()
mapview.onCameraChange.connect(self.on_camera_changed)
mapview.onCameraMoveStarted.connect(self.on_camera_move_started)
mapview.onCameraMoveCanceled.connect(self.on_camera_move_stopped)
mapview.onCameraIdle.connect(self.on_camera_move_stopped)
mapview.setOnCameraChangeListener(mid)
mapview.setOnCameraMoveStartedListener(mid)
mapview.setOnCameraMoveCanceledListener(mid)
mapview.setOnCameraIdleListener(mid)
mapview.onMapClick.connect(self.on_map_clicked)
mapview.setOnMapClickListener(mid)
mapview.onMapLongClick.connect(self.on_map_long_clicked)
mapview.setOnMapLongClickListener(mid)
mapview.onMarkerClick.connect(self.on_marker_clicked)
mapview.setOnMarkerClickListener(self.map.getId())
mapview.onMarkerDragStart.connect(self.on_marker_drag_start)
mapview.onMarkerDrag.connect(self.on_marker_drag)
mapview.onMarkerDragEnd.connect(self.on_marker_drag_end)
mapview.setOnMarkerDragListener(mid)
mapview.onInfoWindowClick.connect(self.on_info_window_clicked)
mapview.onInfoWindowLongClick.connect(self.on_info_window_long_clicked)
mapview.onInfoWindowClose.connect(self.on_info_window_closed)
mapview.setOnInfoWindowClickListener(mid)
mapview.setOnInfoWindowCloseListener(mid)
mapview.setOnInfoWindowLongClickListener(mid)
mapview.onPolygonClick.connect(self.on_poly_clicked)
mapview.onPolylineClick.connect(self.on_poly_clicked)
mapview.setOnPolygonClickListener(mid)
mapview.setOnPolylineClickListener(mid)
mapview.onCircleClick.connect(self.on_circle_clicked)
mapview.setOnCircleClickListener(mid) | Add markers, polys, callouts, etc.. |
18,427 | def _open_ds_from_store(fname, store_mod=None, store_cls=None, **kwargs):
if isinstance(fname, xr.Dataset):
return fname
if not isstring(fname):
try:
fname[0]
except TypeError:
pass
else:
if store_mod is not None and store_cls is not None:
if isstring(store_mod):
store_mod = repeat(store_mod)
if isstring(store_cls):
store_cls = repeat(store_cls)
fname = [_open_store(sm, sc, f)
for sm, sc, f in zip(store_mod, store_cls, fname)]
kwargs['engine'] = None  # keyword names lost in extraction; 'engine' and 'lock' are assumptions
kwargs['lock'] = False
return open_mfdataset(fname, **kwargs)
if store_mod is not None and store_cls is not None:
fname = _open_store(store_mod, store_cls, fname)
return open_dataset(fname, **kwargs) | Open a dataset and return it |
18,428 | def convert_user_type(cls, name, value):
names = value._fields
values = [cls.convert_value(name, getattr(value, name)) for name in names]
return cls.generate_data_dict(names, values) | Converts a user type to RECORD that contains n fields, where n is the
number of attributes. Each element in the user type class will be converted to its
corresponding data type in BQ. |
18,429 | def merge_offsets_metadata(topics, *offsets_responses):
result = dict()
for topic in topics:
partition_offsets = [
response[topic]
for response in offsets_responses
if topic in response
]
result[topic] = merge_partition_offsets(*partition_offsets)
return result | Merge the offset metadata dictionaries from multiple responses.
:param topics: list of topics
:param offsets_responses: list of dict topic: partition: offset
:returns: dict topic: partition: offset |
18,430 | def _buildTemplates(self):
contents = self._renderTemplate("html-multi/index.html", extraContext={"theme": self.theme, "index_page_flag" : True})
FILE_NAME = "index.html"
main_url = self._save2File(contents, FILE_NAME, self.output_path)
contents = self._renderTemplate("html-multi/statistics.html", extraContext={"theme": self.theme})
FILE_NAME = "statistics.html"
self._save2File(contents, FILE_NAME, self.output_path)
if False:
contents = self._renderTemplate("html-multi/viz_list.html", extraContext={"theme": self.theme})
FILE_NAME = "visualizations.html"
self._save2File(contents, FILE_NAME, self.output_path)
browser_output_path = self.output_path
extra_context = {"ontograph": self.ontospy_graph, "theme": self.theme}
contents = self._renderTemplate("html-multi/browser/browser_entities_az.html", extraContext=extra_context)
FILE_NAME = "entities-az.html"
self._save2File(contents, FILE_NAME, browser_output_path)
if self.ontospy_graph.all_classes:
extra_context = {"ontograph": self.ontospy_graph, "theme": self.theme,
"treetype" : "classes",
"treetable" : formatHTML_EntityTreeTable(self.ontospy_graph.ontologyClassTree())}
contents = self._renderTemplate("html-multi/browser/browser_entities_tree.html", extraContext=extra_context)
FILE_NAME = "entities-tree-classes.html"
self._save2File(contents, FILE_NAME, browser_output_path)
for entity in self.ontospy_graph.all_classes:
extra_context = {"main_entity": entity,
"main_entity_type": "class",
"theme": self.theme,
"ontograph": self.ontospy_graph
}
extra_context.update(self.highlight_code(entity))
contents = self._renderTemplate("html-multi/browser/browser_classinfo.html", extraContext=extra_context)
FILE_NAME = entity.slug + ".html"
self._save2File(contents, FILE_NAME, browser_output_path)
if self.ontospy_graph.all_properties:
extra_context = {"ontograph": self.ontospy_graph, "theme": self.theme,
"treetype" : "properties",
"treetable" : formatHTML_EntityTreeTable(self.ontospy_graph.ontologyPropTree())}
contents = self._renderTemplate("html-multi/browser/browser_entities_tree.html", extraContext=extra_context)
FILE_NAME = "entities-tree-properties.html"
self._save2File(contents, FILE_NAME, browser_output_path)
for entity in self.ontospy_graph.all_properties:
extra_context = {"main_entity": entity,
"main_entity_type": "property",
"theme": self.theme,
"ontograph": self.ontospy_graph
}
extra_context.update(self.highlight_code(entity))
contents = self._renderTemplate("html-multi/browser/browser_propinfo.html", extraContext=extra_context)
FILE_NAME = entity.slug + ".html"
self._save2File(contents, FILE_NAME, browser_output_path)
if self.ontospy_graph.all_skos_concepts:
extra_context = {"ontograph": self.ontospy_graph, "theme": self.theme,
"treetype" : "concepts",
"treetable" : formatHTML_EntityTreeTable(self.ontospy_graph.ontologyConceptTree())}
contents = self._renderTemplate("html-multi/browser/browser_entities_tree.html", extraContext=extra_context)
FILE_NAME = "entities-tree-concepts.html"
self._save2File(contents, FILE_NAME, browser_output_path)
for entity in self.ontospy_graph.all_skos_concepts:
extra_context = {"main_entity": entity,
"main_entity_type": "concept",
"theme": self.theme,
"ontograph": self.ontospy_graph
}
extra_context.update(self.highlight_code(entity))
contents = self._renderTemplate("html-multi/browser/browser_conceptinfo.html", extraContext=extra_context)
FILE_NAME = entity.slug + ".html"
self._save2File(contents, FILE_NAME, browser_output_path)
if self.ontospy_graph.all_shapes:
extra_context = {"ontograph": self.ontospy_graph, "theme": self.theme,
"treetype" : "shapes", : formatHTML_EntityTreeTable(self.ontospy_graph.ontologyShapeTree()) }
contents = self._renderTemplate("html-multi/browser/browser_entities_tree.html", extraContext=extra_context)
FILE_NAME = "entities-tree-shapes.html"
self._save2File(contents, FILE_NAME, browser_output_path)
for entity in self.ontospy_graph.all_shapes:
extra_context = {"main_entity": entity,
"main_entity_type": "shape",
"theme": self.theme,
"ontograph": self.ontospy_graph
}
extra_context.update(self.highlight_code(entity))
contents = self._renderTemplate("html-multi/browser/browser_shapeinfo.html", extraContext=extra_context)
FILE_NAME = entity.slug + ".html"
self._save2File(contents, FILE_NAME, browser_output_path)
return main_url | OVERRIDING THIS METHOD from Factory |
18,431 | def create_surface_grid(nr_electrodes=None, spacing=None,
electrodes_x=None,
depth=None,
left=None,
right=None,
char_lengths=None,
lines=None,
debug=False,
workdir=None):
if(electrodes_x is None and
(nr_electrodes is None or spacing is None)):
raise Exception(
'You must provide either electrodes_x, or ' +
'both nr_electrodes and spacing'  # message reconstructed; original literals were lost in extraction
)
if electrodes_x is None:
electrodes = np.array(
[(x, 0.0) for x in np.arange(0.0, nr_electrodes)]
)
electrodes[:, 0] = electrodes[:, 0] * spacing
electrodes = np.round(electrodes, 3)
else:
nr_electrodes = len(electrodes_x)
electrodes = np.hstack((electrodes_x, np.zeros_like(electrodes_x)))
max_distance = np.abs(
np.max(electrodes[:, 0]) - np.min(electrodes[:, 0])
)
minx = electrodes[:, 0].min()
maxx = electrodes[:, 0].max()
if left is None:
left = max_distance / 4
if right is None:
right = max_distance / 4
if depth is None:
depth = max_distance / 2
minimum_x = minx - left
maximum_x = maxx + right
minimum_z = -depth
maximum_z = 0
boundary_noflow = 11
boundary_mixed = 12
extra_lines = []
add_boundary_nodes_left = []
add_boundary_nodes_right = []
if lines is not None:
lines = np.array(lines)
lines[np.where(np.array(lines) < 0)] *= -1
lines = sorted(lines)
for line_depth in lines:
extra_lines.append(
(minimum_x, -line_depth, maximum_x, -line_depth)
)
add_boundary_nodes_left.append(
(minimum_x, -line_depth, boundary_mixed)
)
add_boundary_nodes_right.append(
(maximum_x, -line_depth, boundary_mixed)
)
add_boundary_nodes_left = np.array(add_boundary_nodes_left)[::-1]
add_boundary_nodes_right = np.array(add_boundary_nodes_right)
surface_electrodes = np.hstack((
electrodes, boundary_noflow * np.ones((electrodes.shape[0], 1))
))
boundaries = np.vstack((
(minimum_x, 0, boundary_noflow),
surface_electrodes,
(maximum_x, maximum_z, boundary_mixed),
))
if len(add_boundary_nodes_right) != 0:
boundaries = np.vstack((
boundaries,
add_boundary_nodes_right,
))
boundaries = np.vstack((
boundaries,
(maximum_x, minimum_z, boundary_mixed),
(minimum_x, minimum_z, boundary_mixed),
))
if len(add_boundary_nodes_left) != 0:
boundaries = np.vstack(
(
boundaries,
add_boundary_nodes_left,
)
)
if char_lengths is None:
char_lengths = [spacing / 3.0, ]
if workdir is None:
tempdir_obj = tempfile.TemporaryDirectory()
tempdir = tempdir_obj.name
else:
if not os.path.isdir(workdir):
os.makedirs(workdir)
tempdir = workdir
# file names and number formats below were lost in extraction; restored to the standard cr_trig_create input files
np.savetxt(
tempdir + os.sep + 'electrodes.dat', electrodes,
fmt='%.4f %.4f'
)
np.savetxt(tempdir + os.sep + 'boundaries.dat', boundaries,
fmt='%.4f %.4f %i')
np.savetxt(
tempdir + os.sep + 'char_length.dat',
np.atleast_1d(char_lengths)
)
if extra_lines:
np.savetxt(
tempdir + os.sep + 'extra_lines.dat',
np.atleast_2d(extra_lines),
fmt='%.4f %.4f %.4f %.4f'
)
pwd = os.getcwd()
os.chdir(tempdir)
try:
if debug:
subprocess.call(
'cr_trig_create grid',  # command string lost in extraction; reconstructed
shell=True,
)
else:
subprocess.check_output(
'cr_trig_create grid',
shell=True,
)
except subprocess.CalledProcessError as e:
print('There was an error during the execution of cr_trig_create:')
print(e.returncode)
print(e.output)
import shutil
shutil.copytree(tempdir, pwd + os.sep + 'cr_trig_create_failed')  # target directory name lost in extraction; placeholder
exit()
finally:
os.chdir(pwd)
grid = crt_grid(
elem_file=tempdir + os.sep + 'grid' + os.sep + 'elem.dat',
elec_file=tempdir + os.sep + 'grid' + os.sep + 'elec.dat',
)
if workdir is None:
tempdir_obj.cleanup()
return grid | This is a simple wrapper for cr_trig_create to create simple surface
grids.
Automatically generated electrode positions are rounded to the third
digit.
Parameters
----------
nr_electrodes: int, optional
the number of surface electrodes
spacing: float, optional
the spacing between electrodes, usually in [m], required if nr of
electrodes is given
electrodes_x: array, optional
x-electrode positions can be provided here, e.g., for
non-equidistant electrode distances
depth: float, optional
the depth of the grid. If not given, this is computed as half the
maximum distance between electrodes
left: float, optional
the space allocated left of the first electrode. If not given,
compute as a fourth of the maximum inter-electrode distance
right: float, optional
the space allocated right of the first electrode. If not given,
compute as a fourth of the maximum inter-electrode distance
char_lengths: float|list of 4 floats, optional
characteristic lengths, as used by cr_trig_create
lines: list of floats, optional
at the given depths, add horizontal lines in the grid. Note that
all positive values will be multiplied by -1!
debug: bool, optional
default: False. If true, don't hide the output of cr_trig_create
workdir: string, optional
if set, use this directory to create the grid. Don't delete files
afterwards.
Returns
-------
grid: :class:`crtomo.grid.crt_grid` instance
the generated grid
Examples
--------
>>> from crtomo.grid import crt_grid
>>> grid = crt_grid.create_surface_grid(40, spacing=0.25, depth=5,
... left=2, right=2, char_lengths=[0.1, 0.5, 0.1, 0.5],
... lines=[0.4, 0.8], debug=False, workdir=None)
>>> import pylab as plt
>>> fig, ax = plt.subplots()
>>> grid.plot_grid_to_ax(ax) |
18,432 | def _handle_http_error(self, url, response_obj, status_code, psp_ref,
raw_request, raw_response, headers, message):
if status_code == 404:
if url == self.merchant_specific_url:
erstr = "Received a 404 for url:. Please ensure that" \
" the custom merchant specific url is correct" \
.format(url)
raise AdyenAPICommunicationError(erstr,
error_code=response_obj.get(
"errorCode"))
else:
erstr = "Unexpected error while communicating with Adyen." \
" Please reach out to [email protected]" \
" if the problem persists"
raise AdyenAPICommunicationError(erstr,
raw_request=raw_request,
raw_response=raw_response,
url=url,
psp=psp_ref,
headers=headers,
error_code=response_obj.get(
"errorCode"))
elif status_code == 400:
erstr = "Received validation error with errorCode: %s," \
" message: %s, HTTP Code: %s. Please verify" \
" the values provided. Please reach out" \
" to [email protected] if the problem persists," \
" providing the PSP reference: %s" % (
response_obj["errorCode"], response_obj["message"],
status_code, psp_ref)
raise AdyenAPIValidationError(erstr, error_code=response_obj.get(
"errorCode"))
elif status_code == 401:
# The 401 branch and the generic fallback below were garbled in extraction; messages reconstructed approximately.
erstr = "Unable to authenticate with Adyen. Please verify that the " \
"webservice user and merchantAccount exist and that you have " \
"access to them. Please reach out to [email protected] if the " \
"problem persists, providing the PSP reference: %s" % psp_ref
raise AdyenAPIAuthenticationError(erstr, error_code=response_obj.get("errorCode"))
else:
raise AdyenAPICommunicationError(
"Unexpected error while communicating with Adyen. Received the response data: '{}', "
"HTTP Code: '{}'. Please reach out to [email protected] if the problem persists"
" with the psp: {}".format(raw_response, status_code, psp_ref),
status_code=status_code,
raw_request=raw_request,
raw_response=raw_response,
url=url,
psp=psp_ref,
headers=headers, error_code=response_obj.get("errorCode")) | This function handles the non 200 responses from Adyen, raising an
error that should provide more information.
Args:
url (str): url of the request
response_obj (dict): Dict containing the parsed JSON response from
Adyen
status_code (int): HTTP status code of the request
psp_ref (str): Psp reference of the request attempt
raw_request (str): The raw request placed to Adyen
raw_response (str): The raw response(body) returned by Adyen
headers(dict): headers of the response
Returns:
None |
18,433 | def intersect_leaderboards(self, destination, keys, aggregate='SUM'):
keys.insert(0, self.leaderboard_name)
self.redis_connection.zinterstore(destination, keys, aggregate) | Intersect leaderboards given by keys with this leaderboard into a named destination leaderboard.
@param destination [String] Destination leaderboard name.
@param keys [Array] Leaderboards to be merged with the current leaderboard.
@param options [Hash] Options for intersecting the leaderboards. |
18,434 | def _arg(self, line):
# The body was mangled in extraction (the docstring leaked into the code column); reconstructed minimally.
line = self._setup('ARG', line)
bot.warning("ARG is not supported for Singularity! To get %s" %line[0])
bot.warning("in the container, on host export SINGULARITY_%s" %line[0]) | singularity doesn't have support for ARG, so instead will issue
a warning to the console for the user to export the variable
with SINGULARITY prefixed at build.
Parameters
==========
line: the line from the recipe file to parse for ARG |
18,435 | def file_list(*packages, **kwargs):
# Body was collapsed in extraction; reconstructed along the lines of Salt's dpkg lowpkg module.
errors, ret, pkgs = [], set(), {}
out = __salt__['cmd.run_all']('dpkg -l {0}'.format(' '.join(packages)))
if out['retcode'] != 0:
msg = 'Error:  ' + out['stderr']
log.error(msg)
return msg
for line in out['stdout'].splitlines():
if line.startswith('ii '):
comps = line.split()
pkgs[comps[1]] = {'version': comps[2], 'description': ' '.join(comps[3:])}
if 'No packages found' in line:
errors.append(line)
for pkg in pkgs:
cmd = 'dpkg -L {0}'.format(pkg)
ret.update(__salt__['cmd.run'](cmd).splitlines())
return {'errors': errors, 'files': list(ret)} | List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not
generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' lowpkg.file_list httpd
salt '*' lowpkg.file_list httpd postfix
salt '*' lowpkg.file_list |
18,436 | def insertRnaQuantificationSet(self, rnaQuantificationSet):
try:
models.Rnaquantificationset.create(
id=rnaQuantificationSet.getId(),
datasetid=rnaQuantificationSet.getParentContainer().getId(),
referencesetid=rnaQuantificationSet.getReferenceSet().getId(),
name=rnaQuantificationSet.getLocalId(),
dataurl=rnaQuantificationSet.getDataUrl(),
attributes=json.dumps(rnaQuantificationSet.getAttributes()))
except Exception:
raise exceptions.DuplicateNameException(
rnaQuantificationSet.getLocalId(),
rnaQuantificationSet.getParentContainer().getLocalId()) | Inserts a the specified rnaQuantificationSet into this repository. |
18,437 | async def main() -> None:
logging.basicConfig(level=logging.INFO)
async with ClientSession() as websession:
try:
client = Client(websession)
await client.profile.login('<EMAIL>', '<PASSWORD>')  # credential placeholders; original literals lost in extraction
_LOGGER.info('Account ID: %s', client.profile.account_id)
summary = await client.profile.summary()
_LOGGER.info('Account summary: %s', summary)
packages = await client.profile.packages()
_LOGGER.info('Packages: %s', packages)
except SeventeenTrackError as err:
print(err) | Create the aiohttp session and run the example. |
18,438 | def embedding_density(
adata: AnnData,
basis: str,
*,
groupby: Optional[str] = None,
key_added: Optional[str] = None,
components: Union[str, Sequence[str]] = None
):
# String literals below were lost in extraction; restored following scanpy's embedding_density implementation.
sanitize_anndata(adata)
logg.info('computing density on \'{}\''.format(basis), r=True)
basis = basis.lower()
if basis == 'fa':
basis = 'draw_graph_fa'
if 'X_'+basis not in adata.obsm_keys():
raise ValueError(
'Cannot find the embedded representation `adata.obsm[\'X_{}\']`. Compute the embedding first.'.format(basis))
if components is None: components = '1,2'
if isinstance(components, str): components = components.split(',')
components = np.array(components).astype(int) - 1
if len(components) != 2:
raise ValueError('Please specify exactly 2 components, or `None`.')
if basis == 'diffmap': components += 1
if groupby is not None:
if groupby not in adata.obs:
raise ValueError('Could not find {!r} `.obs` column.'.format(groupby))
if adata.obs[groupby].dtype.name != 'category':
raise ValueError('{!r} column does not contain Categorical data.'.format(groupby))
if len(adata.obs[groupby].cat.categories) > 10:
raise ValueError('More than 10 categories in {!r} column.'.format(groupby))
if key_added is not None:
density_covariate = key_added
elif groupby is not None:
density_covariate = basis+'_density_'+groupby
else:
density_covariate = basis+'_density'
if groupby is not None:
categories = adata.obs[groupby].cat.categories
density_values = np.zeros(adata.n_obs)
for cat in categories:
cat_mask = adata.obs[groupby] == cat
embed_x = adata.obsm['X_'+basis][cat_mask, components[0]]
embed_y = adata.obsm['X_'+basis][cat_mask, components[1]]
dens_embed = _calc_density(embed_x, embed_y)
density_values[cat_mask] = dens_embed
adata.obs[density_covariate] = density_values
else:
embed_x = adata.obsm['X_'+basis][:, components[0]]
embed_y = adata.obsm['X_'+basis][:, components[1]]
adata.obs[density_covariate] = _calc_density(embed_x, embed_y)
if basis != 'diffmap': components += 1
adata.uns[density_covariate+'_params'] = {'covariate': groupby, 'components': components.tolist()}
logg.hint('added\n'
'    \'{}\', densities (adata.obs)\n'
'    \'{}_params\', parameter (adata.uns)'.format(density_covariate, density_covariate))
return None | Calculate the density of cells in an embedding (per condition)
Gaussian kernel density estimation is used to calculate the density of
cells in an embedded space. This can be performed per category over a
categorical cell annotation. The cell density can be plotted using the
`sc.pl.embedding_density()` function.
Note that density values are scaled to be between 0 and 1. Thus, the
density value at each cell is only comparable to other densities in
the same condition category.
This function was written by Sophie Tritschler and implemented into
Scanpy by Malte Luecken.
Parameters
----------
adata
The annotated data matrix.
basis
The embedding over which the density will be calculated. This embedded
representation should be found in `adata.obsm['X_[basis]']``.
groupby
Keys for categorical observation/cell annotation for which densities
are calculated per category. Columns with up to ten categories are
accepted.
key_added
Name of the `.obs` covariate that will be added with the density
estimates.
components
The embedding dimensions over which the density should be calculated.
This is limited to two components.
Returns
-------
Updates `adata.obs` with an additional field specified by the `key_added`
parameter. This parameter defaults to `[basis]_density_[groupby]`, where
where `[basis]` is one of `umap`, `diffmap`, `pca`, `tsne`, or `draw_graph_fa`
and `[groupby]` denotes the parameter input.
Updates `adata.uns` with an additional field `[key_added]_params`.
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.tl.umap(adata)
>>> sc.tl.embedding_density(adata, basis='umap', groupby='phase')
>>> sc.pl.embedding_density(adata, basis='umap', key='umap_density_phase',
... group='G1')
>>> sc.pl.embedding_density(adata, basis='umap', key='umap_density_phase',
... group='S') |
18,439 | def build_docs(location="doc-source", target=None, library="icetea_lib"):
cmd_ar = ["sphinx-apidoc", "-o", location, library]
try:
print("Generating api docs.")
retcode = check_call(cmd_ar)
except CalledProcessError as error:
print("Documentation build failed. Return code: {}".format(error.returncode))
return 3
except OSError as error:
print(error)
print("Documentation build failed. Are you missing Sphinx? Please install sphinx using "
".")
return 3
target = "doc{}html".format(os.sep) if target is None else target
cmd_ar = ["sphinx-build", "-b", "html", location, target]
try:
print("Building html documentation.")
retcode = check_call(cmd_ar)
except CalledProcessError as error:
print("Documentation build failed. Return code: {}".format(error.returncode))
return 3
except OSError as error:
print(error)
print("Documentation build failed. Are you missing Sphinx? Please install sphinx using "
".")
return 3
print("Documentation built.")
return 0 | Build documentation for Icetea. Start by autogenerating module documentation
and finish by building html.
:param location: Documentation source
:param target: Documentation target path
:param library: Library location for autodoc.
:return: -1 if something fails. 0 if successfull. |
18,440 | def _gerritCmd(self, *args):
if self.gerrit_identity_file is not None:
options = ['-i', self.gerrit_identity_file]
else:
options = []
return ['ssh'] + options + [
'@'.join((self.gerrit_username, self.gerrit_server)),
'-p', str(self.gerrit_port),
] + list(args) | Construct a command as a list of strings suitable for
:func:`subprocess.call`. |
18,441 | def analyze_bash_vars(job_input_file, job_homedir):
{"$dnanexus_link": "file-xxxx"}{"$dnanexus_link": "file-yyyy"}
_, file_entries, rest_hash = get_job_input_filenames(job_input_file)
patterns_dict = get_input_spec_patterns()
def get_prefix(basename, key):
best_prefix = None
patterns = patterns_dict.get(key)
if patterns is not None:
for pattern in patterns:
if fnmatch.fnmatch(basename, pattern):
_, _, right_piece = pattern.rpartition("*")
best_prefix = choose_shorter_string(best_prefix, basename[:-len(right_piece)])
if best_prefix is not None:
return best_prefix
else:
parts = os.path.splitext(basename)
if parts[1] == ".gz":
parts = os.path.splitext(parts[0])
return parts[0]
def factory():
return {'handler': [], 'basename': [], 'prefix': [], 'path': []}  # key names restored from the dx-toolkit implementation
file_key_descs = collections.defaultdict(factory)
rel_home_dir = get_input_dir(job_homedir)
for key, entries in list(file_entries.items()):
for entry in entries:
filename = entry['trg_fname']  # entry key names reconstructed; original literals were lost in extraction
basename = os.path.basename(filename)
prefix = get_prefix(basename, key)
k_desc = file_key_descs[key]
k_desc['handler'].append(entry['handler'])
k_desc['basename'].append(basename)
k_desc['prefix'].append(prefix)
k_desc['path'].append(os.path.join(rel_home_dir, filename))
return file_key_descs, rest_hash | This function examines the input file, and calculates variables to
instantiate in the shell environment. It is called right before starting the
execution of an app in a worker.
For each input key, we want to have
$var
$var_filename
$var_prefix
remove last dot (+gz), and/or remove patterns
$var_path
$HOME/in/var/$var_filename
For example,
$HOME/in/genes/A.txt
B.txt
export genes=('{"$dnanexus_link": "file-xxxx"}' '{"$dnanexus_link": "file-yyyy"}')
export genes_filename=("A.txt" "B.txt")
export genes_prefix=("A" "B")
export genes_path=("$HOME/in/genes/A.txt" "$HOME/in/genes/B.txt")
If there are patterns defined in the input spec, then the prefix respects them.
Here are several examples, where the patterns are:
*.bam, *.bwa-index.tar.gz, foo*.sam, z*ra.sam
file name prefix matches
foo.zed.bam foo.zed *.bam
xxx.bwa-index.tar.gz xxx *.bwa-index.tar.gz
food.sam food foo*.sam
zebra.sam zebra z*ra.sam
xx.c xx
xx.c.gz xx
The only patterns we recognize are of the form x*.y. For example:
legal *.sam, *.c.py, foo*.sam, a*b*c.baz
ignored uu.txt x???.tar mon[a-z].py |
18,442 | def _http_put(self, url, data, **kwargs):
kwargs.update({'data': json.dumps(data)})  # payload key name lost in extraction; 'data' is an assumption
return self._http_request('PUT', url, kwargs) | Performs the HTTP PUT request. |
18,443 | def set_imap(self, imap, callback=True):
self.imap = imap
self.calc_imap()
with self.suppress_changed:
self.recalc()
self.t_.set(intensity_map=imap.name, callback=False) | Set the intensity map used by this RGBMapper.
`imap` specifies an IntensityMap object. If `callback` is True, then
any callbacks associated with this change will be invoked. |
18,444 | def subset(self, service=None):
if service is None:
for serviceName in self.ncssServiceNames:
if serviceName in self.access_urls:
service = serviceName
break
else:
raise RuntimeError('Subset access is not available for this dataset.')
elif service not in self.ncssServiceNames:
raise ValueError(service + ' is not a valid service for subsetting; valid services are: '
+ ', '.join(self.ncssServiceNames))
return self.access_with_service(service) | Subset the dataset.
Open the remote dataset and get a client for talking to ``service``.
Parameters
----------
service : str, optional
The name of the service for subsetting the dataset. Defaults to 'NetcdfSubset'
or 'NetcdfServer', in that order, depending on the services listed in the
catalog.
Returns
-------
a client for communicating using ``service`` |
18,445 | def get_config_parameter_boolean(config: ConfigParser,
section: str,
param: str,
default: bool) -> bool:
try:
value = config.getboolean(section, param)
except (TypeError, ValueError, NoOptionError):
log.warning(
"Configuration variable {} not found or improper in section [{}]; "
"using default of {!r}", param, section, default)
value = default
return value | Get Boolean parameter from ``configparser`` ``.INI`` file.
Args:
config: :class:`ConfigParser` object
section: section name within config file
param: name of parameter within section
default: default value
Returns:
parameter value, or default |
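A short usage sketch for the helper above; the section and option names are hypothetical:
from configparser import ConfigParser
config = ConfigParser()
config.read_string("[server]\ndebug = yes\n")
get_config_parameter_boolean(config, "server", "debug", default=False)    # -> True
get_config_parameter_boolean(config, "server", "missing", default=True)   # logs a warning and returns the default, True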
18,446 | def search(self, query, nid=None):
r = self.request(
method="network.search",
nid=nid,
data=dict(query=query)
)
return self._handle_error(r, "Search with query failed."
.format(query)) | Search for posts with ``query``
:type nid: str
:param nid: This is the ID of the network to get the feed
from. This is optional and only to override the existing
`network_id` entered when created the class
:type query: str
:param query: The search query; should just be keywords for posts
that you are looking for |
18,447 | def get_secret(
end_state: NettingChannelEndState,
secrethash: SecretHash,
) -> Optional[Secret]:
partial_unlock_proof = end_state.secrethashes_to_unlockedlocks.get(secrethash)
if partial_unlock_proof is None:
partial_unlock_proof = end_state.secrethashes_to_onchain_unlockedlocks.get(secrethash)
if partial_unlock_proof is not None:
return partial_unlock_proof.secret
return None | Returns `secret` if the `secrethash` is for a lock with a known secret. |
18,448 | def setRequest(self, endPointReference, action):
self._action = action
self.header_pyobjs = None
pyobjs = []
namespaceURI = self.wsAddressURI
addressTo = self._addressTo
messageID = self._messageID = "uuid:%s" %time.time()
typecode = GED(namespaceURI, "MessageID")
pyobjs.append(typecode.pyclass(messageID))
typecode = GED(namespaceURI, "Action")
pyobjs.append(typecode.pyclass(action))
typecode = GED(namespaceURI, "To")
pyobjs.append(typecode.pyclass(addressTo))
typecode = GED(namespaceURI, "From")
mihFrom = typecode.pyclass()
mihFrom._Address = self.anonymousURI
pyobjs.append(mihFrom)
if endPointReference:
# Attribute names and messages below were lost in extraction; reconstructed approximately for this ZSI WS-Addressing helper.
if hasattr(endPointReference, 'typecode') is False:
raise EvaluateException, 'endPointReference must have a typecode attribute'
if isinstance(endPointReference.typecode, \
GTD(namespaceURI ,'EndpointReferenceType')) is False:
raise EvaluateException, \
'endPointReference must be of type %s' %GTD(namespaceURI ,'EndpointReferenceType')
ReferenceProperties = getattr(endPointReference, '_ReferenceProperties', None)
if ReferenceProperties is not None:
for v in getattr(ReferenceProperties, '_any', ()):
if not hasattr(v, 'typecode'):
raise EvaluateException, 'untypeable element in ReferenceProperties'
pyobjs.append(v)
self.header_pyobjs = tuple(pyobjs) | Call For Request |
18,449 | def get_logger(name):
log = logging.getLogger(name)
if not log.handlers:
log.addHandler(NullHandler())
return log | Return logger with null handle |
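A minimal usage sketch for the helper above; the logger name is illustrative:
log = get_logger("my_module")
log.debug("stays silent unless the application attaches a real handler")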
18,450 | def date_struct_nn(year, month, day, tz="UTC"):
if not day:
day = 1
if not month:
month = 1
return date_struct(year, month, day, tz) | Assemble a date object but if day or month is none set them to 1
to make it easier to deal with partial dates |
18,451 | def command(execute=None):
if connexion.request.is_json:
execute = Execute.from_dict(connexion.request.get_json())
return | Execute a Command
Execute a command # noqa: E501
:param execute: The data needed to execute this command
:type execute: dict | bytes
:rtype: Response |
18,452 | def _init_valid_functions(action_dimensions):
sizes = {
"screen": tuple(int(i) for i in action_dimensions.screen),
"screen2": tuple(int(i) for i in action_dimensions.screen),
"minimap": tuple(int(i) for i in action_dimensions.minimap),
}
types = actions.Arguments(*[
actions.ArgumentType.spec(t.id, t.name, sizes.get(t.name, t.sizes))
for t in actions.TYPES])
functions = actions.Functions([
actions.Function.spec(f.id, f.name, tuple(types[t.id] for t in f.args))
for f in actions.FUNCTIONS])
return actions.ValidActions(types, functions) | Initialize ValidFunctions and set up the callbacks. |
18,453 | def _get_offset(text, visible_width, unicode_aware=True):
result = 0
width = 0
if unicode_aware:
for c in text:
if visible_width - width <= 0:
break
result += 1
width += wcwidth(c)
if visible_width - width < 0:
result -= 1
else:
result = min(len(text), visible_width)
return result | Find the character offset within some text for a given visible offset (taking into account the
fact that some character glyphs are double width).
:param text: The text to analyze
:param visible_width: The required location within that text (as seen on screen).
:return: The offset within text (as a character offset within the string). |
18,454 | def get_file_type(filename):
txt_extensions = [".txt", ".dat", ".csv"]
hdf_extensions = [".hdf", ".h5", ".bkup", ".checkpoint"]
for ext in hdf_extensions:
if filename.endswith(ext):
with _h5py.File(filename, 'r') as fp:
filetype = fp.attrs['filetype']
return filetypes[filetype]
for ext in txt_extensions:
if filename.endswith(ext):
return InferenceTXTFile
raise TypeError("Extension is not supported.") | Returns I/O object to use for file.
Parameters
----------
filename : str
Name of file.
Returns
-------
file_type : {InferenceFile, InferenceTXTFile}
The type of inference file object to use. |
18,455 | def _format_explain(self):
lines = []
for (command, kwargs) in self._call_list:
lines.append(command + " " + pformat(kwargs))
return "\n".join(lines) | Format the results of an EXPLAIN |
18,456 | def convert_graph(self, graph_file, input_format, output_formats,
email=None, use_threads=False, callback=None):
if email is None:
email = self.email
if input_format not in GraphFormats._any:
raise ValueError("Invalid input format {}.".format(input_format))
if not set(output_formats) <= set(GraphFormats._any):
raise ValueError("Output formats must be a GraphFormats.")
if use_threads and callback is not None:
if not hasattr(callback, '__call__'):
raise ValueError("callback must be a function.")
if len(inspect.getargspec(callback).args) != 1:
raise ValueError("callback must take exactly 1 argument.")
if not (os.path.exists(graph_file)):
raise ValueError("No such file, {}!".format(graph_file))
url = "convert/{}/{}/{}/l".format(
email,
input_format,
','.join(output_formats)
)
if " " in url:
raise ValueError("Spaces are not permitted in arguments.")
if use_threads:
convert_thread = threading.Thread(
target=self._run_convert_graph,
args=[url, graph_file, callback]
)
convert_thread.start()
else:
return self._run_convert_graph(url, graph_file)
return | Convert a graph from one GraphFormat to another.
Arguments:
graph_file (str): Filename of the file to convert
input_format (str): A grute.GraphFormats
output_formats (str[]): A grute.GraphFormats
email (str: self.email)*: The email to notify
use_threads (bool: False)*: Whether to use Python threads to run
computation in the background when waiting for the server
callback (function: None)*: The function to run upon completion of
the call, if using threads. (Will not be called if use_threads
is set to False.)
Returns:
HTTP Response if use_threads=False. Else, no return value.
Raises:
RemoteDataUploadError: If there's an issue uploading the data
RemoteError: If there's a server-side issue
ValueError: If there's a problem with the supplied arguments |
18,457 | async def _request(self, *, http_verb, api_url, req_args):
if self.session and not self.session.closed:
async with self.session.request(http_verb, api_url, **req_args) as res:
self._logger.debug("Ran the request with existing session.")
return {
"data": await res.json(),
"headers": res.headers,
"status_code": res.status,
}
async with aiohttp.ClientSession(
loop=self._event_loop, timeout=aiohttp.ClientTimeout(total=self.timeout)
) as session:
async with session.request(http_verb, api_url, **req_args) as res:
self._logger.debug("Ran the request with a new session.")
return {
"data": await res.json(),
"headers": res.headers,
"status_code": res.status,
} | Submit the HTTP request with the running session or a new session.
Returns:
A dictionary of the response data. |
18,458 | def new(expr, *args, **kwargs):
current_args, current_var_args, current_kwargs = get_vars(expr)
new_kwargs = current_kwargs.copy()
recursive_arguments = {}
for key in tuple(kwargs):
if "__" in key:
value = kwargs.pop(key)
key, _, subkey = key.partition("__")
recursive_arguments.setdefault(key, []).append((subkey, value))
for key, pairs in recursive_arguments.items():
recursed_object = current_args.get(key, current_kwargs.get(key))
if recursed_object is None:
continue
kwargs[key] = new(recursed_object, **dict(pairs))
if args:
current_var_args = args
for key, value in kwargs.items():
if key in current_args:
current_args[key] = value
else:
new_kwargs[key] = value
new_args = list(current_args.values()) + list(current_var_args)
return type(expr)(*new_args, **new_kwargs) | Template an object.
::
>>> class MyObject:
... def __init__(self, arg1, arg2, *var_args, foo=None, bar=None, **kwargs):
... self.arg1 = arg1
... self.arg2 = arg2
... self.var_args = var_args
... self.foo = foo
... self.bar = bar
... self.kwargs = kwargs
...
>>> my_object = MyObject('a', 'b', 'c', 'd', foo='x', quux=['y', 'z'])
::
>>> import uqbar
>>> new_object = uqbar.objects.new(my_object, foo=666, bar=1234)
>>> print(uqbar.objects.get_repr(new_object))
MyObject(
'a',
'b',
'c',
'd',
bar=1234,
foo=666,
quux=['y', 'z'],
)
Original object is unchanged:
::
>>> print(uqbar.objects.get_repr(my_object))
MyObject(
'a',
'b',
'c',
'd',
foo='x',
quux=['y', 'z'],
) |
18,459 | def import_log_funcs():
global g_logger
curr_mod = sys.modules[__name__]
for func_name in _logging_funcs:
func = getattr(g_logger, func_name)
setattr(curr_mod, func_name, func) | Import the common log functions from the global logger to the module. |
18,460 | def add_dimension(self, dimension, dim_pos, dim_val, vdim=False, **kwargs):
dimension = asdim(dimension)
if dimension in self.dimensions():
raise Exception('{dim} dimension already defined'.format(dim=dimension.name))
if vdim and self._deep_indexable:
raise Exception('Cannot add value dimension to object that is deep indexable')
if vdim:
dims = self.vdims[:]
dims.insert(dim_pos, dimension)
dimensions = dict(vdims=dims)
dim_pos += self.ndims
else:
dims = self.kdims[:]
dims.insert(dim_pos, dimension)
dimensions = dict(kdims=dims)
if isinstance(dim_val, basestring) or not hasattr(dim_val, '__iter__'):
dim_val = cycle([dim_val])
else:
if not len(dim_val) == len(self):
raise ValueError("Added dimension values must be same length"
"as existing keys.")
items = OrderedDict()
for dval, (key, val) in zip(dim_val, self.data.items()):
if vdim:
new_val = list(val)
new_val.insert(dim_pos, dval)
items[key] = tuple(new_val)
else:
new_key = list(key)
new_key.insert(dim_pos, dval)
items[tuple(new_key)] = val
return self.clone(items, **dict(dimensions, **kwargs)) | Adds a dimension and its values to the object
Requires the dimension name or object, the desired position in
the key dimensions and a key value scalar or sequence of the
same length as the existing keys.
Args:
dimension: Dimension or dimension spec to add
dim_pos (int) Integer index to insert dimension at
dim_val (scalar or ndarray): Dimension value(s) to add
vdim: Disabled, this type does not have value dimensions
**kwargs: Keyword arguments passed to the cloned element
Returns:
Cloned object containing the new dimension |
18,461 | def _aggregation_op(cls,
op: Callable[[tf.Tensor, Optional[Sequence[int]]], tf.Tensor],
x: 'TensorFluent',
vars_list: List[str]) -> 'TensorFluent':
axis = cls._varslist2axis(x, vars_list)
t = op(x.tensor, axis)
scope = []
for var in x.scope.as_list():
if var not in vars_list:
scope.append(var)
batch = x.batch
return TensorFluent(t, scope, batch=batch) | Returns a TensorFluent for the aggregation `op` applied to fluent `x`.
Args:
op: The aggregation operation.
x: The input fluent.
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the aggregation operator's output. |
18,462 | def error_value_processor(value, error):
if isinstance(error, (str, unicode)):
try:
if "%" in error:
error_float = float(error.replace("%", ""))
error_abs = (value/100) * error_float
return error_abs
elif error == "":
error = 0.0
else:
error = float(error)
except:
pass
return error | If an error is a percentage, we convert to a float, then
calculate the percentage of the supplied value.
:param value: base value, e.g. 10
:param error: e.g. 20.0%
:return: the absolute error, e.g. 2.0 for the above case. |
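Illustrative checks of the branches above (note the function targets Python 2, where unicode is defined; the values are made up):
assert error_value_processor(10, "20.0%") == 2.0   # 20% of 10
assert error_value_processor(10, "") == 0.0        # empty string maps to 0.0
assert error_value_processor(10, "1.5") == 1.5     # numeric strings are cast to float
assert error_value_processor(10, 3) == 3           # non-strings pass through unchanged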
18,463 | def build(self, lv2_uri):
try:
plugin = self._plugins[lv2_uri]
except KeyError:
raise Lv2EffectBuilderError(
"Lv2EffectBuilder not contains metadata information about the plugin . \n"
"Try re-scan the installed plugins using the reload method::\n"
" >>> lv2_effect_builder.reload(lv2_effect_builder.lv2_plugins_data())".format(lv2_uri))
return Lv2Effect(plugin) | Returns a new :class:`.Lv2Effect` by the valid lv2_uri
:param string lv2_uri:
:return Lv2Effect: Effect created |
18,464 | def __response_message_descriptor(self, message_type, method_id):
descriptor = {'200': {'description': 'A successful response'}}
if message_type != message_types.VoidMessage():
self.__parser.add_message(message_type.__class__)
self.__response_schema[method_id] = self.__parser.ref_for_message_type(
message_type.__class__)
descriptor['200']['schema'] = {'$ref': '#/definitions/{}'.format(
self.__response_schema[method_id])}
return dict(descriptor) | Describes the response.
Args:
message_type: messages.Message class, The message to describe.
method_id: string, Unique method identifier (e.g. 'myapi.items.method')
Returns:
Dictionary describing the response. |
18,465 | def _info_long(self) -> Optional[str]:
try:
return str(
html.unescape(self.journey.InfoTextList.InfoText.get("textL")).replace(
"<br />", "\n"
)
)
except AttributeError:
return None | Extract journey information. |
18,466 | def Case(self, caseVal, *statements):
"c-like case of switch statement"
assert self.parentStm is None
caseVal = toHVal(caseVal, self.switchOn._dtype)
assert isinstance(caseVal, Value), caseVal
assert caseVal._isFullVld(), "Cmp with invalid value"
assert caseVal not in self._case_value_index, (
"Switch statement already has case for value ", caseVal)
self.rank += 1
case = []
self._case_value_index[caseVal] = len(self.cases)
self.cases.append((caseVal, case))
cond = self.switchOn._eq(caseVal)
self._inputs.append(cond)
cond.endpoints.append(self)
self._register_stements(statements, case)
return self | c-like case of switch statement |
18,467 | def remove_node(self, node, stop=False):
if node.kind not in self.nodes:
raise NodeNotFound("Unable to remove node %s: invalid node type `%s`.",
node.name, node.kind)
else:
try:
index = self.nodes[node.kind].index(node)
if self.nodes[node.kind][index]:
del self.nodes[node.kind][index]
if stop:
node.stop()
self._naming_policy.free(node.kind, node.name)
self.repository.save_or_update(self)
except ValueError:
raise NodeNotFound("Node %s not found in cluster" % node.name) | Removes a node from the cluster.
By default, it doesn't also stop the node, just remove from
the known hosts of this cluster.
:param node: node to remove
:type node: :py:class:`Node`
:param stop: Stop the node
:type stop: bool |
18,468 | def roll(self, speed, heading, state=1):
return self.write(request.Roll(self.seq, speed, heading, state )) | speed can have value between 0x00 and 0xFF
heading can have value between 0 and 359 |
18,469 | def blockSelectionSignals( self, state ):
if ( self._selectionSignalsBlocked == state ):
return
self._selectionSignalsBlocked = state
if ( not state ):
self.emitSelectionFinished() | Sets the state for the selection finished signal. When it \
is set to True, it will emit the signal. This is used \
internally to control selection signal propagation, so \
should not really be called unless you know why you are \
calling it.
:param state <bool> |
18,470 | def _parseSCDOCDC(self, src):
while 1:
src = src.lstrip()
if src.startswith('<!--'):
src = src[4:]
elif src.startswith('-->'):
src = src[3:]
else:
break
return src | [S|CDO|CDC]* |
18,471 | def storage(self, *, resource=None):
if not isinstance(self.protocol, MSGraphProtocol):
raise RuntimeError(
'Storage is only available with the Microsoft Graph protocol')
return Storage(parent=self, main_resource=resource) | Get an instance to handle file storage (OneDrive / Sharepoint)
for the specified account resource
:param str resource: Custom resource to be used in this drive object
(Defaults to parent main_resource)
:return: a representation of OneDrive File Storage
:rtype: Storage
:raises RuntimeError: if protocol doesn't support the feature |
18,472 | def by_value(self, value, default=None):
try:
return [k for k, v in self.items() if v == value][0]
except IndexError:
if default is not None:
return default
raise ValueError('no key found for value %r' % value) | Returns the key for the given value |
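A small sketch of the reverse lookup (assumes the by_value method above is attached to a dict subclass; contents are illustrative):
class CodeMap(dict):
    by_value = by_value  # reuse the method defined above

codes = CodeMap(ok=200, not_found=404)
print(codes.by_value(404))                     # 'not_found'
print(codes.by_value(500, default='unknown'))  # fallback when the value is absent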
18,473 | def do_cd(self, line):
args = self.line_to_args(line)
if len(args) == 0:
dirname = '~'
else:
if args[0] == '-':
dirname = self.prev_dir
else:
dirname = args[0]
dirname = resolve_path(dirname)
mode = auto(get_mode, dirname)
if mode_isdir(mode):
global cur_dir
self.prev_dir = cur_dir
cur_dir = dirname
auto(chdir, dirname)
else:
print_err("Directory does not exist" % dirname) | cd DIRECTORY
Changes the current directory. ~ expansion is supported, and cd -
goes to the previous directory. |
18,474 | def get(key, default=-1):
if isinstance(key, int):
return Suite(key)
if key not in Suite._member_map_:
extend_enum(Suite, key, default)
return Suite[key] | Backport support for original codes. |
18,475 | def exec_command(cmd, in_data=b'', chdir=None, shell=None, emulate_tty=False):
assert isinstance(cmd, mitogen.core.UnicodeType)
return exec_args(
args=[get_user_shell(), '-c', cmd],
in_data=in_data,
chdir=chdir,
shell=shell,
emulate_tty=emulate_tty,
) | Run a command in a subprocess, emulating the argument handling behaviour of
SSH.
:param bytes cmd:
String command line, passed to user's shell.
:param bytes in_data:
Optional standard input for the command.
:return:
(return code, stdout bytes, stderr bytes) |
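A hedged usage sketch (the command and working directory are illustrative; cmd must be a unicode string per the assert above):
rc, stdout, stderr = exec_command(u'echo hello && pwd', chdir='/tmp')
print(rc, stdout.decode().strip())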
18,476 | def do_set_log_level(self, arg):
if arg in ['i', 'v']:
_LOGGING.info('Setting log level to %s', arg)
if arg == 'i':
_LOGGING.setLevel(logging.INFO)
_INSTEONPLM_LOGGING.setLevel(logging.INFO)
else:
_LOGGING.setLevel(logging.DEBUG)
_INSTEONPLM_LOGGING.setLevel(logging.DEBUG)
else:
_LOGGING.error('Invalid log level: %s', arg)
self.do_help() | Set the log level.
Usage:
set_log_level i|v
Parameters:
log_level: i - info | v - verbose |
18,477 | def load(self, verbose=False):
self._songs = []
page_num = 1
total_pages = 1
while page_num <= total_pages:
if verbose:
print('Loading page %d' % page_num)
page = requests.get(ARTIST_URL.format(artist=self.name,
n=page_num))
tree = html.fromstring(page.text)
song_rows_xp = r
songlist_pagination_xp = r\
rows = tree.xpath(song_rows_xp)
for row in rows:
song_link = row.xpath(r)
assert len(song_link) == 1
self._songs.append(Song(url=song_link[0].attrib['href']))
total_pages = len(tree.xpath(songlist_pagination_xp))
page_num += 1
return self | Load the list of songs.
Note that this only loads a list of songs that this artist was the main
artist of. If they were only featured in the song, that song won't be
listed here. There is a list on the artist page for that, I just
haven't added any parsing code for that, since I don't need it. |
18,478 | def register_comet_callback(self, *args, **kwargs):
sijax.plugin.comet.register_comet_callback(self._sijax, *args, **kwargs) | Registers a single Comet callback function
(see :ref:`comet-plugin`).
Refer to :func:`sijax.plugin.comet.register_comet_callback`
for more details - its signature differs slightly.
This method's signature is the same, except that the first
argument that :func:`sijax.plugin.comet.register_comet_callback`
expects is the Sijax instance, and this method
does that automatically, so you don't have to do it. |
18,479 | def in_project_directory() -> bool:
current_directory = os.path.realpath(os.curdir)
project_path = os.path.join(current_directory, 'cauldron.json')
return os.path.exists(project_path) and os.path.isfile(project_path) | Returns whether or not the current working directory is a Cauldron project
directory, which contains a cauldron.json file. |
18,480 | def plot(self, flip=False, ax_channels=None, ax=None, *args, **kwargs):
for gate in self.gates:
gate.plot(flip=flip, ax_channels=ax_channels, ax=ax, *args, **kwargs) | {_gate_plot_doc} |
18,481 | def _lazy_load_get_model():
if django is None:
def _get_model(app, model):
raise import_failure
else:
from django import apps as django_apps
_get_model = django_apps.apps.get_model
_LAZY_LOADS['get_model'] = _get_model | Lazy loading of get_model.
get_model loads django.conf.settings, which may fail if
the settings haven't been configured yet. |
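A sketch of how the lazy loader is typically consumed (the cache key 'get_model' is assumed from the assignment above):
def get_model(app_label, model_name):
    # resolve the real implementation only on first use, after Django settings exist
    if 'get_model' not in _LAZY_LOADS:
        _lazy_load_get_model()
    return _LAZY_LOADS['get_model'](app_label, model_name)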
18,482 | def get_new_selection_attr_state(self, selection, attr_key):
cell_attributes = self.grid.code_array.cell_attributes
attr_values = self.attr_toggle_values[attr_key]
attr_map = dict(zip(attr_values, attr_values[1:] + attr_values[:1]))
selection_attrs = \
(attr for attr in cell_attributes if attr[0] == selection)
attrs = {}
for selection_attr in selection_attrs:
attrs.update(selection_attr[2])
if attr_key in attrs:
return attr_map[attrs[attr_key]]
else:
return self.attr_toggle_values[attr_key][1] | Toggles new attr selection state and returns it
Parameters
----------
selection: Selection object
\tSelection for which attr toggle shall be returned
attr_key: Hashable
\tAttribute key |
18,483 | def update(self, cur_value, mesg=None):
self.cur_value = cur_value
progress = float(self.cur_value) / self.max_value
num_chars = int(progress * self.max_chars)
num_left = self.max_chars - num_chars
if mesg is not None:
self.mesg = mesg
bar = self.template.format(self.progress_character * num_chars,
' ' * num_left,
progress * 100,
self.spinner_symbols[self.spinner_index],
self.mesg)
sys.stdout.write(bar)
if self.spinner:
self.spinner_index = (self.spinner_index + 1) % self.n_spinner
sys.stdout.flush() | Update progressbar with current value of process
Parameters
----------
cur_value : number
Current value of process. Should be <= max_value (but this is not
enforced). The percent of the progressbar will be computed as
(cur_value / max_value) * 100
mesg : str
Message to display to the right of the progressbar. If None, the
last message provided will be used. To clear the current message,
pass a null string, ''. |
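A hedged usage sketch; the ProgressBar constructor signature and process_item are assumptions for illustration:
bar = ProgressBar(max_value=100, mesg='starting')
for i in range(100):
    process_item(i)                                 # placeholder for the real work
    bar.update(i + 1, mesg='item %d of 100' % (i + 1))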
18,484 | def predict(self, X):
X_ = self._check_array(X)
return exp(dot(X_, self._coef)) | Predict count for samples in X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples,]
Predicted count for each sample |
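A runnable illustration of what predict computes, using made-up fitted coefficients in place of self._coef:
import numpy as np
X_new = np.array([[1.0, 0.5], [0.2, 0.3]])
coef = np.array([0.2, -0.1])       # illustrative fitted coefficients
print(np.exp(X_new @ coef))        # predict() returns exp(X @ coef), always non-negative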
18,485 | def late():
from rez.package_resources_ import package_rex_keys
def decorated(fn):
if fn.__name__ in package_rex_keys:
raise ValueError("Cannot use @late decorator on function "
% fn.__name__)
setattr(fn, "_late", True)
_add_decorator(fn, "late")
return fn
return decorated | Used by functions in package.py that are evaluated lazily.
The term 'late' refers to the fact these package attributes are evaluated
late, ie when the attribute is queried for the first time.
If you want to implement a package.py attribute as a function, you MUST use
this decorator - otherwise it is understood that you want your attribute to
be a function, not the return value of that function. |
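A hypothetical package.py attribute using the decorator above (the attribute name and condition are illustrative):
@late()
def requires():
    # evaluated only when the 'requires' attribute is first queried
    return ['python-3'] if use_python3 else ['python-2']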
18,486 | def filter_human_only(stmts_in, **kwargs):
from indra.databases import uniprot_client
if 'remove_bound' in kwargs and kwargs['remove_bound']:
remove_bound = True
else:
remove_bound = False
dump_pkl = kwargs.get('save')
logger.info('Filtering %d statements for human genes only...' %
len(stmts_in))
stmts_out = []
def criterion(agent):
upid = agent.db_refs.get('UP')
if upid and not uniprot_client.is_human(upid):
return False
else:
return True
for st in stmts_in:
human_genes = True
for agent in st.agent_list():
if agent is not None:
if not criterion(agent):
human_genes = False
break
if remove_bound:
_remove_bound_conditions(agent, criterion)
elif _any_bound_condition_fails_criterion(agent, criterion):
human_genes = False
break
if human_genes:
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | Filter out statements that are grounded, but not to a human gene.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
remove_bound: Optional[bool]
If true, removes all bound conditions that are grounded but not to human
genes. If false (default), filters out statements with boundary
conditions that are grounded to non-human genes.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements. |
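A hedged usage sketch with two human-grounded agents (the UniProt IDs are real human entries; the output filename is illustrative):
from indra.statements import Agent, Phosphorylation
stmts = [Phosphorylation(Agent('MAP2K1', db_refs={'UP': 'Q02750'}),
                         Agent('MAPK1', db_refs={'UP': 'P28482'}))]
human_stmts = filter_human_only(stmts, save='human_only.pkl')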
18,487 | def prior_from_config(cp, prior_section='prior'):
variable_params, _ = distributions.read_params_from_config(
cp, prior_section=prior_section, vargs_section=,
sargs_section=)
constraints = distributions.read_constraints_from_config(cp)
dists = distributions.read_distributions_from_config(cp, prior_section)
return distributions.JointDistribution(variable_params, *dists,
**{"constraints": constraints}) | Loads a prior distribution from the given config file.
Parameters
----------
cp : pycbc.workflow.WorkflowConfigParser
The config file to read.
sections : list of str, optional
The sections to retrieve the prior from. If ``None`` (the default),
will look in sections starting with 'prior'.
Returns
-------
distributions.JointDistribution
The prior distribution. |
18,488 | def kill(self, sig):
if self.is_alive() and self._loop:
self._loop.call_soon_threadsafe(self._loop.stop) | Invoke the stop on the event loop method. |
18,489 | def chrome_getdata_view(request):
data = {}
if request.user.is_authenticated:
notifs = GCMNotification.objects.filter(sent_to__user=request.user).order_by("-time")
if notifs.count() > 0:
notif = notifs.first()
ndata = notif.data
if "title" in ndata and "text" in ndata:
data = {
"title": ndata[] if in ndata else ,
"text": ndata[] if in ndata else ,
"url": ndata[] if in ndata else
}
else:
schedule_chk = chrome_getdata_check(request)
if schedule_chk:
data = schedule_chk
else:
schedule_chk = chrome_getdata_check(request)
if schedule_chk:
data = schedule_chk
else:
return HttpResponse("null", content_type="text/json")
else:
schedule_chk = chrome_getdata_check(request)
if schedule_chk:
data = schedule_chk
else:
data = {"title": "Check Intranet", "text": "You have a new notification that couldn't be loaded right now."}
j = json.dumps(data)
return HttpResponse(j, content_type="text/json") | Get the data of the last notification sent to the current user.
This is needed because Chrome, as of version 44, doesn't support
sending a data payload to a notification. Thus, information on what
the notification is actually for must be manually fetched. |
18,490 | def remove_reactions(self, reactions, remove_orphans=False):
if isinstance(reactions, string_types) or hasattr(reactions, "id"):
warn("need to pass in a list")
reactions = [reactions]
context = get_context(self)
for reaction in reactions:
try:
reaction = self.reactions[self.reactions.index(reaction)]
except ValueError:
warn('%s not in %s' % (reaction, self))
else:
forward = reaction.forward_variable
reverse = reaction.reverse_variable
if context:
obj_coef = reaction.objective_coefficient
if obj_coef != 0:
context(partial(
self.solver.objective.set_linear_coefficients,
{forward: obj_coef, reverse: -obj_coef}))
context(partial(self._populate_solver, [reaction]))
context(partial(setattr, reaction, '_model', self))
context(partial(self.reactions.add, reaction))
self.remove_cons_vars([forward, reverse])
self.reactions.remove(reaction)
reaction._model = None
for met in reaction._metabolites:
if reaction in met._reaction:
met._reaction.remove(reaction)
if context:
context(partial(met._reaction.add, reaction))
if remove_orphans and len(met._reaction) == 0:
self.remove_metabolites(met)
for gene in reaction._genes:
if reaction in gene._reaction:
gene._reaction.remove(reaction)
if context:
context(partial(gene._reaction.add, reaction))
if remove_orphans and len(gene._reaction) == 0:
self.genes.remove(gene)
if context:
context(partial(self.genes.add, gene))
associated_groups = self.get_associated_groups(reaction)
for group in associated_groups:
group.remove_members(reaction) | Remove reactions from the model.
The change is reverted upon exit when using the model as a context.
Parameters
----------
reactions : list
A list with reactions (`cobra.Reaction`), or their id's, to remove
remove_orphans : bool
Remove orphaned genes and metabolites from the model as well |
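A minimal sketch of the context-manager behaviour described above (assumes cobrapy's bundled textbook test model is available; the reaction id is illustrative):
import cobra.test
model = cobra.test.create_test_model('textbook')
with model:
    # removal (including orphan cleanup) is reverted when the context exits
    model.remove_reactions([model.reactions.PGI], remove_orphans=True)
    assert 'PGI' not in model.reactions
assert 'PGI' in model.reactions   # restored after leaving the context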
18,491 | def _configure_send(self, request, **kwargs):
requests_kwargs = {}
session = kwargs.pop('session', self.session)
if session is not self.session:
self._init_session(session)
session.max_redirects = int(self.config.redirect_policy())
session.trust_env = bool(self.config.proxies.use_env_settings)
requests_kwargs.update(self.config.connection())
requests_kwargs['allow_redirects'] = bool(self.config.redirect_policy)
requests_kwargs['headers'] = self.config.headers.copy()
proxies = self.config.proxies()
if proxies:
requests_kwargs['proxies'] = proxies
for key in kwargs:
if key in self._REQUESTS_KWARGS:
requests_kwargs[key] = kwargs[key]
def make_user_hook_cb(user_hook, session):
def user_hook_cb(r, *args, **kwargs):
kwargs.setdefault("msrest", {})[] = session
return user_hook(r, *args, **kwargs)
return user_hook_cb
hooks = []
for user_hook in self.config.hooks:
hooks.append(make_user_hook_cb(user_hook, self.session))
if hooks:
requests_kwargs['hooks'] = {'response': hooks}
output_kwargs = self.config.session_configuration_callback(
session,
self.config,
kwargs,
**requests_kwargs
)
if output_kwargs is not None:
requests_kwargs = output_kwargs
if session is not self.session:
requests_kwargs[] = session
requests_kwargs['verify'] = kwargs.get('verify', True)
if request.files:
requests_kwargs['files'] = request.files
elif request.data:
requests_kwargs['data'] = request.data
requests_kwargs['headers'].update(request.headers)
return requests_kwargs | Configure the kwargs to use with requests.
See "send" for kwargs details.
:param ClientRequest request: The request object to be sent.
:returns: The requests.Session.request kwargs
:rtype: dict[str,str] |
18,492 | def pivot(self, index, **kwargs):
try:
return pd.pivot_table(self.df, index=index, **kwargs)
except Exception as e:
self.err(e, "Can not pivot dataframe") | Pivots a dataframe |
18,493 | def run_updater_in_background(self):
thread = threading.Thread(target=self.updater_loop)
thread.daemon = True
thread.start() | Starts a thread that runs the updater in the background. |
18,494 | def parameter_to_field(self, name):
if name not in self._parameters:
raise ValueError("no parameter found" % (name))
if self._fields.count(name) > 0:
raise ValueError("field with name already exists" % (name))
data = np.array([self._parameters[name]]*self._num_fix)
self.rm_parameter(name)
self.add_field(name, data) | Promotes a parameter to a field by creating a new array of same
size as the other existing fields, filling it with the current
value of the parameter, and then removing that parameter. |
18,495 | def _related(self, concept):
return concept.hypernyms() + \
concept.hyponyms() + \
concept.member_meronyms() + \
concept.substance_meronyms() + \
concept.part_meronyms() + \
concept.member_holonyms() + \
concept.substance_holonyms() + \
concept.part_holonyms() + \
concept.attributes() + \
concept.also_sees() + \
concept.similar_tos() | Returns related concepts for a concept. |
18,496 | def write_entity(self, entity):
db, db_object_id = self._split_prefix(entity)
taxon = normalize_taxon(entity["taxon"]["id"])
vals = [
db,
db_object_id,
entity.get(),
entity.get(),
entity.get(),
entity.get(),
taxon,
entity.get(),
entity.get(),
entity.get()
]
self._write_row(vals) | Write a single entity to a line in the output file |
18,497 | def user_present(name,
password,
email,
tenant=None,
enabled=True,
roles=None,
profile=None,
password_reset=True,
project=None,
**connection_args):
ret = {: name,
: {},
: True,
: .format(name)}
_api_version(profile=profile, **connection_args)
if project and not tenant:
tenant = project
if tenant is not None:
tenantdata = __salt__[](name=tenant,
profile=profile,
**connection_args)
if in tenantdata:
ret[] = False
ret[] = .format(tenant)
return ret
tenant_id = tenantdata[tenant][]
else:
tenant_id = None
user = __salt__[](name=name, profile=profile,
**connection_args)
if not in user:
change_email = False
change_enabled = False
change_tenant = False
change_password = False
if user[name].get(, None) != email:
change_email = True
if user[name].get(, None) != enabled:
change_enabled = True
if tenant and (_TENANT_ID not in user[name] or
user[name].get(_TENANT_ID, None) != tenant_id):
change_tenant = True
if (password_reset is True and
not __salt__[](name=name,
password=password,
profile=profile,
**connection_args)):
change_password = True
if __opts__.get() and (change_email or change_enabled or change_tenant or change_password):
ret[] = None
ret[] = .format(name)
if change_email is True:
ret[][] =
if change_enabled is True:
ret[][] =
if change_tenant is True:
ret[][] = .format(tenant)
if change_password is True:
ret[][] =
return ret
ret[] = .format(name)
if change_email:
__salt__[](name=name, email=email, profile=profile, **connection_args)
ret[] = .format(name)
ret[][] =
if change_enabled:
__salt__[](name=name, enabled=enabled, profile=profile, **connection_args)
ret[] = .format(name)
ret[][] = .format(enabled)
if change_tenant:
__salt__[](name=name, tenant=tenant, profile=profile, **connection_args)
ret[] = .format(name)
ret[][] = .format(tenant)
if change_password:
__salt__[](name=name, password=password, profile=profile,
**connection_args)
ret[] = .format(name)
ret[][] =
if roles:
for tenant in roles:
args = dict({: name, :
tenant, : profile}, **connection_args)
tenant_roles = __salt__[](**args)
for role in roles[tenant]:
if role not in tenant_roles:
if __opts__.get():
ret[] = None
ret[] = .format(name)
return ret
addargs = dict({: name, : role,
: tenant,
: profile},
**connection_args)
newrole = __salt__[](**addargs)
if in ret[]:
ret[][].append(newrole)
else:
ret[][] = [newrole]
roles_to_remove = list(set(tenant_roles) - set(roles[tenant]))
for role in roles_to_remove:
if __opts__.get():
ret[] = None
ret[] = .format(name)
return ret
addargs = dict({: name, : role,
: tenant,
: profile},
**connection_args)
oldrole = __salt__[](**addargs)
if in ret[]:
ret[][].append(oldrole)
else:
ret[][] = [oldrole]
else:
if __opts__.get():
ret[] = None
ret[] = .format(name)
ret[][] =
return ret
__salt__[](name=name,
password=password,
email=email,
tenant_id=tenant_id,
enabled=enabled,
profile=profile,
**connection_args)
if roles:
for tenant in roles:
for role in roles[tenant]:
__salt__[](user=name,
role=role,
tenant=tenant,
profile=profile,
**connection_args)
ret[] = .format(name)
ret[][] =
return ret | Ensure that the keystone user is present with the specified properties.
name
The name of the user to manage
password
The password to use for this user.
.. note::
If the user already exists and a different password was set for
the user than the one specified here, the password for the user
will be updated. Please set the ``password_reset`` option to
``False`` if this is not the desired behavior.
password_reset
Whether or not to reset password after initial set. Defaults to
``True``.
email
The email address for this user
tenant
The tenant (name) for this user
project
The project (name) for this user (overrides tenant in api v3)
enabled
Availability state for this user
roles
The roles the user should have under given tenants.
Passed as a dictionary mapping tenant names to a list
of roles in this tenant, i.e.::
roles:
admin: # tenant
- admin # role
service:
- admin
- Member |
18,498 | def add_header_part(self):
header_part = HeaderPart.new(self.package)
rId = self.relate_to(header_part, RT.HEADER)
return header_part, rId | Return (header_part, rId) pair for newly-created header part. |
18,499 | def volshow(
data,
lighting=False,
data_min=None,
data_max=None,
max_shape=256,
tf=None,
stereo=False,
ambient_coefficient=0.5,
diffuse_coefficient=0.8,
specular_coefficient=0.5,
specular_exponent=5,
downscale=1,
level=[0.1, 0.5, 0.9],
opacity=[0.01, 0.05, 0.1],
level_width=0.1,
controls=True,
max_opacity=0.2,
memorder='C',
extent=None,
):
fig = gcf()
if tf is None:
tf = transfer_function(level, opacity, level_width, controls=controls, max_opacity=max_opacity)
if data_min is None:
data_min = np.nanmin(data)
if data_max is None:
data_max = np.nanmax(data)
if memorder == 'F':
data = data.T
if extent is None:
extent = [(0, k) for k in data.shape[::-1]]
if extent:
_grow_limits(*extent)
vol = ipv.Volume(
data_original=data,
tf=tf,
data_min=data_min,
data_max=data_max,
show_min=data_min,
show_max=data_max,
extent_original=extent,
data_max_shape=max_shape,
ambient_coefficient=ambient_coefficient,
diffuse_coefficient=diffuse_coefficient,
specular_coefficient=specular_coefficient,
specular_exponent=specular_exponent,
rendering_lighting=lighting,
)
vol._listen_to(fig)
if controls:
widget_opacity_scale = ipywidgets.FloatLogSlider(base=10, min=-2, max=2, description="opacity")
widget_brightness = ipywidgets.FloatLogSlider(base=10, min=-1, max=1, description="brightness")
ipywidgets.jslink((vol, 'opacity_scale'), (widget_opacity_scale, 'value'))
ipywidgets.jslink((vol, 'brightness'), (widget_brightness, 'value'))
widgets_bottom = [ipywidgets.HBox([widget_opacity_scale, widget_brightness])]
current.container.children += tuple(widgets_bottom)
fig.volumes = fig.volumes + [vol]
return vol | Visualize a 3d array using volume rendering.
Currently only 1 volume can be rendered.
:param data: 3d numpy array
:param origin: origin of the volume data, this is to match meshes which have a different origin
:param domain_size: domain size is the size of the volume
:param bool lighting: use lighting or not, if set to false, lighting parameters will be overriden
:param float data_min: minimum value to consider for data, if None, computed using np.nanmin
:param float data_max: maximum value to consider for data, if None, computed using np.nanmax
:parap int max_shape: maximum shape for the 3d cube, if larger, the data is reduced by skipping/slicing (data[::N]),
set to None to disable.
:param tf: transfer function (or a default one)
:param bool stereo: stereo view for virtual reality (cardboard and similar VR head mount)
:param ambient_coefficient: lighting parameter
:param diffuse_coefficient: lighting parameter
:param specular_coefficient: lighting parameter
:param specular_exponent: lighting parameter
:param float downscale: downscale the rendering for better performance, for instance when set to 2, a 512x512
canvas will show a 256x256 rendering upscaled, but it will render twice as fast.
:param level: level(s) for the where the opacity in the volume peaks, maximum sequence of length 3
:param opacity: opacity(ies) for each level, scalar or sequence of max length 3
:param level_width: width of the (gaussian) bumps where the opacity peaks, scalar or sequence of max length 3
:param bool controls: add controls for lighting and transfer function or not
:param float max_opacity: maximum opacity for transfer function controls
:param extent: list of [[xmin, xmax], [ymin, ymax], [zmin, zmax]] values that define the bounds of the volume,
otherwise the viewport is used
:return: |
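A hedged notebook usage sketch for volshow above (assumes ipyvolume; the array and transfer-function values are illustrative):
import numpy as np
import ipyvolume as ipv
data = np.random.random((64, 64, 64))
ipv.figure()
vol = volshow(data, level=[0.3, 0.6, 0.9], opacity=[0.02, 0.05, 0.1], lighting=True)
ipv.show()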