def slideshow(self, **kwargs):
"""
Produce a slide show of the different cycles. One plot per cycle.
"""
for label, cycle in self.items():
cycle.plot(title=label, tight_layout=True)
def get_year(self):
"""
Return the year from the database in the format expected by the URL.
"""
year = super(BuildableDayArchiveView, self).get_year()
fmt = self.get_year_format()
dt = date(int(year), 1, 1)
return dt.strftime(fmt)
def fetch_aliases(self, seq_id, current_only=True, translate_ncbi_namespace=None):
"""return list of alias annotation records (dicts) for a given seq_id"""
return [dict(r) for r in self.find_aliases(seq_id=seq_id,
current_only=current_only,
                                   translate_ncbi_namespace=translate_ncbi_namespace)]
async def observer_orm_notify(self, message):
"""Process notification from ORM."""
@database_sync_to_async
def get_observers(table):
# Find all observers with dependencies on the given table.
return list(
Observer.objects.filter(
dependencies__table=table, subscribers__isnull=False
)
.distinct('pk')
.values_list('pk', flat=True)
)
observers_ids = await get_observers(message['table'])
for observer_id in observers_ids:
await self.channel_layer.send(
CHANNEL_WORKER, {'type': TYPE_EVALUATE, 'observer': observer_id}
)
def _from_lattice_vectors(self):
"""Calculate the angles between the vectors that define the lattice.
_from_lattice_vectors will calculate the angles alpha, beta, and
gamma from the Lattice object attribute lattice_vectors.
"""
degreeConversion = 180.0 / np.pi
vector_magnitudes = np.linalg.norm(self.lattice_vectors, axis=1)
a_dot_b = np.dot(self.lattice_vectors[0], self.lattice_vectors[1])
b_dot_c = np.dot(self.lattice_vectors[1], self.lattice_vectors[2])
a_dot_c = np.dot(self.lattice_vectors[0], self.lattice_vectors[2])
alpha_raw = b_dot_c / (vector_magnitudes[1] * vector_magnitudes[2])
beta_raw = a_dot_c / (vector_magnitudes[0] * vector_magnitudes[2])
gamma_raw = a_dot_b / (vector_magnitudes[0] * vector_magnitudes[1])
alpha = np.arccos(np.clip(alpha_raw, -1.0, 1.0)) * degreeConversion
beta = np.arccos(np.clip(beta_raw, -1.0, 1.0)) * degreeConversion
gamma = np.arccos(np.clip(gamma_raw, -1.0, 1.0)) * degreeConversion
return np.asarray([alpha, beta, gamma], dtype=np.float64)
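
A minimal standalone sketch of the same angle computation, for an assumed simple cubic cell where all three angles come out to 90 degrees; only numpy is needed, the Lattice class itself is not.

import numpy as np

# Hypothetical lattice vectors for a simple cubic cell (assumption for illustration).
lattice_vectors = np.array([[2.0, 0.0, 0.0],
                            [0.0, 2.0, 0.0],
                            [0.0, 0.0, 2.0]])
magnitudes = np.linalg.norm(lattice_vectors, axis=1)
# alpha is the angle between b and c (beta: a and c, gamma: a and b).
cos_alpha = np.dot(lattice_vectors[1], lattice_vectors[2]) / (magnitudes[1] * magnitudes[2])
alpha = np.degrees(np.arccos(np.clip(cos_alpha, -1.0, 1.0)))
print(alpha)  # 90.0 for a cubic lattice
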
def _logger_levels(self):
"""Return log levels."""
return {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
project = self._get_field('project')
return BigQueryConnection(
service=service,
project_id=project,
use_legacy_sql=self.use_legacy_sql,
location=self.location,
num_retries=self.num_retries
)
def _prepare_app(self, app):
"""
Normalize app data, preparing it for the detection phase.
"""
# Ensure these keys' values are lists
for key in ['url', 'html', 'script', 'implies']:
try:
value = app[key]
except KeyError:
app[key] = []
else:
if not isinstance(value, list):
app[key] = [value]
# Ensure these keys exist
for key in ['headers', 'meta']:
try:
value = app[key]
except KeyError:
app[key] = {}
# Ensure the 'meta' key is a dict
obj = app['meta']
if not isinstance(obj, dict):
app['meta'] = {'generator': obj}
# Ensure keys are lowercase
for key in ['headers', 'meta']:
obj = app[key]
app[key] = {k.lower(): v for k, v in obj.items()}
# Prepare regular expression patterns
for key in ['url', 'html', 'script']:
app[key] = [self._prepare_pattern(pattern) for pattern in app[key]]
for key in ['headers', 'meta']:
obj = app[key]
for name, pattern in obj.items():
obj[name] = self._prepare_pattern(obj[name])
def run_mapper(self, stdin=sys.stdin, stdout=sys.stdout):
"""
Run the mapper on the hadoop node.
"""
self.init_hadoop()
self.init_mapper()
outputs = self._map_input((line[:-1] for line in stdin))
if self.reducer == NotImplemented:
self.writer(outputs, stdout)
else:
self.internal_writer(outputs, stdout)
def from_onnx(self, graph):
"""Construct symbol from onnx graph.
Parameters
----------
graph : onnx protobuf object
The loaded onnx graph
Returns
-------
sym :symbol.Symbol
The returned mxnet symbol
params : dict
A dict of name: nd.array pairs, used as pretrained weights
"""
# get input, output shapes
self.model_metadata = self.get_graph_metadata(graph)
# parse network inputs, aka parameters
for init_tensor in graph.initializer:
if not init_tensor.name.strip():
raise ValueError("Tensor's name is required.")
self._params[init_tensor.name] = self._parse_array(init_tensor)
# converting GraphProto message
for i in graph.input:
if i.name in self._params:
# i is a param instead of input
self._nodes[i.name] = symbol.Variable(name=i.name,
shape=self._params[i.name].shape)
else:
self._nodes[i.name] = symbol.Variable(name=i.name)
# constructing nodes, nodes are stored as directed acyclic graph
# converting NodeProto message
for node in graph.node:
op_name = node.op_type
node_name = node.name.strip()
node_name = node_name if node_name else None
onnx_attr = self._parse_attr(node.attribute)
inputs = [self._nodes[i] for i in node.input]
mxnet_sym = self._convert_operator(node_name, op_name, onnx_attr, inputs)
for k, i in zip(list(node.output), range(len(mxnet_sym.list_outputs()))):
self._nodes[k] = mxnet_sym[i]
# splitting params into args and aux params
for args in mxnet_sym.list_arguments():
if args in self._params:
self.arg_dict.update({args: nd.array(self._params[args])})
for aux in mxnet_sym.list_auxiliary_states():
if aux in self._params:
self.aux_dict.update({aux: nd.array(self._params[aux])})
# now return the outputs
out = [self._nodes[i.name] for i in graph.output]
if len(out) > 1:
out = symbol.Group(out)
else:
out = out[0]
return out, self.arg_dict, self.aux_dict
def multisorted(items, *keys):
"""Sort by multiple attributes.
Args:
items: An iterable series to be sorted.
*keys: Key objects which extract key values from the items.
The first key will be the most significant, and the
last key the least significant. If no key functions
are provided, the items will be sorted in ascending
natural order.
Returns:
A list of items sorted according to keys.
"""
if len(keys) == 0:
keys = [asc()]
for key in reversed(keys):
items = sorted(items, key=key.func, reverse=key.reverse)
return items
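
A short usage sketch. The module's own key objects (such as asc()) are not shown here; the stand-in Key namedtuple below is an assumption that only provides the .func and .reverse attributes the function relies on.

from collections import namedtuple
import operator

# Stand-in for the module's key objects; only .func and .reverse are assumed.
Key = namedtuple('Key', ['func', 'reverse'])

records = [('bob', 3), ('alice', 3), ('carol', 1)]
by_count_desc = Key(func=operator.itemgetter(1), reverse=True)
by_name_asc = Key(func=operator.itemgetter(0), reverse=False)
# Most significant key first: count descending, then name ascending.
print(multisorted(records, by_count_desc, by_name_asc))
# [('alice', 3), ('bob', 3), ('carol', 1)]
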
def weight_from_comm(self, v, comm):
""" The total number of edges (or sum of weights) to node ``v`` from
community ``comm``.
See Also
--------
:func:`~VertexPartition.MutableVertexPartition.weight_to_comm`
"""
return _c_louvain._MutableVertexPartition_weight_from_comm(self._partition, v, comm)
def merge_dicts(*dicts, **kwargs):
""" merge_dicts(*dicts, cls=None)
Takes multiple *dicts* and returns a single merged dict. The merging takes place in order of the
passed dicts, so values from later dicts take precedence in case of field collisions.
The class of the returned merged dict is configurable via *cls*. If it is *None*, the class is
inferred from the first dict object in *dicts*.
"""
# get or infer the class
cls = kwargs.get("cls", None)
if cls is None:
for d in dicts:
if isinstance(d, dict):
cls = d.__class__
break
else:
raise TypeError("cannot infer cls as none of the passed objects is of type dict")
# start merging
merged_dict = cls()
for d in dicts:
if isinstance(d, dict):
merged_dict.update(d)
return merged_dict
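
A brief usage sketch, assuming nothing beyond the standard library; OrderedDict is used only to show that the result class is inferred from the first dict passed.

from collections import OrderedDict

defaults = OrderedDict(host="localhost", port=8080)
overrides = {"port": 9090}
merged = merge_dicts(defaults, overrides)
# The class is inferred from the first dict, so merged is an OrderedDict,
# and the later dict wins on the colliding "port" key.
print(type(merged).__name__, dict(merged))  # OrderedDict {'host': 'localhost', 'port': 9090}
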
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'sentence') and self.sentence is not None:
_dict['sentence'] = self.sentence
if hasattr(self, 'subject') and self.subject is not None:
_dict['subject'] = self.subject._to_dict()
if hasattr(self, 'action') and self.action is not None:
_dict['action'] = self.action._to_dict()
if hasattr(self, 'object') and self.object is not None:
_dict['object'] = self.object._to_dict()
return _dict
def _parse_v_parameters(val_type, val, filename, param_name):
"""
Helper function to convert a Vasprun array-type parameter into the proper
type. Boolean, int and float types are converted.
Args:
val_type: Value type parsed from vasprun.xml.
val: Actual string value parsed for vasprun.xml.
filename: Fullpath of vasprun.xml. Used for robust error handling.
E.g., if vasprun.xml contains \\*\\*\\* for some Incar parameters,
the code will try to read from an INCAR file present in the same
directory.
param_name: Name of parameter.
Returns:
Parsed value.
"""
if val_type == "logical":
val = [i == "T" for i in val.split()]
elif val_type == "int":
try:
val = [int(i) for i in val.split()]
except ValueError:
# Fix for stupid error in vasprun sometimes which displays
# LDAUL/J as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise IOError("Error in parsing vasprun.xml")
elif val_type == "string":
val = val.split()
else:
try:
val = [float(i) for i in val.split()]
except ValueError:
# Fix for stupid error in vasprun sometimes which displays
# MAGMOM as 2****
val = _parse_from_incar(filename, param_name)
if val is None:
raise IOError("Error in parsing vasprun.xml")
return val
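
A hedged usage sketch with made-up values as they might appear in a vasprun.xml <v> element; the filename and parameter name only matter on the INCAR fallback path.

print(_parse_v_parameters("logical", "T F T", "vasprun.xml", "LDAU"))    # [True, False, True]
print(_parse_v_parameters("int", "2 2 2", "vasprun.xml", "LDAUL"))       # [2, 2, 2]
print(_parse_v_parameters("float", "0.6 0.6", "vasprun.xml", "MAGMOM"))  # [0.6, 0.6]
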
def get_nested_attribute(obj, attribute):
"""
Returns the value of the given (possibly dotted) attribute for the given
object.
If any of the parents on the nested attribute's name path are `None`, the
value of the nested attribute is also assumed as `None`.
:raises AttributeError: If any attribute access along the attribute path
fails with an `AttributeError`.
"""
parent, attr = resolve_nested_attribute(obj, attribute)
if parent is not None:
attr_value = getattr(parent, attr)
else:
attr_value = None
return attr_value
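
A small usage sketch; get_nested_attribute relies on the module's resolve_nested_attribute helper (assumed to split the dotted path and walk to the parent object), so the behaviour shown is indicative.

class Address:
    def __init__(self, city):
        self.city = city

class Person:
    def __init__(self, address):
        self.address = address

p = Person(Address("Lisbon"))
# Walks the dotted path "address.city" on p; returns None if any parent is None.
print(get_nested_attribute(p, "address.city"))             # 'Lisbon'
print(get_nested_attribute(Person(None), "address.city"))  # None
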
def check_length_of_shape_or_intercept_names(name_list,
num_alts,
constrained_param,
list_title):
"""
Ensures that the length of the parameter names matches the number of
parameters that will be estimated. Will raise a ValueError otherwise.
Parameters
----------
name_list : list of strings.
Each element should be the name of a parameter that is to be estimated.
num_alts : int.
Should be the total number of alternatives in the universal choice set
for this dataset.
constrained_param : {0, 1, True, False}
Indicates whether (1 or True) or not (0 or False) one of the type of
parameters being estimated will be constrained. For instance,
constraining one of the intercepts.
list_title : str.
Should specify the type of parameters whose names are being checked.
Examples include 'intercept_params' or 'shape_params'.
Returns
-------
None.
"""
if len(name_list) != (num_alts - constrained_param):
msg_1 = "{} is of the wrong length:".format(list_title)
msg_2 = "len({}) == {}".format(list_title, len(name_list))
correct_length = num_alts - constrained_param
msg_3 = "The correct length is: {}".format(correct_length)
total_msg = "\n".join([msg_1, msg_2, msg_3])
raise ValueError(total_msg)
return None
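
A hedged usage sketch of the validation: with three alternatives and one constrained intercept, exactly two names are expected, so the call below passes silently.

# Three alternatives, one intercept constrained to zero, so only two names are expected.
intercept_names = ["ASC car", "ASC bus"]
check_length_of_shape_or_intercept_names(intercept_names,
                                         num_alts=3,
                                         constrained_param=True,
                                         list_title="intercept_params")
# Passing three names instead would raise a ValueError describing the expected length.
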
def window_open_config(self, temperature, duration):
"""Configures the window open behavior. The duration is specified in
5 minute increments."""
_LOGGER.debug("Window open config, temperature: %s duration: %s", temperature, duration)
self._verify_temperature(temperature)
if duration.seconds < 0 or duration.seconds > 3600:
raise ValueError
value = struct.pack('BBB', PROP_WINDOW_OPEN_CONFIG,
int(temperature * 2), int(duration.seconds / 300))
self._conn.make_request(PROP_WRITE_HANDLE, value)
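
A standalone sketch of how the payload bytes are packed, using an assumed placeholder value for PROP_WINDOW_OPEN_CONFIG: one byte for the property id, one for the temperature in half-degree steps, one for the duration in 5-minute steps.

import struct
from datetime import timedelta

PROP_WINDOW_OPEN_CONFIG = 0x14  # placeholder; the real constant lives in the module
temperature = 12.0              # degrees Celsius
duration = timedelta(minutes=15)
# One byte each: property id, temperature * 2, duration in 5-minute (300 s) increments.
value = struct.pack('BBB', PROP_WINDOW_OPEN_CONFIG,
                    int(temperature * 2), int(duration.seconds / 300))
print(value.hex())  # '141803' with the placeholder property id
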
def QA_SU_save_index_min(engine, client=DATABASE):
"""save index_min
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_index_min(client=client)
def write_json(obj, path):
"""Escribo un objeto a un archivo JSON con codificación UTF-8."""
obj_str = text_type(json.dumps(obj, indent=4, separators=(",", ": "),
ensure_ascii=False))
helpers.ensure_dir_exists(os.path.dirname(path))
with io.open(path, "w", encoding='utf-8') as target:
target.write(obj_str)
def random_choice(sequence):
""" Same as :meth:`random.choice`, but also supports :class:`set` type to be passed as sequence. """
return random.choice(tuple(sequence) if isinstance(sequence, set) else sequence)
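
A quick usage sketch; the point is that sets are materialised into a tuple before delegating to random.choice, which cannot index a set.

import random

colors = {"red", "green", "blue"}
# random.choice(colors) would raise TypeError because sets are not indexable;
# random_choice converts the set to a tuple first.
print(random_choice(colors))     # one of 'red', 'green', 'blue'
print(random_choice([1, 2, 3]))  # lists are passed through unchanged
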
def create_parser(subparsers):
""" create parser """
parser = subparsers.add_parser(
'version',
help='Display version',
usage="%(prog)s",
add_help=False)
args.add_titles(parser)
parser.set_defaults(subcommand='version')
return parser
def quit(self):
"""
This function quits PlanarRad, checking if PlanarRad is running before.
"""
"""
Nothing programmed for displaying a message box when the user clicks on the window cross in order to quit.
"""
if self.is_running:
warning_planarrad_running = QtGui.QMessageBox.warning(self.ui.quit, 'Warning !',
"PlanarRad is running. Stop it before quit !",
QtGui.QMessageBox.Ok)
else:
quit = QtGui.QMessageBox.question(self.ui.quit, 'Quit PlanarRad', "Are you sure you want to quit?",
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No)
if quit == QtGui.QMessageBox.Yes:
QtGui.qApp.quit()
def setRandomParams(self):
"""
set random hyperparameters
"""
params = sp.randn(self.getNumberParams())
self.setParams(params)
def _submit_request(self, url, params=None, data=None, headers=None,
method="GET"):
"""Submits the given request, and handles the errors appropriately.
Args:
url (str): the request to send.
params (dict): params to be passed along to get/post
data (bytes): the data to include in the request.
headers (dict): the headers to include in the request.
method (str): the method to use for the request, "POST" or "GET".
Returns:
tuple of (int, str): The response status code and the json parsed
body, or the error message.
Raises:
`CliException`: If any issues occur with the URL.
"""
if headers is None:
headers = {}
if self._auth_header is not None:
headers['Authorization'] = self._auth_header
try:
if method == 'POST':
result = requests.post(
url, params=params, data=data, headers=headers)
elif method == 'GET':
result = requests.get(
url, params=params, data=data, headers=headers)
result.raise_for_status()
return (result.status_code, result.json())
except requests.exceptions.HTTPError as e:
return (e.response.status_code, e.response.reason)
except RemoteDisconnected as e:
raise CliException(e)
except (requests.exceptions.MissingSchema,
requests.exceptions.InvalidURL) as e:
raise CliException(e)
except requests.exceptions.ConnectionError as e:
raise CliException(
('Unable to connect to "{}": '
                'make sure URL is correct').format(self._base_url))
def get_suggested_field_names(type_: GraphQLOutputType, field_name: str) -> List[str]:
"""Get a list of suggested field names.
For the field name provided, determine if there are any similar field names that may
be the result of a typo.
"""
if is_object_type(type_) or is_interface_type(type_):
possible_field_names = list(type_.fields) # type: ignore
return suggestion_list(field_name, possible_field_names)
# Otherwise, must be a Union type, which does not define fields.
return []
def get_tokens_list(self, registry_address: PaymentNetworkID):
"""Returns a list of tokens the node knows about"""
tokens_list = views.get_token_identifiers(
chain_state=views.state_from_raiden(self.raiden),
payment_network_id=registry_address,
)
return tokens_list
def load_readers(filenames=None, reader=None, reader_kwargs=None,
ppp_config_dir=None):
"""Create specified readers and assign files to them.
Args:
filenames (iterable or dict): A sequence of files that will be used to load data from. A ``dict`` object
should map reader names to a list of filenames for that reader.
reader (str or list): The name of the reader to use for loading the data or a list of names.
reader_kwargs (dict): Keyword arguments to pass to specific reader instances.
ppp_config_dir (str): The directory containing the configuration files for satpy.
Returns: Dictionary mapping reader name to reader instance
"""
reader_instances = {}
reader_kwargs = reader_kwargs or {}
reader_kwargs_without_filter = reader_kwargs.copy()
reader_kwargs_without_filter.pop('filter_parameters', None)
if ppp_config_dir is None:
ppp_config_dir = get_environ_config_dir()
if not filenames and not reader:
# used for an empty Scene
return {}
elif reader and filenames is not None and not filenames:
# user made a mistake in their glob pattern
raise ValueError("'filenames' was provided but is empty.")
elif not filenames:
LOG.warning("'filenames' required to create readers and load data")
return {}
elif reader is None and isinstance(filenames, dict):
# filenames is a dictionary of reader_name -> filenames
reader = list(filenames.keys())
remaining_filenames = set(f for fl in filenames.values() for f in fl)
elif reader and isinstance(filenames, dict):
# filenames is a dictionary of reader_name -> filenames
# but they only want one of the readers
filenames = filenames[reader]
remaining_filenames = set(filenames or [])
else:
remaining_filenames = set(filenames or [])
for idx, reader_configs in enumerate(configs_for_reader(reader, ppp_config_dir)):
if isinstance(filenames, dict):
readers_files = set(filenames[reader[idx]])
else:
readers_files = remaining_filenames
try:
reader_instance = load_reader(reader_configs, **reader_kwargs)
except (KeyError, IOError, yaml.YAMLError) as err:
LOG.info('Cannot use %s', str(reader_configs))
LOG.debug(str(err))
continue
if readers_files:
loadables = reader_instance.select_files_from_pathnames(readers_files)
if loadables:
reader_instance.create_filehandlers(loadables, fh_kwargs=reader_kwargs_without_filter)
reader_instances[reader_instance.name] = reader_instance
remaining_filenames -= set(loadables)
if not remaining_filenames:
break
if remaining_filenames:
LOG.warning("Don't know how to open the following files: {}".format(str(remaining_filenames)))
if not reader_instances:
raise ValueError("No supported files found")
elif not any(list(r.available_dataset_ids) for r in reader_instances.values()):
raise ValueError("No dataset could be loaded. Either missing "
"requirements (such as Epilog, Prolog) or none of the "
"provided files match the filter parameters.")
return reader_instances
def disable_scanners_by_ids(self, scanner_ids):
"""Disable a list of scanner IDs."""
scanner_ids = ','.join(scanner_ids)
self.logger.debug('Disabling scanners with IDs {0}'.format(scanner_ids))
return self.zap.ascan.disable_scanners(scanner_ids)
def add_audio(self,
customization_id,
audio_name,
audio_resource,
contained_content_type=None,
allow_overwrite=None,
content_type=None,
**kwargs):
"""
Add an audio resource.
Adds an audio resource to a custom acoustic model. Add audio content that reflects
the acoustic characteristics of the audio that you plan to transcribe. You must
use credentials for the instance of the service that owns a model to add an audio
resource to it. Adding audio data does not affect the custom acoustic model until
you train the model for the new data by using the **Train a custom acoustic
model** method.
You can add individual audio files or an archive file that contains multiple audio
files. Adding multiple audio files via a single archive file is significantly more
efficient than adding each file individually. You can add audio resources in any
format that the service supports for speech recognition.
You can use this method to add any number of audio resources to a custom model by
calling the method once for each audio or archive file. But the addition of one
audio resource must be fully complete before you can add another. You must add a
minimum of 10 minutes and a maximum of 100 hours of audio that includes speech,
not just silence, to a custom acoustic model before you can train it. No audio
resource, audio- or archive-type, can be larger than 100 MB. To add an audio
resource that has the same name as an existing audio resource, set the
`allow_overwrite` parameter to `true`; otherwise, the request fails.
The method is asynchronous. It can take several seconds to complete depending on
the duration of the audio and, in the case of an archive file, the total number of
audio files being processed. The service returns a 201 response code if the audio
is valid. It then asynchronously analyzes the contents of the audio file or files
and automatically extracts information about the audio such as its length,
sampling rate, and encoding. You cannot submit requests to add additional audio
resources to a custom acoustic model, or to train the model, until the service's
analysis of all audio files for the current request completes.
To determine the status of the service's analysis of the audio, use the **Get an
audio resource** method to poll the status of the audio. The method accepts the
customization ID of the custom model and the name of the audio resource, and it
returns the status of the resource. Use a loop to check the status of the audio
every few seconds until it becomes `ok`.
**See also:** [Add audio to the custom acoustic
model](https://cloud.ibm.com/docs/services/speech-to-text/acoustic-create.html#addAudio).
### Content types for audio-type resources
You can add an individual audio file in any format that the service supports for
speech recognition. For an audio-type resource, use the `Content-Type` parameter
to specify the audio format (MIME type) of the audio file, including specifying
the sampling rate, channels, and endianness where indicated.
* `audio/alaw` (Specify the sampling rate (`rate`) of the audio.)
* `audio/basic` (Use only with narrowband models.)
* `audio/flac`
* `audio/g729` (Use only with narrowband models.)
* `audio/l16` (Specify the sampling rate (`rate`) and optionally the number of
channels (`channels`) and endianness (`endianness`) of the audio.)
* `audio/mp3`
* `audio/mpeg`
* `audio/mulaw` (Specify the sampling rate (`rate`) of the audio.)
* `audio/ogg` (The service automatically detects the codec of the input audio.)
* `audio/ogg;codecs=opus`
* `audio/ogg;codecs=vorbis`
* `audio/wav` (Provide audio with a maximum of nine channels.)
* `audio/webm` (The service automatically detects the codec of the input audio.)
* `audio/webm;codecs=opus`
* `audio/webm;codecs=vorbis`
The sampling rate of an audio file must match the sampling rate of the base model
for the custom model: for broadband models, at least 16 kHz; for narrowband
models, at least 8 kHz. If the sampling rate of the audio is higher than the
minimum required rate, the service down-samples the audio to the appropriate rate.
If the sampling rate of the audio is lower than the minimum required rate, the
service labels the audio file as `invalid`.
**See also:** [Audio
formats](https://cloud.ibm.com/docs/services/speech-to-text/audio-formats.html).
### Content types for archive-type resources
You can add an archive file (**.zip** or **.tar.gz** file) that contains audio
files in any format that the service supports for speech recognition. For an
archive-type resource, use the `Content-Type` parameter to specify the media type
of the archive file:
* `application/zip` for a **.zip** file
* `application/gzip` for a **.tar.gz** file.
When you add an archive-type resource, the `Contained-Content-Type` header is
optional depending on the format of the files that you are adding:
* For audio files of type `audio/alaw`, `audio/basic`, `audio/l16`, or
`audio/mulaw`, you must use the `Contained-Content-Type` header to specify the
format of the contained audio files. Include the `rate`, `channels`, and
`endianness` parameters where necessary. In this case, all audio files contained
in the archive file must have the same audio format.
* For audio files of all other types, you can omit the `Contained-Content-Type`
header. In this case, the audio files contained in the archive file can have any
of the formats not listed in the previous bullet. The audio files do not need to
have the same format.
Do not use the `Contained-Content-Type` header when adding an audio-type resource.
### Naming restrictions for embedded audio files
The name of an audio file that is embedded within an archive-type resource must
meet the following restrictions:
* Include a maximum of 128 characters in the file name; this includes the file
extension.
* Do not include spaces, slashes, or backslashes in the file name.
* Do not use the name of an audio file that has already been added to the custom
model as part of an archive-type resource.
:param str customization_id: The customization ID (GUID) of the custom acoustic
model that is to be used for the request. You must make the request with
credentials for the instance of the service that owns the custom model.
:param str audio_name: The name of the new audio resource for the custom acoustic
model. Use a localized name that matches the language of the custom model and
reflects the contents of the resource.
* Include a maximum of 128 characters in the name.
* Do not include spaces, slashes, or backslashes in the name.
* Do not use the name of an audio resource that has already been added to the
custom model.
:param file audio_resource: The audio resource that is to be added to the custom
acoustic model, an individual audio file or an archive file.
:param str contained_content_type: **For an archive-type resource,** specify the
format of the audio files that are contained in the archive file if they are of
type `audio/alaw`, `audio/basic`, `audio/l16`, or `audio/mulaw`. Include the
`rate`, `channels`, and `endianness` parameters where necessary. In this case, all
audio files that are contained in the archive file must be of the indicated type.
For all other audio formats, you can omit the header. In this case, the audio
files can be of multiple types as long as they are not of the types listed in the
previous paragraph.
The parameter accepts all of the audio formats that are supported for use with
speech recognition. For more information, see **Content types for audio-type
resources** in the method description.
**For an audio-type resource,** omit the header.
:param bool allow_overwrite: If `true`, the specified audio resource overwrites an
existing audio resource with the same name. If `false`, the request fails if an
audio resource with the same name already exists. The parameter has no effect if
an audio resource with the same name does not already exist.
:param str content_type: For an audio-type resource, the format (MIME type) of the
audio. For more information, see **Content types for audio-type resources** in the
method description.
For an archive-type resource, the media type of the archive file. For more
information, see **Content types for archive-type resources** in the method
description.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if customization_id is None:
raise ValueError('customization_id must be provided')
if audio_name is None:
raise ValueError('audio_name must be provided')
if audio_resource is None:
raise ValueError('audio_resource must be provided')
headers = {
'Contained-Content-Type': contained_content_type,
'Content-Type': content_type
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'add_audio')
headers.update(sdk_headers)
params = {'allow_overwrite': allow_overwrite}
data = audio_resource
url = '/v1/acoustic_customizations/{0}/audio/{1}'.format(
*self._encode_path_vars(customization_id, audio_name))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
data=data,
accept_json=True)
return response
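
A minimal usage sketch, assuming a speech_to_text client instance of this class; the customization ID and file path are placeholders, and this is illustrative only rather than the service's documented sample.

# Hypothetical client instance, customization ID, and file path.
with open('meeting_audio.wav', 'rb') as audio_file:
    response = speech_to_text.add_audio(
        customization_id='<acoustic-model-guid>',
        audio_name='meeting_audio',
        audio_resource=audio_file,
        content_type='audio/wav',
        allow_overwrite=True)
print(response)  # DetailedResponse; a 201 status means the audio was accepted for analysis
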
def write(self, data, mode='w'):
"""
Write data to the file.
`data` is the data to write
`mode` is the mode argument to pass to `open()`
"""
with open(self.path, mode) as f:
f.write(data)
def coneSearch(self, center, radius=3*u.arcmin, magnitudelimit=25):
'''
Run a cone search of the GALEX archive
'''
self.magnitudelimit = magnitudelimit
# run the query
self.speak('querying GALEX, centered on {} with radius {} (magnitude limit {})'.format(center, radius, magnitudelimit))
coordinatetosearch = '{0.ra.deg} {0.dec.deg}'.format(center)
table = astroquery.mast.Catalogs.query_region(coordinates=center, radius=radius, catalog='GALEX')
# the gaia DR2 epoch is 2015.5
epoch = 2005#???
# create skycoord objects
self.coordinates = coord.SkyCoord( ra=table['ra'].data*u.deg,
dec=table['dec'].data*u.deg,
obstime=Time(epoch, format='decimalyear'))
self.magnitudes = dict(NUV=table['nuv_mag'].data, FUV=table['fuv_mag'].data)
self.magnitude = self.magnitudes['NUV']
def pretty(price, currency, *, abbrev=True, trim=True):
""" return format price with symbol. Example format(100, 'USD') return '$100'
pretty(price, currency, abbrev=True, trim=False)
abbrev:
True: print value + symbol. Symbol can either be placed before or after value
False: print value + currency code. currency code is placed behind value
trim:
True: trim float value to the maximum digit numbers of that currency
False: keep number of decimal in initial argument """
currency = validate_currency(currency)
price = validate_price(price)
space = '' if nospace(currency) else ' '
fmtstr = ''
if trim:
fmtstr = '{:0,.{x}f}'.format(price, x=decimals(currency)).rstrip('0').rstrip('.')
else:
fmtstr = '{:0,}'.format(price)  # keep the caller's decimals when trim is False
if abbrev: # use currency symbol
if issuffix(currency):
return fmtstr + space + symbol(currency)
return symbol(currency, native=False) + space + fmtstr
return fmtstr + ' ' + code(currency)
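
Indicative usage only; the exact output depends on the module's currency tables (validate_currency, symbol, decimals), so the commented results are assumptions.

print(pretty(1234.5, 'USD'))                # e.g. '$1,234.5'
print(pretty(1234.5, 'USD', abbrev=False))  # e.g. '1,234.5 USD'
print(pretty(1000, 'JPY'))                  # e.g. '¥1,000' (JPY uses zero decimal places)
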
def refresh_all_states(self):
"""Update all states."""
header = BASE_HEADERS.copy()
header['Cookie'] = self.__cookie
request = requests.get(
BASE_URL + "refreshAllStates", headers=header, timeout=10)
if request.status_code != 200:
self.__logged_in = False
self.login()
self.refresh_all_states()
return
def text(self, text):
'''
.. seealso:: :attr:`text`
'''
if text:
if not isinstance(text, str):
text = _pformat(text)
text += '\n\n'
self.m(
'add text to mail',
more=dict(len=len(text))
)
self.__message.attach(_MIMEText(text, 'plain', 'UTF-8'))
def add_alias(self, alias):
"""Add an alias to the index set in the elastic obj
:param alias: alias to add
:returns: None
"""
aliases = self.list_aliases()
if alias in aliases:
logger.debug("Alias %s already exists on %s.", alias, self.anonymize_url(self.index_url))
return
# add alias
alias_data = """
{
"actions": [
{
"add": {
"index": "%s",
"alias": "%s"
}
}
]
}
""" % (self.index, alias)
r = self.requests.post(self.url + "/_aliases", headers=HEADER_JSON, verify=False, data=alias_data)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as ex:
logger.warning("Something went wrong when adding an alias on %s. Alias not set.",
self.anonymize_url(self.index_url))
logger.warning(ex)
return
logger.info("Alias %s created on %s.", alias, self.anonymize_url(self.index_url)) | Add an alias to the index set in the elastic obj
:param alias: alias to add
:returns: None |
def _get_cl_dependency_code(self):
"""Get the CL code for all the CL code for all the dependencies.
Returns:
str: The CL code with the actual code.
"""
code = ''
for d in self._dependencies:
code += d.get_cl_code() + "\n"
return code
def get_checkcode(cls, id_number_str):
"""
Compute the check digit for a Chinese ID card number.
:param:
* id_number_str: (string) the first 17 digits of the ID number, e.g. 3201241987010100
:returns:
* return type (tuple)
* flag: (bool) True if the ID number is correctly formatted, False otherwise
* checkcode: the check digit computed from the first 17 digits
Example::
from fishbase.fish_data import *
print('--- fish_data get_checkcode demo ---')
# id number
id1 = '32012419870101001'
print(id1, IdCard.get_checkcode(id1)[1])
# id number
id2 = '13052219840731647'
print(id2, IdCard.get_checkcode(id2)[1])
print('---')
Output::
--- fish_data get_checkcode demo ---
32012419870101001 5
13052219840731647 1
---
"""
# if the length is not 17, fail immediately
if len(id_number_str) != 17:
return False, -1
id_regex = '[1-9][0-9]{14}([0-9]{2}[0-9X])?'
if not re.match(id_regex, id_number_str):
return False, -1
items = [int(item) for item in id_number_str]
# weighting factor table
factors = (7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2)
# sum of the products of each of the 17 digits with its weighting factor
copulas = sum([a * b for a, b in zip(factors, items)])
# check code lookup table
check_codes = ('1', '0', 'X', '9', '8', '7', '6', '5', '4', '3', '2')
checkcode = check_codes[copulas % 11].upper()
return True, checkcode
def namedlist(objname, fieldnames):
'like namedtuple but editable'
class NamedListTemplate(list):
__name__ = objname
_fields = fieldnames
def __init__(self, L=None, **kwargs):
if L is None:
L = [None]*len(fieldnames)
super().__init__(L)
for k, v in kwargs.items():
setattr(self, k, v)
@classmethod
def length(cls):
return len(cls._fields)
for i, attrname in enumerate(fieldnames):
# create property getter/setter for each field
setattr(NamedListTemplate, attrname, property(operator.itemgetter(i), itemsetter(i)))
return NamedListTemplate
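
A brief usage sketch; note that the generated property setters rely on an itemsetter helper assumed to be defined elsewhere in the module.

Point = namedlist('Point', ['x', 'y'])
p = Point([1, 2])
p.y = 5                # editable, unlike a namedtuple
print(p, p.x)          # [1, 5] 1
print(Point.length())  # 2
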
def InitializeNoPrompt(config=None,
external_hostname = None,
admin_password = None,
mysql_hostname = None,
mysql_port = None,
mysql_username = None,
mysql_password = None,
mysql_db = None,
mysql_client_key_path = None,
mysql_client_cert_path = None,
mysql_ca_cert_path = None,
redownload_templates = False,
repack_templates = True,
token = None):
"""Initialize GRR with no prompts.
Args:
config: config object
external_hostname: A hostname.
admin_password: A password used for the admin user.
mysql_hostname: A hostname used for establishing connection to MySQL.
mysql_port: A port used for establishing connection to MySQL.
mysql_username: A username used for establishing connection to MySQL.
mysql_password: A password used for establishing connection to MySQL.
mysql_db: Name of the MySQL database to use.
mysql_client_key_path: The path name of the client private key file.
mysql_client_cert_path: The path name of the client public key certificate.
mysql_ca_cert_path: The path name of the CA certificate file.
redownload_templates: Indicates whether templates should be re-downloaded.
repack_templates: Indicates whether templates should be re-packed.
token: auth token
Raises:
ValueError: if required flags are not provided, or if the config has
already been initialized.
IOError: if config is not writeable
ConfigInitError: if GRR is unable to connect to a running MySQL instance.
This method does the minimum work necessary to configure GRR without any user
prompting, relying heavily on config default values. User must supply the
external hostname, admin password, and MySQL password; everything else is set
automatically.
"""
if config["Server.initialized"]:
raise ValueError("Config has already been initialized.")
if not external_hostname:
raise ValueError(
"--noprompt set, but --external_hostname was not provided.")
if not admin_password:
raise ValueError("--noprompt set, but --admin_password was not provided.")
if mysql_password is None:
raise ValueError("--noprompt set, but --mysql_password was not provided.")
print("Checking write access on config %s" % config.parser)
if not os.access(config.parser.filename, os.W_OK):
raise IOError("Config not writeable (need sudo?)")
config_dict = {}
config_dict["Datastore.implementation"] = "MySQLAdvancedDataStore"
config_dict["Mysql.host"] = mysql_hostname or config["Mysql.host"]
config_dict["Mysql.port"] = mysql_port or config["Mysql.port"]
config_dict["Mysql.database_name"] = mysql_db or config["Mysql.database_name"]
config_dict["Mysql.database_username"] = (
mysql_username or config["Mysql.database_username"])
config_dict["Client.server_urls"] = [
"http://%s:%s/" % (external_hostname, config["Frontend.bind_port"])
]
config_dict["AdminUI.url"] = "http://%s:%s" % (external_hostname,
config["AdminUI.port"])
config_dict["Logging.domain"] = external_hostname
config_dict["Monitoring.alert_email"] = ("grr-monitoring@%s" %
external_hostname)
config_dict["Monitoring.emergency_access_email"] = ("grr-emergency@%s" %
external_hostname)
# Print all configuration options, except for the MySQL password.
print("Setting configuration as:\n\n%s" % config_dict)
config_dict["Mysql.database_password"] = mysql_password
if mysql_client_key_path is not None:
config_dict["Mysql.client_key_path"] = mysql_client_key_path
config_dict["Mysql.client_cert_path"] = mysql_client_cert_path
config_dict["Mysql.ca_cert_path"] = mysql_ca_cert_path
if CheckMySQLConnection(config_dict):
print("Successfully connected to MySQL with the given configuration.")
else:
print("Error: Could not connect to MySQL with the given configuration.")
raise ConfigInitError()
for key, value in iteritems(config_dict):
config.Set(key, value)
config_updater_keys_util.GenerateKeys(config)
FinalizeConfigInit(
config,
token,
admin_password=admin_password,
redownload_templates=redownload_templates,
repack_templates=repack_templates,
      prompt=False)
def connection_made(self, address):
"""When a connection is made the proxy is available."""
self._proxy = PickleProxy(self.loop, self)
for d in self._proxy_deferreds:
d.callback(self._proxy)
def restore_point(cls, cluster_id_label, s3_location, backup_id, table_names, overwrite=True, automatic=True):
"""
Restoring cluster from a given hbase snapshot id
"""
conn = Qubole.agent(version=Cluster.api_version)
parameters = {}
parameters['s3_location'] = s3_location
parameters['backup_id'] = backup_id
parameters['table_names'] = table_names
parameters['overwrite'] = overwrite
parameters['automatic'] = automatic
return conn.post(cls.element_path(cluster_id_label) + "/restore_point", data=parameters)
def get_mathjax_header(https=False):
"""
Return the snippet of HTML code to put in HTML HEAD tag, in order to
enable MathJax support.
@param https: when using the CDN, whether to use the HTTPS URL rather
than the HTTP one.
@type https: bool
@note: with new releases of MathJax, update this function together with
$MJV variable in the root Makefile.am
"""
if cfg['CFG_MATHJAX_HOSTING'].lower() == 'cdn':
if https:
mathjax_path = "https://d3eoax9i5htok0.cloudfront.net/mathjax/2.1-latest"
else:
mathjax_path = "http://cdn.mathjax.org/mathjax/2.1-latest"
else:
mathjax_path = "/vendors/MathJax"
if cfg['CFG_MATHJAX_RENDERS_MATHML']:
mathjax_config = "TeX-AMS-MML_HTMLorMML"
else:
mathjax_config = "TeX-AMS_HTML"
return """<script type="text/x-mathjax-config">
MathJax.Hub.Config({
tex2jax: {inlineMath: [['$','$']],
processEscapes: true},
showProcessingMessages: false,
messageStyle: "none"
});
</script>
<script src="%(mathjax_path)s/MathJax.js?config=%(mathjax_config)s" type="text/javascript">
</script>""" % {
'mathjax_path': mathjax_path,
'mathjax_config': mathjax_config,
}
def arcs(self):
"""Get information about the arcs available in the code.
Returns a sorted list of line number pairs. Line numbers have been
normalized to the first line of multiline statements.
"""
all_arcs = []
for l1, l2 in self.byte_parser._all_arcs():
fl1 = self.first_line(l1)
fl2 = self.first_line(l2)
if fl1 != fl2:
all_arcs.append((fl1, fl2))
return sorted(all_arcs)
def parse_dom(dom):
"""Parse dom into a Graph.
:param dom: dom as returned by minidom.parse or minidom.parseString
:return: A Graph representation
"""
root = dom.getElementsByTagName("graphml")[0]
graph = root.getElementsByTagName("graph")[0]
name = graph.getAttribute('id')
g = Graph(name)
# # Get attributes
# attributes = []
# for attr in root.getElementsByTagName("key"):
# attributes.append(attr)
# Get nodes
for node in graph.getElementsByTagName("node"):
n = g.add_node(id=node.getAttribute('id'))
for attr in node.getElementsByTagName("data"):
if attr.firstChild:
n[attr.getAttribute("key")] = attr.firstChild.data
else:
n[attr.getAttribute("key")] = ""
# Get edges
for edge in graph.getElementsByTagName("edge"):
source = edge.getAttribute('source')
dest = edge.getAttribute('target')
# source/target attributes refer to IDs: http://graphml.graphdrawing.org/xmlns/1.1/graphml-structure.xsd
e = g.add_edge_by_id(source, dest)
for attr in edge.getElementsByTagName("data"):
if attr.firstChild:
e[attr.getAttribute("key")] = attr.firstChild.data
else:
e[attr.getAttribute("key")] = ""
return g
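
A small usage sketch parsing an inline GraphML snippet with minidom; the Graph class and its API come from the surrounding module, so only the parse call itself is shown.

from xml.dom import minidom

# A minimal GraphML document, for illustration.
graphml = """<graphml>
  <graph id="G">
    <node id="n0"/>
    <node id="n1"/>
    <edge source="n0" target="n1"/>
  </graph>
</graphml>"""
dom = minidom.parseString(graphml)
g = parse_dom(dom)  # a Graph named 'G' with nodes n0, n1 and one edge between them
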
def display_png(*objs, **kwargs):
"""Display the PNG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw png data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
"""
raw = kwargs.pop('raw',False)
if raw:
for obj in objs:
publish_png(obj)
else:
display(*objs, include=['text/plain','image/png'])
def get_instance(self, payload):
"""
Build an instance of AuthRegistrationsCredentialListMappingInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance
"""
return AuthRegistrationsCredentialListMappingInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
) | Build an instance of AuthRegistrationsCredentialListMappingInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_registrations_mapping.auth_registrations_credential_list_mapping.AuthRegistrationsCredentialListMappingInstance |
def deleted(self):
"""Get the deleted state.
Returns:
bool: Whether this item is deleted.
"""
return self.timestamps.deleted is not None and self.timestamps.deleted > NodeTimestamps.int_to_dt(0) | Get the deleted state.
Returns:
bool: Whether this item is deleted. |
def input(self, data):
"""Reset the lexer and feed in new input.
:param data:
String of input data.
"""
# input(..) doesn't reset the lineno. We have to do that manually.
self._lexer.lineno = 1
return self._lexer.input(data) | Reset the lexer and feed in new input.
:param data:
String of input data. |
def on_excepthandler(self, node): # ('type', 'name', 'body')
"""Exception handler..."""
return (self.run(node.type), node.name, node.body) | Exception handler... |
def _get_on_crash(dom):
'''
Return `on_crash` setting from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_on_crash <domain>
'''
node = ElementTree.fromstring(get_xml(dom)).find('on_crash')
return node.text if node is not None else '' | Return `on_crash` setting from the named vm
CLI Example:
.. code-block:: bash
salt '*' virt.get_on_crash <domain> |
def get_issue_remotelinks(self, issue_key, global_id=None, internal_id=None):
"""
Compatibility naming method with get_issue_remote_links()
"""
return self.get_issue_remote_links(issue_key, global_id, internal_id) | Compatibility naming method with get_issue_remote_links() |
def resolve_url(url, directory=None, permissions=None):
"""
Resolves a URL to a local file, and returns the path to
that file.
"""
u = urlparse(url)
# create the name of the destination file
if directory is None:
directory = os.getcwd()
filename = os.path.join(directory,os.path.basename(u.path))
if u.scheme == '' or u.scheme == 'file':
# for regular files, make a direct copy
if os.path.isfile(u.path):
if os.path.isfile(filename):
# check to see if src and dest are the same file
src_inode = os.stat(u.path)[stat.ST_INO]
dst_inode = os.stat(filename)[stat.ST_INO]
if src_inode != dst_inode:
shutil.copy(u.path, filename)
else:
shutil.copy(u.path, filename)
else:
errmsg = "Cannot open file %s from URL %s" % (u.path, url)
raise ValueError(errmsg)
elif u.scheme == 'http' or u.scheme == 'https':
s = requests.Session()
s.mount(str(u.scheme)+'://',
requests.adapters.HTTPAdapter(max_retries=5))
# look for an ecp cookie file and load the cookies
cookie_dict = {}
ecp_file = '/tmp/ecpcookie.u%d' % os.getuid()
if os.path.isfile(ecp_file):
cj = cookielib.MozillaCookieJar()
cj.load(ecp_file, ignore_discard=True, ignore_expires=True)
else:
cj = []
for c in cj:
if c.domain == u.netloc:
# load cookies for this server
cookie_dict[c.name] = c.value
elif u.netloc == "code.pycbc.phy.syr.edu" and \
c.domain == "git.ligo.org":
# handle the redirect for code.pycbc to git.ligo.org
cookie_dict[c.name] = c.value
r = s.get(url, cookies=cookie_dict, allow_redirects=True)
if r.status_code != 200:
errmsg = "Unable to download %s\nError code = %d" % (url,
r.status_code)
raise ValueError(errmsg)
# if we are downloading from git.ligo.org, check that we
# did not get redirected to the sign-in page
if u.netloc == 'git.ligo.org' or u.netloc == 'code.pycbc.phy.syr.edu':
# Check if we have downloaded a binary file.
if istext(r.content):
soup = BeautifulSoup(r.content, 'html.parser')
desc = soup.findAll(attrs={"property":"og:url"})
if len(desc) and \
desc[0]['content'] == 'https://git.ligo.org/users/sign_in':
raise ValueError(ecp_cookie_error.format(url))
output_fp = open(filename, 'w')
output_fp.write(r.content)
output_fp.close()
else:
# TODO: We could support other schemes such as gsiftp by
# calling out to globus-url-copy
errmsg = "Unknown URL scheme: %s\n" % (u.scheme)
errmsg += "Currently supported are: file, http, and https."
raise ValueError(errmsg)
if not os.path.isfile(filename):
errmsg = "Error trying to create file %s from %s" % (filename,url)
raise ValueError(errmsg)
if permissions:
if os.access(filename, os.W_OK):
os.chmod(filename, permissions)
else:
# check that the file has at least the permissions requested
s = os.stat(filename)[stat.ST_MODE]
if (s & permissions) != permissions:
errmsg = "Could not change permissions on %s (read-only)" % url
raise ValueError(errmsg)
return filename | Resolves a URL to a local file, and returns the path to
that file. |
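A hedged usage sketch, assuming resolve_url above is in scope; the URL below is purely illustrative. The file is copied into /tmp and required to be at least owner-writable/world-readable.
import os
local_path = resolve_url('https://example.org/data.txt',   # hypothetical URL
                         directory='/tmp',
                         permissions=0o644)
print('downloaded to', local_path)
assert os.path.isfile(local_path)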
def handle_twitter_http_error(e, error_count, call_counter, time_window_start, wait_period):
"""
This function handles the twitter request in case of an HTTP error.
Inputs: - e: A twython.TwythonError instance to be handled.
- error_count: Number of failed retries of the call until now.
- call_counter: A counter that keeps track of the number of function calls in the current 15-minute window.
- time_window_start: The timestamp of the current 15-minute window.
- wait_period: For certain Twitter errors (i.e. server overload), we wait and call again.
Outputs: - call_counter: A counter that keeps track of the number of function calls in the current 15-minute window.
- time_window_start: The timestamp of the current 15-minute window.
- wait_period: For certain Twitter errors (i.e. server overload), we wait and call again.
Raises: - twython.TwythonError
"""
if e.error_code == 401:
# Encountered 401 Error (Not Authorized)
raise e
elif e.error_code == 404:
# Encountered 404 Error (Not Found)
raise e
elif e.error_code == 429:
# Encountered 429 Error (Rate Limit Exceeded)
# Sleep for 15 minutes
error_count += 0.5
call_counter = 0
wait_period = 2
time.sleep(60*15 + 5)
time_window_start = time.perf_counter()
return error_count, call_counter, time_window_start, wait_period
elif e.error_code in (500, 502, 503, 504):
error_count += 1
time.sleep(wait_period)
wait_period *= 1.5
return error_count, call_counter, time_window_start, wait_period
else:
raise e | This function handles the twitter request in case of an HTTP error.
Inputs: - e: A twython.TwythonError instance to be handled.
- error_count: Number of failed retries of the call until now.
- call_counter: A counter that keeps track of the number of function calls in the current 15-minute window.
- time_window_start: The timestamp of the current 15-minute window.
- wait_period: For certain Twitter errors (i.e. server overload), we wait and call again.
Outputs: - call_counter: A counter that keeps track of the number of function calls in the current 15-minute window.
- time_window_start: The timestamp of the current 15-minute window.
- wait_period: For certain Twitter errors (i.e. server overload), we wait and call again.
Raises: - twython.TwythonError |
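A hedged sketch of a retry loop built around this handler; the Twython client, credentials, screen name, and initial wait_period are illustrative assumptions rather than part of the original code.
import time
import twython
twitter = twython.Twython('APP_KEY', 'APP_SECRET')  # placeholder credentials
error_count, call_counter = 0, 0
wait_period = 2
time_window_start = time.perf_counter()
while error_count < 5:
    try:
        tweets = twitter.get_user_timeline(screen_name='example_user')  # hypothetical call
        call_counter += 1
        break
    except twython.TwythonError as e:
        # The helper sleeps, backs off, or re-raises, and returns updated counters.
        error_count, call_counter, time_window_start, wait_period = \
            handle_twitter_http_error(e, error_count, call_counter,
                                      time_window_start, wait_period)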
def getTextualNode(self, textId, subreference=None, prevnext=False, metadata=False):
""" Retrieve a text node from the API
:param textId: CtsTextMetadata Identifier
:type textId: str
:param subreference: CapitainsCtsPassage Reference
:type subreference: str
:param prevnext: Retrieve graph representing previous and next passage
:type prevnext: boolean
:param metadata: Retrieve metadata about the passage and the text
:type metadata: boolean
:return: CapitainsCtsPassage
:rtype: CapitainsCtsPassage
"""
text = CtsText(
urn=textId,
retriever=self.endpoint
)
if metadata or prevnext:
return text.getPassagePlus(reference=subreference)
else:
return text.getTextualNode(subreference=subreference) | Retrieve a text node from the API
:param textId: CtsTextMetadata Identifier
:type textId: str
:param subreference: CapitainsCtsPassage Reference
:type subreference: str
:param prevnext: Retrieve graph representing previous and next passage
:type prevnext: boolean
:param metadata: Retrieve metadata about the passage and the text
:type metadata: boolean
:return: CapitainsCtsPassage
:rtype: CapitainsCtsPassage |
def output_colored(code, text, is_bold=False):
"""
    Wrap text in an ANSI color escape sequence for terminal output
"""
if is_bold:
code = '1;%s' % code
    return '\033[%sm%s\033[0m' % (code, text) | Wrap text in an ANSI color escape sequence for terminal output
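A small usage sketch, assuming output_colored above is in scope; '32' and '31' are the standard ANSI codes for green and red, so this prints a bold green OK and a plain red FAILED on terminals that honour ANSI escapes.
print(output_colored('32', 'OK', is_bold=True))   # '\033[1;32mOK\033[0m'
print(output_colored('31', 'FAILED'))             # '\033[31mFAILED\033[0m'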
def fromhdf5(source, where=None, name=None, condition=None,
condvars=None, start=None, stop=None, step=None):
"""
Provides access to an HDF5 table. E.g.::
>>> import petl as etl
>>> import tables
>>> # set up a new hdf5 table to demonstrate with
... h5file = tables.open_file('example.h5', mode='w',
... title='Example file')
>>> h5file.create_group('/', 'testgroup', 'Test Group')
/testgroup (Group) 'Test Group'
children := []
>>> class FooBar(tables.IsDescription):
... foo = tables.Int32Col(pos=0)
... bar = tables.StringCol(6, pos=2)
...
>>> h5table = h5file.create_table('/testgroup', 'testtable', FooBar,
... 'Test Table')
>>> # load some data into the table
... table1 = (('foo', 'bar'),
... (1, b'asdfgh'),
... (2, b'qwerty'),
... (3, b'zxcvbn'))
>>> for row in table1[1:]:
... for i, f in enumerate(table1[0]):
... h5table.row[f] = row[i]
... h5table.row.append()
...
>>> h5file.flush()
>>> h5file.close()
>>> #
... # now demonstrate use of fromhdf5
... table1 = etl.fromhdf5('example.h5', '/testgroup', 'testtable')
>>> table1
+-----+-----------+
| foo | bar |
+=====+===========+
| 1 | b'asdfgh' |
+-----+-----------+
| 2 | b'qwerty' |
+-----+-----------+
| 3 | b'zxcvbn' |
+-----+-----------+
>>> # alternatively just specify path to table node
... table1 = etl.fromhdf5('example.h5', '/testgroup/testtable')
>>> # ...or use an existing tables.File object
... h5file = tables.open_file('example.h5')
>>> table1 = etl.fromhdf5(h5file, '/testgroup/testtable')
>>> # ...or use an existing tables.Table object
... h5tbl = h5file.get_node('/testgroup/testtable')
>>> table1 = etl.fromhdf5(h5tbl)
>>> # use a condition to filter data
... table2 = etl.fromhdf5(h5tbl, condition='foo < 3')
>>> table2
+-----+-----------+
| foo | bar |
+=====+===========+
| 1 | b'asdfgh' |
+-----+-----------+
| 2 | b'qwerty' |
+-----+-----------+
>>> h5file.close()
"""
return HDF5View(source, where=where, name=name,
condition=condition, condvars=condvars,
start=start, stop=stop, step=step) | Provides access to an HDF5 table. E.g.::
>>> import petl as etl
>>> import tables
>>> # set up a new hdf5 table to demonstrate with
... h5file = tables.open_file('example.h5', mode='w',
... title='Example file')
>>> h5file.create_group('/', 'testgroup', 'Test Group')
/testgroup (Group) 'Test Group'
children := []
>>> class FooBar(tables.IsDescription):
... foo = tables.Int32Col(pos=0)
... bar = tables.StringCol(6, pos=2)
...
>>> h5table = h5file.create_table('/testgroup', 'testtable', FooBar,
... 'Test Table')
>>> # load some data into the table
... table1 = (('foo', 'bar'),
... (1, b'asdfgh'),
... (2, b'qwerty'),
... (3, b'zxcvbn'))
>>> for row in table1[1:]:
... for i, f in enumerate(table1[0]):
... h5table.row[f] = row[i]
... h5table.row.append()
...
>>> h5file.flush()
>>> h5file.close()
>>> #
... # now demonstrate use of fromhdf5
... table1 = etl.fromhdf5('example.h5', '/testgroup', 'testtable')
>>> table1
+-----+-----------+
| foo | bar |
+=====+===========+
| 1 | b'asdfgh' |
+-----+-----------+
| 2 | b'qwerty' |
+-----+-----------+
| 3 | b'zxcvbn' |
+-----+-----------+
>>> # alternatively just specify path to table node
... table1 = etl.fromhdf5('example.h5', '/testgroup/testtable')
>>> # ...or use an existing tables.File object
... h5file = tables.open_file('example.h5')
>>> table1 = etl.fromhdf5(h5file, '/testgroup/testtable')
>>> # ...or use an existing tables.Table object
... h5tbl = h5file.get_node('/testgroup/testtable')
>>> table1 = etl.fromhdf5(h5tbl)
>>> # use a condition to filter data
... table2 = etl.fromhdf5(h5tbl, condition='foo < 3')
>>> table2
+-----+-----------+
| foo | bar |
+=====+===========+
| 1 | b'asdfgh' |
+-----+-----------+
| 2 | b'qwerty' |
+-----+-----------+
>>> h5file.close() |
def load_array_elements(self, array, start_idx, no_of_elements):
"""
Loads either a single element or a range of elements from the array.
:param array: Reference to the array.
:param start_idx: Starting index for the load.
:param no_of_elements: Number of elements to load.
"""
# concretize start index
concrete_start_idxes = self.concretize_load_idx(start_idx)
if len(concrete_start_idxes) == 1:
# only one start index
# => concrete load
concrete_start_idx = concrete_start_idxes[0]
load_values = [self._load_array_element_from_heap(array, idx)
for idx in range(concrete_start_idx, concrete_start_idx+no_of_elements)]
# if the index was symbolic before concretization, this
# constraint it to concrete start idx
self.state.solver.add(start_idx == concrete_start_idx)
else:
# multiple start indexes
# => symbolic load
# start with load values for the first concrete index
concrete_start_idx = concrete_start_idxes[0]
load_values = [self._load_array_element_from_heap(array, idx)
for idx in range(concrete_start_idx, concrete_start_idx+no_of_elements)]
start_idx_options = [concrete_start_idx == start_idx]
# update load values with all remaining start indexes
for concrete_start_idx in concrete_start_idxes[1:]:
# load values for this start index
values = [self._load_array_element_from_heap(array, idx)
for idx in range(concrete_start_idx, concrete_start_idx+no_of_elements)]
# update load values with the new ones
for i, value in enumerate(values):
# condition every value with the start idx
# => if concrete_start_idx == start_idx
# then use new value
# else use the current value
load_values[i] = self.state.solver.If(
concrete_start_idx == start_idx,
value,
load_values[i]
)
start_idx_options.append(start_idx == concrete_start_idx)
# constraint start_idx, s.t. it evals to one of the concretized indexes
constraint_on_start_idx = self.state.solver.Or(*start_idx_options)
self.state.add_constraints(constraint_on_start_idx)
return load_values | Loads either a single element or a range of elements from the array.
:param array: Reference to the array.
:param start_idx: Starting index for the load.
:param no_of_elements: Number of elements to load. |
def _get(self, scheme, host, port, path, assert_key=None):
"""
        Execute an ES API call. Convert response into JSON and
optionally assert its structure.
"""
url = '%s://%s:%i/%s' % (scheme, host, port, path)
try:
request = urllib2.Request(url)
if self.config['user'] and self.config['password']:
base64string = base64.standard_b64encode(
'%s:%s' % (self.config['user'], self.config['password']))
request.add_header("Authorization", "Basic %s" % base64string)
response = urllib2.urlopen(request)
except Exception as err:
self.log.error("%s: %s" % (url, err))
return False
try:
doc = json.load(response)
except (TypeError, ValueError):
self.log.error("Unable to parse response from elasticsearch as a" +
" json object")
return False
if assert_key and assert_key not in doc:
self.log.error("Bad response from elasticsearch, expected key "
"'%s' was missing for %s" % (assert_key, url))
return False
    return doc | Execute an ES API call. Convert response into JSON and
optionally assert its structure. |
def export(self, class_name, method_name, export_data=False,
export_dir='.', export_filename='data.json',
export_append_checksum=False, **kwargs):
"""
Port a trained estimator to the syntax of a chosen programming
language.
Parameters
----------
:param class_name : string
The name of the class in the returned result.
:param method_name : string
The name of the method in the returned result.
:param export_data : bool, default: False
Whether the model data should be saved or not.
:param export_dir : string, default: '.' (current directory)
The directory where the model data should be saved.
:param export_filename : string, default: 'data.json'
The filename of the exported model data.
:param export_append_checksum : bool, default: False
Whether to append the checksum to the filename or not.
Returns
-------
:return : string
The transpiled algorithm with the defined placeholders.
"""
# Arguments:
self.class_name = class_name
self.method_name = method_name
# Estimator:
est = self.estimator
self.n_classes = len(est.classes_)
self.n_features = len(est.feature_log_prob_[0])
temp_type = self.temp('type')
temp_arr = self.temp('arr')
temp_arr_ = self.temp('arr[]')
temp_arr__ = self.temp('arr[][]')
# Create class prior probabilities:
priors = [self.temp('type').format(self.repr(p)) for p in
est.class_log_prior_]
priors = ', '.join(priors)
self.priors = temp_arr_.format(type='double', name='priors',
values=priors)
# Create negative probabilities:
neg_prob = log(1 - exp(est.feature_log_prob_))
probs = []
for prob in neg_prob:
tmp = [temp_type.format(self.repr(p)) for p in prob]
tmp = temp_arr.format(', '.join(tmp))
probs.append(tmp)
probs = ', '.join(probs)
self.neg_probs = temp_arr__.format(type='double', name='negProbs',
values=probs)
delta_probs = (est.feature_log_prob_ - neg_prob).T
probs = []
for prob in delta_probs:
tmp = [temp_type.format(self.repr(p)) for p in prob]
tmp = temp_arr.format(', '.join(tmp))
probs.append(tmp)
probs = ', '.join(probs)
self.del_probs = temp_arr__.format(type='double', name='delProbs',
values=probs)
if self.target_method == 'predict':
# Exported:
if export_data and os.path.isdir(export_dir):
self.export_data(export_dir, export_filename,
export_append_checksum)
return self.predict('exported')
# Separated:
return self.predict('separated') | Port a trained estimator to the syntax of a chosen programming
language.
Parameters
----------
:param class_name : string
The name of the class in the returned result.
:param method_name : string
The name of the method in the returned result.
:param export_data : bool, default: False
Whether the model data should be saved or not.
:param export_dir : string, default: '.' (current directory)
The directory where the model data should be saved.
:param export_filename : string, default: 'data.json'
The filename of the exported model data.
:param export_append_checksum : bool, default: False
Whether to append the checksum to the filename or not.
Returns
-------
:return : string
The transpiled algorithm with the defined placeholders. |
def convert_path(path):
"""
Convert path to a normalized format
"""
if os.path.isabs(path):
raise Exception("Cannot include file with absolute path {}. Please use relative path instead".format((path)))
path = os.path.normpath(path)
return path | Convert path to a normalized format |
def _threaded_start(self):
'''
Spawns a worker thread to do the expiration checks
'''
self.active = True
self.thread = Thread(target=self._main_loop)
self.thread.setDaemon(True)
self.thread.start() | Spawns a worker thread to do the expiration checks |
def _is_under_root(self, full_path):
"""Guard against arbitrary file retrieval."""
if (path.abspath(full_path) + path.sep)\
.startswith(path.abspath(self.root) + path.sep):
return True
else:
return False | Guard against arbitrary file retrieval. |
def value(self, x):
"""
        converts an integer or FiniteField.Value to a value of this FiniteField.
"""
        return x if isinstance(x, FiniteField.Value) and x.field == self else FiniteField.Value(self, x) | converts an integer or FiniteField.Value to a value of this FiniteField.
def mk_pools(things, keyfnc=lambda x: x):
"Indexes a thing by the keyfnc to construct pools of things."
pools = {}
sthings = sorted(things, key=keyfnc)
for key, thingz in groupby(sthings, key=keyfnc):
pools.setdefault(key, []).extend(list(thingz))
return pools | Indexes a thing by the keyfnc to construct pools of things. |
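A usage sketch, assuming mk_pools above is in scope: grouping integers by parity shows how keyfnc drives the pooling, and each pool keeps the sorted order of its members.
pools = mk_pools([1, 2, 3, 4, 5], keyfnc=lambda x: x % 2)
print(pools)  # {0: [2, 4], 1: [1, 3, 5]}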
def _write_ini(source_dict, namespace_name=None, level=0, indent_size=4,
output_stream=sys.stdout):
"""this function prints the components of a configobj ini file. It is
    recursive for outputting the nested sections of the ini file."""
options = [
value
for value in source_dict.values()
if isinstance(value, Option)
]
options.sort(key=lambda x: x.name)
indent_spacer = " " * (level * indent_size)
for an_option in options:
print("%s# %s" % (indent_spacer, an_option.doc),
file=output_stream)
option_value = to_str(an_option)
if an_option.reference_value_from:
print(
'%s# see "%s.%s" for the default or override it here' % (
indent_spacer,
an_option.reference_value_from,
an_option.name
),
file=output_stream
)
if an_option.likely_to_be_changed or an_option.has_changed:
option_format = '%s%s=%s\n'
else:
option_format = '%s#%s=%s\n'
if isinstance(option_value, six.string_types) and \
',' in option_value:
# quote lists unless they're already quoted
if option_value[0] not in '\'"':
option_value = '"%s"' % option_value
print(option_format % (indent_spacer, an_option.name,
option_value),
file=output_stream)
next_level = level + 1
namespaces = [
(key, value)
for key, value in source_dict.items()
if isinstance(value, Namespace)
]
namespaces.sort(key=ValueSource._namespace_reference_value_from_sort)
for key, namespace in namespaces:
next_level_spacer = " " * next_level * indent_size
print("%s%s%s%s\n" % (indent_spacer, "[" * next_level, key,
"]" * next_level),
file=output_stream)
if namespace._doc:
print("%s%s" % (next_level_spacer, namespace._doc),
file=output_stream)
if namespace._reference_value_from:
print("%s#+include ./common_%s.ini\n"
% (next_level_spacer, key), file=output_stream)
if namespace_name:
ValueSource._write_ini(
source_dict=namespace,
namespace_name="%s.%s" % (namespace_name, key),
level=level+1,
indent_size=indent_size,
output_stream=output_stream
)
else:
ValueSource._write_ini(
source_dict=namespace,
namespace_name=key,
level=level+1,
indent_size=indent_size,
output_stream=output_stream
) | this function prints the components of a configobj ini file. It is
    recursive for outputting the nested sections of the ini file. |
def release(ctx, yes, latest):
"""Create a new release in github
"""
m = RepoManager(ctx.obj['agile'])
api = m.github_repo()
if latest:
latest = api.releases.latest()
if latest:
click.echo(latest['tag_name'])
elif m.can_release('sandbox'):
branch = m.info['branch']
version = m.validate_version()
name = 'v%s' % version
body = ['Release %s from agiletoolkit' % name]
data = dict(
tag_name=name,
target_commitish=branch,
name=name,
body='\n\n'.join(body),
draft=False,
prerelease=False
)
if yes:
data = api.releases.create(data=data)
m.message('Successfully created a new Github release')
click.echo(niceJson(data))
else:
click.echo('skipped') | Create a new release in github |
def status(queue, munin, munin_config):
"""List queued tasks aggregated by name"""
if munin_config:
return status_print_config(queue)
queues = get_queues(queue)
for queue in queues:
status_print_queue(queue, munin=munin)
if not munin:
print('-' * 40) | List queued tasks aggregated by name |
def connect(self):
"""
Connect the client to IBM Watson IoT Platform using the underlying Paho MQTT client
# Raises
ConnectionException: If there is a problem establishing the connection.
"""
self.logger.debug(
"Connecting... (address = %s, port = %s, clientId = %s, username = %s)"
% (self.address, self.port, self.clientId, self.username)
)
try:
self.connectEvent.clear()
self.client.connect(self.address, port=self.port, keepalive=self.keepAlive)
self.client.loop_start()
if not self.connectEvent.wait(timeout=30):
self.client.loop_stop()
self._logAndRaiseException(
ConnectionException(
"Operation timed out connecting to IBM Watson IoT Platform: %s" % (self.address)
)
)
except socket.error as serr:
self.client.loop_stop()
self._logAndRaiseException(
ConnectionException("Failed to connect to IBM Watson IoT Platform: %s - %s" % (self.address, str(serr)))
) | Connect the client to IBM Watson IoT Platform using the underlying Paho MQTT client
# Raises
ConnectionException: If there is a problem establishing the connection. |
def split_on_condition(seq, condition):
"""Split a sequence into two iterables without looping twice"""
l1, l2 = tee((condition(item), item) for item in seq)
return (i for p, i in l1 if p), (i for p, i in l2 if not p) | Split a sequence into two iterables without looping twice |
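A usage sketch, assuming split_on_condition above is in scope; both results are lazy generators over the same teed iterator, so the condition is evaluated only once per input item.
evens, odds = split_on_condition(range(10), lambda x: x % 2 == 0)
print(list(evens))  # [0, 2, 4, 6, 8]
print(list(odds))   # [1, 3, 5, 7, 9]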
def html5_serialize_simple_color(simple_color):
"""
Apply the serialization algorithm for a simple color from section
2.4.6 of HTML5.
"""
red, green, blue = simple_color
# 1. Let result be a string consisting of a single "#" (U+0023)
# character.
result = u'#'
# 2. Convert the red, green, and blue components in turn to
# two-digit hexadecimal numbers using lowercase ASCII hex
# digits, zero-padding if necessary, and append these numbers
# to result, in the order red, green, blue.
format_string = '{:02x}'
result += format_string.format(red)
result += format_string.format(green)
result += format_string.format(blue)
# 3. Return result, which will be a valid lowercase simple color.
return result | Apply the serialization algorithm for a simple color from section
2.4.6 of HTML5. |
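A usage sketch, assuming the function above is in scope; a simple color is a 3-tuple of integers in 0-255, and the result is always a lowercase seven-character string.
print(html5_serialize_simple_color((255, 0, 128)))  # '#ff0080'
print(html5_serialize_simple_color((0, 0, 0)))      # '#000000'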
def walk(self, top, topdown=True, ignore_file_handler=None):
"""Directory tree generator.
See `os.walk` for the docs. Differences:
- no support for symlinks
- it could raise exceptions, there is no onerror argument
"""
tree = self.git_object_by_path(top)
if tree is None:
raise IOError(errno.ENOENT, "No such file")
for x in self._walk(tree, topdown):
yield x | Directory tree generator.
See `os.walk` for the docs. Differences:
- no support for symlinks
- it could raise exceptions, there is no onerror argument |
def _restore_base_estimators(self, kernel_cache, out, X, cv):
"""Restore custom kernel functions of estimators for predictions"""
train_folds = {fold: train_index for fold, (train_index, _) in enumerate(cv)}
for idx, fold, _, est in out:
if idx in kernel_cache:
if not hasattr(est, 'fit_X_'):
raise ValueError(
'estimator %s uses a custom kernel function, '
'but does not have the attribute `fit_X_` after training' % self.base_estimators[idx][0])
est.set_params(kernel=self.base_estimators[idx][1].kernel)
est.fit_X_ = X[train_folds[fold]]
return out | Restore custom kernel functions of estimators for predictions |
def revise_helper(query):
"""
given sql containing a "CREATE TABLE {table_name} AS ({query})"
returns table_name, query
"""
match = re.search(extract_sql_regex, query, re.DOTALL | re.I)
return match.group(1), match.group(2) | given sql containing a "CREATE TABLE {table_name} AS ({query})"
returns table_name, query |
async def analog_write(self, pin, value):
"""
Set the selected pin to the specified value.
:param pin: PWM pin number
:param value: Pin value (0 - 0x4000)
:returns: No return value
"""
if PrivateConstants.ANALOG_MESSAGE + pin < 0xf0:
command = [PrivateConstants.ANALOG_MESSAGE + pin, value & 0x7f,
(value >> 7) & 0x7f]
await self._send_command(command)
else:
await self.extended_analog(pin, value) | Set the selected pin to the specified value.
:param pin: PWM pin number
:param value: Pin value (0 - 0x4000)
:returns: No return value |
def convert_timezone(date_str, tz_from, tz_to="UTC", fmt=None):
""" get timezone as tz_offset """
tz_offset = datetime_to_timezone(
datetime.datetime.now(), tz=tz_from).strftime('%z')
tz_offset = tz_offset[:3] + ':' + tz_offset[3:]
date = parse_date(str(date_str) + tz_offset)
if tz_from != tz_to:
date = datetime_to_timezone(date, tz_to)
if isinstance(fmt, str):
return date.strftime(fmt)
    return date | Convert a date string from tz_from to tz_to, optionally formatting the result.
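A hedged usage sketch, assuming convert_timezone and its helpers (parse_date, datetime_to_timezone) are importable and accept pytz-style zone names.
result = convert_timezone('2021-06-01 12:00:00', 'US/Eastern', 'UTC',
                          fmt='%Y-%m-%d %H:%M:%S')
print(result)  # '2021-06-01 16:00:00' when the current US/Eastern offset is -04:00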
def dump_code(disassembly, pc = None,
bLowercase = True,
bits = None):
"""
Dump a disassembly. Optionally mark where the program counter is.
@type disassembly: list of tuple( int, int, str, str )
@param disassembly: Disassembly dump as returned by
L{Process.disassemble} or L{Thread.disassemble_around_pc}.
@type pc: int
@param pc: (Optional) Program counter.
@type bLowercase: bool
@param bLowercase: (Optional) If C{True} convert the code to lowercase.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging.
"""
if not disassembly:
return ''
table = Table(sep = ' | ')
for (addr, size, code, dump) in disassembly:
if bLowercase:
code = code.lower()
if addr == pc:
addr = ' * %s' % HexDump.address(addr, bits)
else:
addr = ' %s' % HexDump.address(addr, bits)
table.addRow(addr, dump, code)
table.justify(1, 1)
return table.getOutput() | Dump a disassembly. Optionally mark where the program counter is.
@type disassembly: list of tuple( int, int, str, str )
@param disassembly: Disassembly dump as returned by
L{Process.disassemble} or L{Thread.disassemble_around_pc}.
@type pc: int
@param pc: (Optional) Program counter.
@type bLowercase: bool
@param bLowercase: (Optional) If C{True} convert the code to lowercase.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging. |
def get_check(self, check):
"""
Returns an instance of the specified check.
"""
chk = self._check_manager.get(check)
chk.set_entity(self)
return chk | Returns an instance of the specified check. |
def log_cdf_laplace(x, name="log_cdf_laplace"):
"""Log Laplace distribution function.
This function calculates `Log[L(x)]`, where `L(x)` is the cumulative
distribution function of the Laplace distribution, i.e.
```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```
For numerical accuracy, `L(x)` is computed in different ways depending on `x`,
```
x <= 0:
Log[L(x)] = Log[0.5] + x, which is exact
0 < x:
Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="log_ndtr").
Returns:
`Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
"""
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name="x")
# For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.
lower_solution = -np.log(2.) + x
# safe_exp_neg_x = exp{-x} for x > 0, but is
# bounded above by 1, which avoids
# log[1 - 1] = -inf for x = log(1/2), AND
# exp{-x} --> inf, for x << -1
safe_exp_neg_x = tf.exp(-tf.abs(x))
    # log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used
# internally by log1p, rather than being done explicitly here.
upper_solution = tf.math.log1p(-0.5 * safe_exp_neg_x)
return tf.where(x < 0., lower_solution, upper_solution) | Log Laplace distribution function.
This function calculates `Log[L(x)]`, where `L(x)` is the cumulative
distribution function of the Laplace distribution, i.e.
```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```
For numerical accuracy, `L(x)` is computed in different ways depending on `x`,
```
x <= 0:
Log[L(x)] = Log[0.5] + x, which is exact
0 < x:
Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="log_ndtr").
Returns:
`Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled. |
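A short numeric check, assuming TensorFlow 2.x eager execution and that log_cdf_laplace above is in scope; for x <= 0 the result is exactly log(0.5) + x, and for x > 0 it is log(1 - 0.5 * exp(-x)).
import numpy as np
import tensorflow as tf
x = tf.constant([-2.0, 0.0, 3.0], dtype=tf.float64)
y = log_cdf_laplace(x)
print(np.round(y.numpy(), 4))  # roughly [-2.6931, -0.6931, -0.0252]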
def fopen(name, mode='r', buffering=-1):
"""Similar to Python's built-in `open()` function."""
f = _fopen(name, mode, buffering)
return _FileObjectThreadWithContext(f, mode, buffering) | Similar to Python's built-in `open()` function. |
def remove_input_link(self, process_code, input_code):
"""
Remove an input (technosphere or biosphere exchange) from a process, resolving all parameter issues
"""
# 1. find correct process
# 2. find correct exchange
# 3. remove that exchange
# 4. check for parameter conflicts?
# 4. run parameter scan to rebuild matrices?
#print(process_code, input_code)
process = self.database['items'][process_code]
exchanges = process['exchanges']
initial_count = len(exchanges)
new_exchanges = [e for e in exchanges if e['input'] != input_code]
product_code = [e['input'] for e in exchanges if e['type'] == 'production'][0]
#print(product_code)
param_id = [k for k, v in self.params.items() if (v['from'] == input_code[1] and v['to'] == product_code[1])][0]
#print (param_id)
problem_functions = self.check_param_function_use(param_id)
if len(problem_functions) != 0:
#print('the following functions have been removed:')
for p in problem_functions:
self.params[p[0]]['function'] = None
#print(p)
process['exchanges'] = new_exchanges
del self.params[param_id]
self.parameter_scan()
return initial_count - len(new_exchanges) | Remove an input (technosphere or biosphere exchange) from a process, resolving all parameter issues |
def run(self, start_command_srv):
""" Setup daemon process, start child forks, and sleep until
events are signalled.
`start_command_srv`
Set to ``True`` if command server should be started.
"""
if start_command_srv:
# note, this must be established *before* the task runner is forked
# so the task runner can communicate with the command server.
# fork the command server
self._command_server.start()
# drop root privileges; command server will remain as the only
# daemon process with root privileges. while root plugins have root
# shell access, they are known and the commands are logged by the
# command server.
self._drop_privs()
# fork the task runner
self._task_runner.start()
# setup signal handlers
self._reg_sighandlers()
while self.running:
time.sleep(self._sleep_period)
self.shutdown() | Setup daemon process, start child forks, and sleep until
events are signalled.
`start_command_srv`
Set to ``True`` if command server should be started. |
def fetchallfirstvalues(self, sql: str, *args) -> List[Any]:
"""Executes SQL; returns list of first values of each row."""
rows = self.fetchall(sql, *args)
return [row[0] for row in rows] | Executes SQL; returns list of first values of each row. |
def _handle_template_param_value(self):
"""Handle a template parameter's value at the head of the string."""
self._emit_all(self._pop())
self._context ^= contexts.TEMPLATE_PARAM_KEY
self._context |= contexts.TEMPLATE_PARAM_VALUE
self._emit(tokens.TemplateParamEquals()) | Handle a template parameter's value at the head of the string. |
def exception_log_and_respond(exception, logger, message, status_code):
"""Log an error and send jsonified respond."""
logger.error(message, exc_info=True)
return make_response(
message,
status_code,
dict(exception_type=type(exception).__name__, exception_message=str(exception)),
    ) | Log an error and send a jsonified response.
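A hedged sketch of how this helper might be used inside a Flask view; the route and the risky_operation() call are illustrative assumptions, not part of the original code.
import logging
from flask import Flask
app = Flask(__name__)
logger = logging.getLogger(__name__)
@app.route('/work')
def work():
    try:
        risky_operation()  # hypothetical call that may raise
        return 'ok'
    except Exception as exc:
        return exception_log_and_respond(exc, logger,
                                         'Failed to process /work request', 500)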
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.interpreter = 'python3.6'
session.install('coverage', 'pytest-cov')
session.run('coverage', 'report', '--show-missing', '--fail-under=100')
session.run('coverage', 'erase') | Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data. |
def run_coroutine_threadsafe(coro, loop):
"""Submit a coroutine object to a given event loop.
Return a concurrent.futures.Future to access the result.
"""
if not asyncio.iscoroutine(coro):
raise TypeError('A coroutine object is required')
future = concurrent.futures.Future()
def callback():
try:
_chain_future(asyncio.ensure_future(coro, loop=loop), future)
except Exception as exc:
if future.set_running_or_notify_cancel():
future.set_exception(exc)
raise
loop.call_soon_threadsafe(callback)
return future | Submit a coroutine object to a given event loop.
Return a concurrent.futures.Future to access the result. |
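A usage sketch, assuming the run_coroutine_threadsafe above and its helpers are in scope (it mirrors asyncio's standard helper of the same name): an event loop runs in a background thread and a coroutine is submitted to it from the main thread.
import asyncio
import threading
async def add(a, b):
    await asyncio.sleep(0.1)
    return a + b
loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()
future = run_coroutine_threadsafe(add(2, 3), loop)
print(future.result(timeout=5))  # 5
loop.call_soon_threadsafe(loop.stop)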
def add_bindings(self, g: Graph) -> "PrefixLibrary":
""" Add bindings in the library to the graph
:param g: graph to add prefixes to
:return: PrefixLibrary object
"""
for prefix, namespace in self:
g.bind(prefix.lower(), namespace)
return self | Add bindings in the library to the graph
:param g: graph to add prefixes to
:return: PrefixLibrary object |
def _debug_check(self):
"""
Iterates over list checking segments with same sort do not overlap
:raise: Exception: if segments overlap space with same sort
"""
# old_start = 0
old_end = 0
old_sort = ""
for segment in self._list:
if segment.start <= old_end and segment.sort == old_sort:
raise AngrCFGError("Error in SegmentList: blocks are not merged")
# old_start = start
old_end = segment.end
old_sort = segment.sort | Iterates over list checking segments with same sort do not overlap
:raise: Exception: if segments overlap space with same sort |
def Normal(cls,
mean: 'TensorFluent', variance: 'TensorFluent',
batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:
'''Returns a TensorFluent for the Normal sampling op with given mean and variance.
Args:
mean: The mean parameter of the Normal distribution.
variance: The variance parameter of the Normal distribution.
batch_size: The size of the batch (optional).
Returns:
The Normal distribution and a TensorFluent sample drawn from the distribution.
Raises:
ValueError: If parameters do not have the same scope.
'''
if mean.scope != variance.scope:
raise ValueError('Normal distribution: parameters must have same scope!')
loc = mean.tensor
scale = tf.sqrt(variance.tensor)
dist = tf.distributions.Normal(loc, scale)
batch = mean.batch or variance.batch
if not batch and batch_size is not None:
t = dist.sample(batch_size)
batch = True
else:
t = dist.sample()
scope = mean.scope.as_list()
return (dist, TensorFluent(t, scope, batch=batch)) | Returns a TensorFluent for the Normal sampling op with given mean and variance.
Args:
mean: The mean parameter of the Normal distribution.
variance: The variance parameter of the Normal distribution.
batch_size: The size of the batch (optional).
Returns:
The Normal distribution and a TensorFluent sample drawn from the distribution.
Raises:
ValueError: If parameters do not have the same scope. |
def _get_primary_index_in_altered_table(self, diff):
"""
:param diff: The table diff
:type diff: orator.dbal.table_diff.TableDiff
:rtype: dict
"""
primary_index = {}
for index in self._get_indexes_in_altered_table(diff).values():
if index.is_primary():
primary_index = {index.get_name(): index}
return primary_index | :param diff: The table diff
:type diff: orator.dbal.table_diff.TableDiff
:rtype: dict |
def contains(self, column, value):
"""
Set the main dataframe instance to rows that contains a string
value in a column
"""
df = self.df[self.df[column].str.contains(value) == True]
if df is None:
self.err("Can not select contained data")
return
self.df = df | Set the main dataframe instance to rows that contains a string
value in a column |
def setup(app):
"""Install the plugin.
:param app: Sphinx application context.
"""
app.info('Initializing GitHub plugin')
app.add_role('ghissue', ghissue_role)
app.add_role('ghpull', ghissue_role)
app.add_role('ghuser', ghuser_role)
app.add_role('ghcommit', ghcommit_role)
app.add_config_value('github_project_url', None, 'env')
return | Install the plugin.
:param app: Sphinx application context. |
def cyan(cls, string, auto=False):
"""Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color
"""
return cls.colorize('cyan', string, auto=auto) | Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color |
def require_dataset(self, name, shape, dtype=None, exact=False, **kwargs):
"""Obtain an array, creating if it doesn't exist. Other `kwargs` are
as per :func:`zarr.hierarchy.Group.create_dataset`.
Parameters
----------
name : string
Array name.
shape : int or tuple of ints
Array shape.
dtype : string or dtype, optional
NumPy dtype.
exact : bool, optional
If True, require `dtype` to match exactly. If false, require
`dtype` can be cast from array dtype.
"""
return self._write_op(self._require_dataset_nosync, name, shape=shape,
dtype=dtype, exact=exact, **kwargs) | Obtain an array, creating if it doesn't exist. Other `kwargs` are
as per :func:`zarr.hierarchy.Group.create_dataset`.
Parameters
----------
name : string
Array name.
shape : int or tuple of ints
Array shape.
dtype : string or dtype, optional
NumPy dtype.
exact : bool, optional
If True, require `dtype` to match exactly. If false, require
`dtype` can be cast from array dtype. |
def submit_order(id_or_ins, amount, side, price=None, position_effect=None):
"""
    General order placement function; a strategy can freely choose the parameters it needs to submit an order.
    :param id_or_ins: the instrument to place an order for
    :type id_or_ins: :class:`~Instrument` object | `str`
    :param float amount: order quantity, must be a positive number
    :param side: order direction, long (SIDE.BUY) or short (SIDE.SELL)
    :type side: :class:`~SIDE` enum
    :param float price: order price; defaults to None, which means a market order
    :param position_effect: position effect, open (POSITION_EFFECT.OPEN), close (POSITION.CLOSE) or close today (POSITION_EFFECT.CLOSE_TODAY); not needed when trading stocks
    :type position_effect: :class:`~POSITION_EFFECT` enum
    :return: :class:`~Order` object | None
    :example:
    .. code-block:: python
        # Buy 2000 shares of Ping An Bank and send the order as a market order:
        submit_order('000001.XSHE', 2000, SIDE.BUY)
        # Close 10 lots of today's long position in RB1812, sending a limit order at a price of 4000
        submit_order('RB1812', 10, SIDE.SELL, price=4000, position_effect=POSITION_EFFECT.CLOSE_TODAY)
"""
order_book_id = assure_order_book_id(id_or_ins)
env = Environment.get_instance()
if (
env.config.base.run_type != RUN_TYPE.BACKTEST
and env.get_instrument(order_book_id).type == "Future"
):
if "88" in order_book_id:
raise RQInvalidArgument(
_(u"Main Future contracts[88] are not supported in paper trading.")
)
if "99" in order_book_id:
raise RQInvalidArgument(
_(u"Index Future contracts[99] are not supported in paper trading.")
)
style = cal_style(price, None)
market_price = env.get_last_price(order_book_id)
if not is_valid_price(market_price):
user_system_log.warn(
_(u"Order Creation Failed: [{order_book_id}] No market data").format(
order_book_id=order_book_id
)
)
return
amount = int(amount)
order = Order.__from_create__(
order_book_id=order_book_id,
quantity=amount,
side=side,
style=style,
position_effect=position_effect,
)
if order.type == ORDER_TYPE.MARKET:
order.set_frozen_price(market_price)
if env.can_submit_order(order):
env.broker.submit_order(order)
    return order | General order placement function; a strategy can freely choose the parameters it needs to submit an order.
    :param id_or_ins: the instrument to place an order for
    :type id_or_ins: :class:`~Instrument` object | `str`
    :param float amount: order quantity, must be a positive number
    :param side: order direction, long (SIDE.BUY) or short (SIDE.SELL)
    :type side: :class:`~SIDE` enum
    :param float price: order price; defaults to None, which means a market order
    :param position_effect: position effect, open (POSITION_EFFECT.OPEN), close (POSITION.CLOSE) or close today (POSITION_EFFECT.CLOSE_TODAY); not needed when trading stocks
    :type position_effect: :class:`~POSITION_EFFECT` enum
    :return: :class:`~Order` object | None
    :example:
    .. code-block:: python
        # Buy 2000 shares of Ping An Bank and send the order as a market order:
        submit_order('000001.XSHE', 2000, SIDE.BUY)
        # Close 10 lots of today's long position in RB1812, sending a limit order at a price of 4000
submit_order('RB1812', 10, SIDE.SELL, price=4000, position_effect=POSITION_EFFECT.CLOSE_TODAY) |
def called_alts_from_genotype(self):
'''Returns a set of the (maybe REF and) ALT strings that were called, using GT in FORMAT.
Returns None if GT not in the record'''
if 'GT' not in self.FORMAT:
return None
genotype_indexes = set([int(x) for x in self.FORMAT['GT'].split('/')])
alts = set()
for i in genotype_indexes:
if i == 0:
alts.add(self.REF)
else:
alts.add(self.ALT[i-1])
return alts | Returns a set of the (maybe REF and) ALT strings that were called, using GT in FORMAT.
Returns None if GT not in the record |
def get_2d_markers_linearized(
self, component_info=None, data=None, component_position=None, index=None
):
"""Get 2D linearized markers.
:param index: Specify which camera to get 2D from, will be returned as
first entry in the returned array.
"""
return self._get_2d_markers(
data, component_info, component_position, index=index
) | Get 2D linearized markers.
:param index: Specify which camera to get 2D from, will be returned as
first entry in the returned array. |
def _create_table_and_update_context(node, context):
"""Create an aliased table for a SqlNode.
Updates the relevant Selectable global context.
Args:
node: SqlNode, the current node.
context: CompilationContext, global compilation state and metadata.
Returns:
Table, the newly aliased SQLAlchemy table.
"""
schema_type_name = sql_context_helpers.get_schema_type_name(node, context)
table = context.compiler_metadata.get_table(schema_type_name).alias()
context.query_path_to_selectable[node.query_path] = table
return table | Create an aliased table for a SqlNode.
Updates the relevant Selectable global context.
Args:
node: SqlNode, the current node.
context: CompilationContext, global compilation state and metadata.
Returns:
Table, the newly aliased SQLAlchemy table. |
def vx(self,*args,**kwargs):
"""
NAME:
vx
PURPOSE:
return x velocity at time t
INPUT:
t - (optional) time at which to get the velocity (can be Quantity)
vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity)
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
vx(t)
HISTORY:
2010-11-30 - Written - Bovy (NYU)
"""
out= self._orb.vx(*args,**kwargs)
if len(out) == 1: return out[0]
else: return out | NAME:
vx
PURPOSE:
return x velocity at time t
INPUT:
t - (optional) time at which to get the velocity (can be Quantity)
vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity)
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
vx(t)
HISTORY:
2010-11-30 - Written - Bovy (NYU) |