docstring | function | __index_level_0__
---|---|---|
Generate a file in the defined format representing the report of the pipeline(s).
Args:
store (Store): report data.
report_format (str): currently only "html" is supported.
path (str): path where to write the report to. Missing sub-folders will be created.
|
def generate(store, report_format, path):
success = False
if report_format in ['html']:
rendered_content = {
'html': generate_html
}[report_format](store)
if not os.path.isdir(path):
os.makedirs(path)
if rendered_content is not None:
# writing report file
with open(os.path.join(path, 'pipeline.' + report_format), 'w') as handle:
handle.write(rendered_content)
success = True
else:
Logger.get_logger(__name__).error("Unknown report format %s", report_format)
return success
| 829,938 |
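A brief usage sketch (assumes `store` is a populated Store instance from the surrounding package; the output directory is a placeholder):
ok = generate(store, 'html', 'build/report')
# on success, build/report/pipeline.html has been written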
Find rule for given condition.
Args:
condition (str): Python condition as string.
Returns:
str, list, function: found rule name, list of AST tokens for condition
and verification function.
|
def find_rule(condition):
final_condition = re.sub('{{.*}}', '42', condition)
ast_tokens = Condition.get_tokens(final_condition)
ast_compressed_tokens = Condition.compress_tokens(ast_tokens)
name = 'undefined'
function = lambda tokens: False
if len(ast_compressed_tokens) > 0:
for rule in Condition.RULES:
if Condition.match_tokens(ast_compressed_tokens, rule['types']):
name = rule['name']
function = rule['evaluate']
break
return name, ast_tokens, function
| 829,947 |
Get creator function by name.
Args:
name (str): name of the creator function.
Returns:
function: creator function.
|
def get_creator_by_name(name):
return {'docker(container)': Container.creator,
'shell': Bash.creator, 'docker(image)': Image.creator,
'python': Script.creator, 'packer': Packer.creator,
'ansible(simple)': Ansible.creator}[name]
| 829,961 |
Copying and merging environment variables.
Args:
include_os (bool): when True, also include the system (OS) environment variables (default: False)
Returns:
dict: environment variables as defined in the pipeline
(optional including system environment variables).
|
def get_merged_env(self, include_os=False):
env = {}
if include_os:
env.update(os.environ.copy())
for level in range(3):
env.update(self.pipeline.data.env_list[level].copy())
return env
| 829,964 |
Saving output for configured variable name.
Args:
shell_entry(dict): shell based configuration (shell, docker container or Python).
output (list): list of strings representing the output of the last shell command.
|
def __handle_variable(self, shell_entry, output):
if 'variable' in shell_entry:
variable_name = shell_entry['variable']
self.pipeline.variables[variable_name] = "\n".join(output)
| 829,973 |
Adding all files from given path to the object.
Args:
path (str): valid, existing directory
path_filter (callable): optional predicate taking the full path and filename; when given, only files for which it returns True are added.
|
def add_path(self, path, path_filter=None):
for root, _, files in os.walk(path):
for filename in files:
full_path_and_filename = os.path.join(root, filename)
if path_filter is None or path_filter(full_path_and_filename):
relative_path_and_filename = full_path_and_filename.replace(path + '/', '')
with open(full_path_and_filename, 'rb') as handle:
self.files[relative_path_and_filename] = b64encode(handle.read()).decode('utf-8')
| 830,032 |
Saving stored files at a given path (relative paths are added).
Args:
path (str): root path where to save the files.
|
def save(self, path):
for relative_path_and_filename, content in self.files.items():
full_path_and_filename = os.path.join(path, relative_path_and_filename)
full_path = os.path.dirname(full_path_and_filename)
if not os.path.isdir(full_path):
os.makedirs(full_path)
with open(full_path_and_filename, 'wb') as handle:
handle.write(b64decode(content))
| 830,033 |
Convert JSON into an in-memory file storage.
Args:
data (str): valid JSON with path and filenames and
the base64 encoding of the file content.
Returns:
InMemoryFiles: in memory file storage
|
def from_json(data):
memfiles = InMemoryFiles()
memfiles.files = json.loads(data)
return memfiles
| 830,034 |
Acquire the :attr:`lock`
Args:
blocking (bool): See :meth:`threading.Lock.acquire`
timeout (float): See :meth:`threading.Lock.acquire`
Returns:
bool: :obj:`True` if the lock was acquired, otherwise :obj:`False`
|
def acquire(self, blocking=True, timeout=-1):
result = self.lock.acquire(blocking, timeout)
return result
| 830,094 |
Triggers any stored :class:`waiters <AioEventWaiter>`
Calls :meth:`AioEventWaiter.trigger` method on all instances stored in
:attr:`waiters`. After completion, the :attr:`waiters` are removed.
Args:
*args: Positional arguments to pass to :meth:`AioEventWaiter.trigger`
**kwargs: Keyword arguments to pass to :meth:`AioEventWaiter.trigger`
|
def __call__(self, *args, **kwargs):
with self.lock:
for waiter in self.waiters:
waiter.trigger(*args, **kwargs)
self.waiters.clear()
| 830,099 |
Add a coroutine function
Args:
loop: The :class:`event loop <asyncio.BaseEventLoop>` instance
on which to schedule callbacks
callback: The :term:`coroutine function` to add
|
def add_method(self, loop, callback):
f, obj = get_method_vars(callback)
wrkey = (f, id(obj))
self[wrkey] = obj
self.event_loop_map[wrkey] = loop
| 830,101 |
Triggers all stored callbacks (coroutines)
Args:
*args: Positional arguments to pass to callbacks
**kwargs: Keyword arguments to pass to callbacks
|
def __call__(self, *args, **kwargs):
for loop, m in self.iter_methods():
coro = m(*args, **kwargs)
self.submit_coroutine(coro, loop)
| 830,104 |
Returns a map Layer.
Arguments:
queryset -- QuerySet for Layer
Keyword args:
stylename -- str name of style to apply
|
def layer(self, queryset, stylename=None):
cls = RasterLayer if hasattr(queryset, 'image') else VectorLayer
layer = cls(queryset, style=stylename)
try:
style = self.map.find_style(layer.stylename)
except KeyError:
self.map.append_style(layer.stylename, layer.style())
layer.styles.append(layer.stylename)
self.map.layers.append(layer._layer)
return layer
| 831,133 |
Zoom map to geometry extent.
Arguments:
bbox -- OGRGeometry polygon to zoom map extent
|
def zoom_bbox(self, bbox):
try:
bbox.transform(self.map.srs)
except gdal.GDALException:
pass
else:
self.map.zoom_to_box(mapnik.Box2d(*bbox.extent))
| 831,135 |
Returns a 1D array with higher dimensions aggregated using stat fn.
Arguments:
arr -- ndarray
stat -- numpy or numpy.ma function as str to call
|
def agg_dims(arr, stat):
axis = None
if arr.ndim > 2:
axis = 1
arr = arr.reshape(arr.shape[0], -1)
module = np.ma if hasattr(arr, 'mask') else np
return getattr(module, stat)(arr, axis)
| 834,955 |
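A small self-contained sketch of how `agg_dims` collapses higher dimensions (plain numpy, no masked values):
import numpy as np
# 3 time steps of 2x2 rasters -> one 'mean' value per time step
arr = np.arange(12).reshape(3, 2, 2)
print(agg_dims(arr, 'mean'))    # -> [1.5 5.5 9.5]
# A 2D array is aggregated over all elements (axis=None)
print(agg_dims(arr[0], 'max'))  # -> 3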
Returns a GeoQuerySet intersecting a tile boundary.
Arguments:
bbox -- tile extent as geometry
Keyword args:
z -- tile zoom level used as basis for geometry simplification
format -- vector tile format as str (pbf, geojson)
clip -- clip geometries to tile boundary as boolean
|
def tile(self, bbox, z=0, format=None, clip=True):
# Tile grid uses 3857, but GeoJSON coordinates should be in 4326.
tile_srid = 3857
bbox = getattr(bbox, 'geos', bbox)
clone = filter_geometry(self, intersects=bbox)
field = clone.geo_field
srid = field.srid
sql = field.name
try:
tilew = self.tilewidths[z]
except IndexError:
tilew = self.tilewidths[-1]
if bbox.srid != srid:
bbox = bbox.transform(srid, clone=True)
# Estimate tile width in degrees instead of meters.
if bbox.srs.geographic:
p = geos.Point(tilew, tilew, srid=tile_srid)
p.transform(srid)
tilew = p.x
if clip:
bufbox = bbox.buffer(tilew)
sql = geofn.Intersection(sql, bufbox.envelope)
sql = SimplifyPreserveTopology(sql, tilew)
if format == 'pbf':
return clone.pbf(bbox, geo_col=sql)
sql = geofn.Transform(sql, 4326)
return clone.annotate(**{format: sql})
| 834,959 |
Returns list of ndarrays averaged to a given number of periods.
Arguments:
periods -- desired number of periods as int
|
def aggregate_periods(self, periods):
try:
fieldname = self.raster_field.name
except TypeError:
raise exceptions.FieldDoesNotExist('Raster field not found')
arrays = self.arrays(fieldname)
arr = arrays[0]
if len(arrays) > 1:
if getattr(arr, 'ndim', 0) > 2:
arrays = np.vstack(arrays)
fill = getattr(arr, 'fill_value', None)
arr = np.ma.masked_values(arrays, fill, copy=False)
# Try to reshape using equal sizes first and fall back to unequal
# splits.
try:
means = arr.reshape((periods, -1)).mean(axis=1)
except ValueError:
means = np.array([a.mean() for a in np.array_split(arr, periods)])
obj = self[0]
setattr(obj, fieldname, means)
return [obj]
| 834,961 |
Returns a new RasterQuerySet with subsetted/summarized ndarrays.
Arguments:
geom -- geometry for masking or spatial subsetting
Keyword args:
stat -- any numpy summary stat method as str (min/max/mean/etc)
|
def summarize(self, geom, stat=None):
if not hasattr(geom, 'num_coords'):
raise TypeError('Need OGR or GEOS geometry, %s found' % type(geom))
clone = self._clone()
for obj in clone:
arr = obj.array(geom)
if arr is not None:
if stat:
arr = agg_dims(arr, stat)
try:
arr = arr.squeeze()
except ValueError:
pass
obj.image = arr
return clone
| 834,964 |
Generate a number in the range [0, num_buckets).
Args:
key (int): The key to hash.
num_buckets (int): Number of buckets to use.
Returns:
The bucket number `key` computes to.
Raises:
ValueError: If `num_buckets` is not a positive number.
|
def py_hash(key, num_buckets):
b, j = -1, 0
if num_buckets < 1:
raise ValueError('num_buckets must be a positive number')
while j < num_buckets:
b = int(j)
key = ((key * long(2862933555777941757)) + 1) & 0xffffffffffffffff
j = float(b + 1) * (float(1 << 31) / float((key >> 33) + 1))
return int(b)
| 836,141 |
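A short sketch of the jump-consistent-hash property the function provides: when the bucket count grows, most keys keep their bucket. The body above relies on `long` (a Python 2 built-in); the sketch assumes the module aliases it with `long = int` on Python 3.
long = int  # assumed alias so py_hash runs on Python 3
moved = sum(py_hash(key, 10) != py_hash(key, 11) for key in range(10000))
# roughly 1/11 of the keys should land in a different bucket
print(moved / 10000.0)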
Returns a Feature or FeatureCollection.
Arguments:
data -- Sequence or Mapping of Feature-like or FeatureCollection-like data
|
def as_feature(data):
if not isinstance(data, (Feature, FeatureCollection)):
if is_featurelike(data):
data = Feature(**data)
elif has_features(data):
data = FeatureCollection(**data)
elif isinstance(data, collections.Sequence):
data = FeatureCollection(features=data)
elif has_layer(data):
data = LayerCollection(data)
elif has_coordinates(data):
data = Feature(geometry=data)
elif isinstance(data, collections.Mapping) and not data:
data = Feature()
return data
| 836,183 |
Get environment variable or provide default.
Args:
var (str): environment variable to search for
default (optional(str)): default to return
|
def env_or_default(var, default=None):
if var in os.environ:
return os.environ[var]
return default
| 836,502 |
Encrypt a value with a KMS key.
Args:
value (str): value to encrypt
key (str): key id or alias
aws_config (optional[dict]): aws credentials
dict of arguments passed into boto3 session
example:
aws_creds = {'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key,
'region_name': 'us-east-1'}
Returns:
str: encrypted cipher text
|
def kms_encrypt(value, key, aws_config=None):
aws_config = aws_config or {}
aws = boto3.session.Session(**aws_config)
client = aws.client('kms')
enc_res = client.encrypt(KeyId=key,
Plaintext=value)
return n(b64encode(enc_res['CiphertextBlob']))
| 836,503 |
Sends the given command to the server.
Args:
command (str): Command to send to the server.
Raises:
ConnectionResetError: If the connection with the server is lost.
(Shouldn't it raise BrokenPipeError too ?)
|
async def send_command(self, command):
command = "{}\r\n".format(command).encode("ascii", errors="backslashreplace")
self.write(command)
# Don't forget to drain or the command will stay buffered:
await self.drain()
| 836,678 |
Return XHTML content of a page.
Parameters:
- id: id of a Confluence page.
|
def get_page_content(id):
data = _json.loads(_api.rest("/" + str(id) + "?expand=body.storage"))
return data["body"]["storage"]["value"]
| 836,749 |
Return name of a page based on passed page id.
Parameters:
- id: id of a Confluence page.
|
def get_page_name(id):
data = _json.loads(_api.rest("/" + str(id) + "?expand=body.storage"))
return data["title"]
| 836,750 |
Return id of a page based on passed page name and space.
Parameters:
- name: name of a Confluence page.
- space: space the Confluence page is in.
|
def get_page_id(name, space):
data = _json.loads(_api.rest("?title=" + name.replace(" ", "%20") + "&"
"spaceKey=" + space + "&expand=history"))
try:
return data["results"][0]["id"]
except (KeyError, IndexError):
return "Page not found!"
| 836,751 |
Create a page in Confluence.
Parameters:
- name: name of the Confluence page to create.
- parent_id: ID of the intended parent of the page.
- space: key of the space where the page will be created.
- content: XHTML content to be written to the page.
Notes: the page id can be obtained by getting ["id"] from the returned JSON.
|
def create_page(name, parent_id, space, content):
data = {}
data["type"] = "page"
data["title"] = name
data["ancestors"] = [{"id": str(parent_id)}]
data["space"] = {"key": space}
data["body"] = {"storage": {"value": content, "representation": "storage"}}
return _api.rest("/", "POST", _json.dumps(data))
| 836,752 |
Update a page in Confluence.
Parameters:
- id: ID of the page you wish to edit.
- name: name you would like to give to the page (usually the same name).
- space: space where the page lives.
- content: XHTML content to be written to the page.
Notes: it is required to try an initial update to find the page version.
|
def edit_page(id, name, space, content):
data = {}
data["id"] = str(id)
data["type"] = "page"
data["title"] = name
data["space"] = {"key": space}
data["body"] = {"storage": {"value": content, "representation": "storage"}}
data["version"] = {"number": 1}
response = _api.rest("/" + str(id), "PUT", _json.dumps(data))
new_version = int(_json.loads(response)["message"].split()[-1]) + 1
data["version"]["number"] = new_version
return _api.rest("/" + str(id), "PUT", _json.dumps(data))
| 836,753 |
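A hedged end-to-end sketch combining `create_page` and `edit_page` (assumes the module-level `_api`/`_json` helpers are configured against a Confluence instance; the space key and parent id are placeholders):
response = create_page('Release notes', 12345, 'DOCS', '<p>draft</p>')
page_id = _json.loads(response)['id']
edit_page(page_id, 'Release notes', 'DOCS', '<p>final content</p>')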
Delete a page from Confluence, along with its children.
Parameters:
- id: id of a Confluence page.
Notes:
- Getting a 204 error is expected! It means the page can no longer be found.
|
def delete_page_full(id):
children = _json.loads(get_page_children(id))
for i in children["results"]:
delete_page_full(i["id"])
return delete_page(id)
| 836,754 |
Sends the given command to the server.
Args:
*args: Command and arguments to be sent to the server.
Raises:
ConnectionResetError: If the connection with the server is
unexpectedly lost.
SMTPCommandFailedError: If the command fails.
Returns:
(int, str): A (code, message) 2-tuple containing the server
response.
|
async def do_cmd(self, *args, success=None):
if success is None:
success = (250,)
cmd = " ".join(args)
await self.writer.send_command(cmd)
code, message = await self.reader.read_reply()
if code not in success:
raise SMTPCommandFailedError(code, message, cmd)
return code, message
| 836,759 |
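A brief usage sketch inside another coroutine of the same client class (the EHLO exchange and its acceptable reply codes are illustrative assumptions):
async def ehlo(self, hostname):
    # accept 250 or 502 as valid replies for this hypothetical command
    code, message = await self.do_cmd("EHLO", hostname, success=(250, 502))
    return code, message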
Configure the device.
This method applies configuration to the device.
Args:
configlet (text): The configuration template.
plane (text): sdr or admin
attributes (dict): The dictionary of attributes used in template.
Returns:
A string with commit label or None
|
def config(self, configlet=None, plane='sdr', **attributes):
begin = time.time()
label = self._chain.target_device.config(configlet, plane, **attributes)
elapsed = time.time() - begin
if label:
self.emit_message("Configuration change last {:.0f}s. Label: {}".format(elapsed, label),
log_level=logging.INFO)
else:
self.emit_message("Configuration failed.", log_level=logging.WARNING)
return label
| 836,871 |
Rollback the configuration.
This method rolls back the configuration on the device.
Args:
label (text): The configuration label ID
plane: (text): sdr or admin
Returns:
A string with commit label or None
|
def rollback(self, label=None, plane='sdr'):
begin = time.time()
rb_label = self._chain.target_device.rollback(label=label, plane=plane)
elapsed = time.time() - begin
if rb_label:
self.emit_message("Configuration rollback lasted {:.0f}s. Label: {}".format(elapsed, rb_label),
log_level=logging.INFO)
else:
self.emit_message("Configuration rollback failed.", log_level=logging.WARNING)
return rb_label
| 836,872 |
Discover the device details.
This method discovers several device attributes.
Args:
logfile (file): Optional file descriptor for session logging. The file must be open for write.
The session is logged only if ``log_session=True`` was passed to the constructor.
If the parameter is not passed, the default *session.log* file is created in `log_dir`.
|
def discovery(self, logfile=None, tracefile=None):
self._enable_logging(logfile=logfile, tracefile=tracefile)
self.log("'discovery' method is deprecated. Please 'connect' with force_discovery=True.")
self.log("Device discovery process started")
self.connect(logfile=logfile, force_discovery=True, tracefile=tracefile)
self.disconnect()
| 836,873 |
Initializes a new instance of SMTPCommandFailedError.
Args:
code (int): Error code returned by the SMTP server.
message (str): Exception message, ideally providing help for the
user.
command (str): Command sent to the server that originated the
exception.
|
def __init__(self, code, message=None, command=None):
super().__init__(message)
self.code = code
self.command = command
| 837,030 |
Initialize the GeneralError object.
Args:
message (str): Custom message to be passed to the exceptions. Defaults to *None*.
If *None* then the general class *__doc__* is used.
host (str): Custom string which can be used to enhance the exception message by adding the "`host`: "
prefix to the message string. Defaults to *None*. If `host` is *None* then message stays unchanged.
|
def __init__(self, message=None, host=None):
self.message = message
self.hostname = str(host) if host else None
| 837,070 |
The main conversion process.
Args:
- string -- str, context to be converted
- lineno -- dict<int: int>, line number to actual offset mapping
Returns:
- str -- converted string
|
def convert(string, lineno):
def parse(string):
try:
return parso.parse(string, error_recovery=False,
version=os.getenv('F2FORMAT_VERSION', PARSO_VERSION[-1]))
except parso.ParserSyntaxError as error:
message = '%s: <%s: %r> from %r' % (error.message, error.error_leaf.token_type,
error.error_leaf.value, string)
raise ConvertError(message)
source = strarray(string) # strarray source (mutable)
f_string = [list()] # [[token, ...], [...], ...] -> concatenable strings
str_flag = False # if previous item is token.STRING
for token in tokenize.generate_tokens(io.StringIO(string).readline):
cat_flag = False # if item is concatenable with previous item, i.e. adjacent string
if token.type == tokenize.STRING:
if str_flag:
cat_flag = True
if cat_flag:
f_string[-1].append(token)
else:
f_string.append([token])
str_flag = True
elif token.type == tokenize.NL: # skip token.NL
continue
else: # otherwise, not concatenable
str_flag = False
# print(token) ###
# print() ###
# import pprint ###
# pprint.pprint(f_string) ###
# print() ###
for tokens in reversed(f_string): # for each string concatenation
# check if has f-string literal in this concatenation
future = any(map(lambda token: re.match(r'^(f|rf|fr)', token.string, re.IGNORECASE), tokens))
if not future:
continue
entryl = list()
for token in tokens: # for each token in concatenation
token_string = token.string
module = parse(token_string) # parse AST, get parso.python.tree.Module, _.children -> list
# _[0] -> parso.python.tree.PythonNode
# _[1] -> parso.python.tree.EndMarker
tmpval = module.children[0] # parsed string token
tmpent = list() # temporary entry list
if tmpval.type == 'fstring': # parso.python.tree.PythonNode.type -> str, string / fstring
# parso.python.tree.PythonNode.children[0] -> parso.python.tree.FStringStart, regex: /^((f|rf|fr)(|'|"""|")$/
for obj in tmpval.children[1:-1]: # traverse parso.python.tree.PythonNode.children -> list # noqa
if obj.type == 'fstring_expr': # expression part (in braces), parso.python.tree.PythonNode # noqa
obj_children = obj.children # parso.python.tree.PythonNode.children -> list
# _[0] -> parso.python.tree.Operator, '{' # noqa
# _[1] -> %undetermined%, expression literal (f_expression) # noqa
# _[2] -> %optional%, parso.python.tree.PythonNode, format specification (format_spec) # noqa
# -[3] -> parso.python.tree.Operator, '}' # noqa
start_expr = obj_children[1].start_pos[1]
end_expr = obj_children[1].end_pos[1]
tmpent.append(slice(start_expr, end_expr)) # entry of expression literal (f_expression)
if obj_children[2].type == 'fstring_format_spec':
for node in obj_children[2].children: # traverse format specifications (format_spec)
if node.type == 'fstring_expr': # expression part (in braces), parso.python.tree.PythonNode # noqa
node_chld = node.children # parso.python.tree.PythonNode.children -> list # noqa
# _[0] -> parso.python.tree.Operator, '{' # noqa
# _[1] -> %undetermined%, expression literal (f_expression) # noqa
# _[2] -> parso.python.tree.Operator, '}' # noqa
start = node_chld[1].start_pos[1]
end = node_chld[1].end_pos[1]
tmpent.append(slice(start, end))
# print('length:', length, '###', token_string[:length], '###', token_string[length:]) ###
entryl.append((token, tmpent)) # each token with a concatenation entry list
# print('entry: ', end='') ###
# pprint.pprint(entryl) ###
# print() ###
expr = list()
for token, entries in entryl: # extract expressions
# print(token.string, entries) ###
for entry in entries: # walk entries
temp_expr = token.string[entry] # original expression
val = parse(temp_expr).children[0] # parse AST
if val.type == 'testlist_star_expr' and \
re.fullmatch(r'\(.*\)', temp_expr, re.DOTALL) is None: # if expression is implicit tuple
real_expr = '(%s)' % temp_expr # add parentheses
else:
real_expr = temp_expr # or keep original
expr.append(real_expr) # record expression
# print() ###
# print('expr: ', end='') ###
# pprint.pprint(expr) ###
# convert end of f-string to str.format literal
end = lineno[tokens[-1].end[0]] + tokens[-1].end[1]
source[end:end+1] = '.format(%s)%s' % (', '.join(expr), source[end])
# for each token, convert expression literals and brace '{}' escape sequences
for token, entries in reversed(entryl): # using reversed to keep offset in leading context
token_start = lineno[token.start[0]] + token.start[1] # actual offset at start of token
token_end = lineno[token.end[0]] + token.end[1] # actual offset at end of token
if entries: # for f-string expressions, replace with empty string ('')
for entry in reversed(entries):
start = token_start + entry.start
end = token_start + entry.stop
source[start:end] = ''
else: # for escape sequences, double braces
source[token_start:token_end] = re.sub(r'([{}])', r'\1\1', source[token_start:token_end])
# strip leading f-string literals ('[fF]')
string = source[token_start:token_start+3]
if re.match(r'^(rf|fr|f)', string, re.IGNORECASE) is not None:
source[token_start:token_start+3] = re.sub(r'[fF]', r'', string, count=1)
# return modified context
return str(source)
| 837,517 |
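A small sketch of how `convert` expects its inputs: the source as one string plus a line-number-to-offset map (the same mapping `f2format` below builds from a file):
code = 'name = "world"\nprint(f"hello {name}")\n'
lineno = {1: 0}  # line number -> offset of that line's first character
for num, line in enumerate(code.splitlines(keepends=True), start=1):
    lineno[num + 1] = lineno[num] + len(line)
print(convert(code, lineno))
# expected: the f-string becomes print("hello {}".format(name))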
Wrapper that performs the conversion on a file.
Args:
- filename -- str, file to be converted
|
def f2format(filename):
print('Now converting %r...' % filename)
# fetch encoding
encoding = os.getenv('F2FORMAT_ENCODING', LOCALE_ENCODING)
lineno = dict() # line number -> file offset
content = list() # file content
with open(filename, 'r', encoding=encoding) as file:
lineno[1] = 0
for lnum, line in enumerate(file, start=1):
content.append(line)
lineno[lnum+1] = lineno[lnum] + len(line)
# now, do the dirty works
string = ''.join(content)
text = convert(string, lineno)
# dump back to the file
with open(filename, 'w', encoding=encoding) as file:
file.write(text)
| 837,518 |
Log events to the console.
Args:
status (bool, Optional, Default=True)
whether logging to console should be turned on (True) or off (False)
level (string, Optional, Default=None) :
level of logging; whichever level is chosen all higher levels
will be logged.
See: https://docs.python.org/2/library/logging.html#levels
|
def log_to_console(status=True, level=None):
if status:
if level is not None:
LOGGER.setLevel(level)
console_handler = logging.StreamHandler()
# create formatter
formatter = logging.Formatter('%(levelname)s-%(name)s: %(message)s')
# add formatter to handler
console_handler.setFormatter(formatter)
LOGGER.addHandler(console_handler)
LOGGER.info("pangaea %s", version())
else:
for handle in LOGGER.handlers:
if type(handle).__name__ == 'StreamHandler':
LOGGER.removeHandler(handle)
| 837,963 |
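A one-line usage sketch:
import logging
log_to_console(level=logging.DEBUG)   # attach a console handler at DEBUG
# ... run the library ...
log_to_console(status=False)          # detach console handlers again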
Log events to a file.
Args:
status (bool, Optional, Default=True)
whether logging to file should be turned on (True) or off (False)
filename (string, Optional, Default=None) :
path of file to log to
level (string, Optional, Default=None) :
level of logging; whichever level is chosen all higher levels
will be logged.
See: https://docs.python.org/2/library/logging.html#levels
|
def log_to_file(status=True, filename=DEFAULT_LOG_FILE, level=None):
if status:
if level is not None:
LOGGER.setLevel(level)
try:
os.mkdir(os.path.dirname(filename))
except OSError:
pass
file_handler = logging.FileHandler(filename)
# create formatter
format_str = '%(levelname)s-%(name)s: %(message)s'
formatter = logging.Formatter(format_str)
# add formatter to handler
file_handler.setFormatter(formatter)
LOGGER.addHandler(file_handler)
LOGGER.info("pangaea %s", version())
else:
for handle in LOGGER.handlers:
if type(handle).__name__ == 'FileHandler':
LOGGER.removeHandler(handle)
| 837,964 |
Returns a builder with stemmers for all languages added to it.
Args:
languages (list): A list of supported languages.
|
def get_nltk_builder(languages):
all_stemmers = []
all_stopwords_filters = []
all_word_characters = set()
for language in languages:
if language == "en":
# use Lunr's defaults
all_stemmers.append(lunr.stemmer.stemmer)
all_stopwords_filters.append(stop_word_filter)
all_word_characters.update({r"\w"})
else:
stopwords, word_characters = _get_stopwords_and_word_characters(language)
all_stemmers.append(
Pipeline.registered_functions["stemmer-{}".format(language)]
)
all_stopwords_filters.append(
generate_stop_word_filter(stopwords, language=language)
)
all_word_characters.update(word_characters)
builder = Builder()
multi_trimmer = generate_trimmer("".join(sorted(all_word_characters)))
Pipeline.register_function(
multi_trimmer, "lunr-multi-trimmer-{}".format("-".join(languages))
)
builder.pipeline.reset()
for fn in chain([multi_trimmer], all_stopwords_filters, all_stemmers):
builder.pipeline.add(fn)
for fn in all_stemmers:
builder.search_pipeline.add(fn)
return builder
| 839,373 |
Wraps all word transitions with a boundary token character (\x00).
If desired (with ``force_edges`` set to ``True``), this inserts
the boundary character at the beginning and end of the string.
Arguments:
- `seq`:
- `force_edges = True`:
|
def boundary_transform(seq, force_edges = True):
gen = boundary_words(seq)
if force_edges:
gen = boundary_edges(gen)
gen = remove_duplicates(gen)
for char in gen:
yield char
| 839,653 |
Wraps all word transitions with a boundary token character (\x00).
Arguments:
- `seq`:
|
def boundary_words(seq):
in_word = None
for char in seq:
if char == '\x00' and in_word is not None:
in_word = not in_word
elif char in WHITESPACE_CHARS:
if in_word is not None and in_word:
yield '\x00'
in_word = False
else:
if in_word is not None and not in_word:
yield '\x00'
in_word = True
yield char
| 839,654 |
Removes duplicate boundary token characters from the given
character iterable.
Arguments:
- `seq`:
|
def remove_duplicates(seq):
last_boundary = False
for char in seq:
if char == '\x00':
if not last_boundary:
last_boundary = True
yield char
else:
last_boundary = False
yield char
| 839,655 |
Performs search and replace on the given input string `seq` using
the values stored in this trie. This method uses a O(n**2)
chart-parsing algorithm to find the optimal way of replacing
matches in the input.
Arguments:
- `seq`:
|
def replace(self, seq):
# #1: seq must be stored in a container with a len() function
seq = list(seq)
# chart is a (n-1) X (n) table
# chart[0] represents all matches of length (0+1) = 1
# chart[n-1] represents all matches/rewrites of length (n-1+1) = n
# chart[0][0] represents a match of length 1 starting at character 0
# chart[0][n-1] represents a match of length 1 starting at character n-1
# cells in the chart are tuples:
# (score, list)
# we initialise chart by filling in row 0:
# each cell gets assigned (0, char), where char is the character at
# the corresponding position in the input string
chart = [ [None for _i in range(len(seq)) ] for _i in range(len(seq)) ]
chart[0] = [(0, char) for char in seq]
# now we fill in the chart using the results from the aho-corasick
# string matches
for (begin, length, value) in self.find_all(seq):
chart[length-1][begin] = (length, value)
# now we need to fill in the chart row by row, starting with row 1
for row in range(1, len(chart)):
# each row is 1 cell shorter than the last
for col in range(len(seq) - row):
# the entry in [row][col] is the choice with the highest score; to
# find this, we must search the possible partitions of the cell
#
# things on row 2 have only one possible partition: 1 + 1
# things on row 3 have two: 1 + 2, 2 + 1
# things on row 4 have three: 1+3, 3+1, 2+2
#
# we assume that any pre-existing entry found by aho-corasick
# in a cell is already optimal
#print('scanning [{}][{}]'.format(row, col))
if chart[row][col] is not None:
continue
# chart[1][2] is the cell of matches of length 2 starting at
# character position 2;
# it can only be composed of chart[0][2] + chart[0][3]
#
# partition_point is the length of the first of the two parts
# of the cell
#print('cell[{}][{}] => '.format(row, col))
best_score = -1
best_value = None
for partition_point in range(row):
# the two cells will be [partition_point][col] and
# [row - partition_point - 2][col+partition_point+1]
x1 = partition_point
y1 = col
x2 = row - partition_point - 1
y2 = col + partition_point + 1
#print(' [{}][{}] + [{}][{}]'.format(x1, y1, x2, y2))
s1, v1 = chart[x1][y1]
s2, v2 = chart[x2][y2]
# compute the score
score = s1 + s2
#print(' = {} + {}'.format((s1, v1), (s2, v2)))
#print(' = score {}'.format(score))
if best_score < score:
best_score = score
best_value = v1 + v2
chart[row][col] = (best_score, best_value)
#print(' sets new best score with value {}'.format(
# best_value))
# now the optimal solution is stored at the top of the chart
return chart[len(seq)-1][0][1]
| 839,668 |
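A minimal sketch of the chart-based replacement, assuming the trie maps plain strings to their rewrites (the class and its item assignment / `find_all` come from the surrounding fsed.ahocorasick module):
trie = AhoCorasickTrie()
trie['New York'] = 'NYC'
trie['York'] = 'Y'
# the chart parser prefers the longer, higher-scoring match
print(trie.replace('I love New York'))   # expected: 'I love NYC'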
Greedily matches strings in ``seq``, and replaces them with their
node values.
Arguments:
- `seq`: an iterable of characters to perform search-and-replace on
|
def greedy_replace(self, seq):
if not self._suffix_links_set:
self._set_suffix_links()
# start at the root
current = self.root
buffered = ''
outstr = ''
for char in seq:
while char not in current:
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
break
elif current.has_suffix:
current = current.suffix
if current.depth:
outstr += buffered[:-current.depth]
buffered = buffered[-current.depth:]
else:
outstr += buffered
buffered = ''
break
else:
current = self.root
outstr += buffered
buffered = ''
break
if char in current:
buffered += char
current = current[char]
if current.has_value:
outstr += buffered[:-current.depth]
outstr += current.value
buffered = ''
current = self.root
else:
assert current is self.root
outstr += buffered + char
buffered = ''
if current.has_dict_suffix:
current = current.dict_suffix
outstr += buffered[:-current.depth]
outstr += current.value
else:
outstr += buffered
return outstr
| 839,669 |
Performs a search against the index using lunr query syntax.
Results will be returned sorted by their score, the most relevant
results will be returned first.
For more programmatic querying use `lunr.Index.query`.
Args:
query_string (str): A string to parse into a Query.
Returns:
dict: Results of executing the query.
|
def search(self, query_string):
query = self.create_query()
# TODO: should QueryParser be a method of query? should it return one?
parser = QueryParser(query_string, query)
parser.parse()
return self.query(query)
| 839,794 |
Convenience method to create a Query with the Index's fields.
Args:
fields (iterable, optional): The fields to include in the Query,
defaults to the Index's `all_fields`.
Returns:
Query: With the specified fields or all the fields in the Index.
|
def create_query(self, fields=None):
if fields is None:
return Query(self.fields)
non_contained_fields = set(fields) - set(self.fields)
if non_contained_fields:
raise BaseLunrException(
"Fields {} are not part of the index", non_contained_fields
)
return Query(fields)
| 839,795 |
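A short usage sketch tying `search` and `create_query` together, given an existing lunr Index instance `idx` (the "title" field and the `Query.term` helper are assumptions about the surrounding index):
# simple string-based search
results = idx.search("python stemmer")
# programmatic equivalent via lunr.Index.query
query = idx.create_query(fields=["title"])
query.term("stemmer")
results = idx.query(query)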
Get a configuration value, using fallback for missing values.
Parameters:
config -- the configparser to try to extract the option value from.
section -- the section to extract value from.
option -- the name of the option to extract value from.
fallback -- fallback value to return if no value was set in `config`.
returns -- the config option value if it was set, else fallback.
|
def _get_with_fallback(config, section, option, fallback):
exists = (config.has_section(section)
and config.has_option(section, option))
if not exists:
return fallback
else:
return config.get(section, option)
| 839,837 |
Actually execute the program.
Calling this method can be done from tests to simulate executing the
application from command line.
Parameters:
options -- parsed `configparser` options read from the config file.
exit_codeword -- an optional exit_message that will shut down Rewind. Used
for testing.
returns -- exit code for the application. Non-zero for errors.
|
def run(options, exit_codeword=None):
QUERY_ENDP_OPT = 'query-bind-endpoint'
STREAM_ENDP_OPT = 'streaming-bind-endpoint'
ZMQ_NTHREADS = "zmq-nthreads"
if not options.has_section(config.DEFAULT_SECTION):
msg = "Missing default section, `{0}`."
fmsg = msg.format(config.DEFAULT_SECTION)
raise config.ConfigurationError(fmsg)
if not options.has_option(config.DEFAULT_SECTION, QUERY_ENDP_OPT):
msg = "Missing (query) bind endpoint in option file: {0}:{1}"
fmsg = msg.format(config.DEFAULT_SECTION, QUERY_ENDP_OPT)
raise config.ConfigurationError(fmsg)
queryendp = options.get(config.DEFAULT_SECTION, QUERY_ENDP_OPT).split(",")
streamendp = _get_with_fallback(options, config.DEFAULT_SECTION,
STREAM_ENDP_OPT, '').split(",")
queryendp = filter(lambda x: x.strip(), queryendp)
streamendp = filter(lambda x: x.strip(), streamendp)
try:
eventstore = config.construct_eventstore(options)
except config.ConfigurationError as e:
_logger.exception("Could not instantiate event store from config file.")
raise
zmq_nthreads = _get_with_fallback(options, config.DEFAULT_SECTION,
ZMQ_NTHREADS, '3')
try:
zmq_nthreads = int(zmq_nthreads)
except ValueError:
msg = "{0}:{1} must be an integer".format(config.DEFAULT_SECTION,
ZMQ_NTHREADS)
_logger.fatal(msg)
return 1
with _zmq_context_context(zmq_nthreads) as context, \
_zmq_socket_context(context, zmq.REP, queryendp) as querysock, \
_zmq_socket_context(context, zmq.PUB,
streamendp) as streamsock:
# Executing the program in the context of ZeroMQ context as well as
# ZeroMQ sockets. Using with here to make sure are correctly closing
# things in the correct order, particularly also if we have an
# exception or similar.
runner = _RewindRunner(eventstore, querysock, streamsock,
(exit_codeword.encode()
if exit_codeword
else None))
runner.run()
return 0
| 839,838 |
Entry point for Rewind.
Parses input and calls run() for the real work.
Parameters:
argv -- sys.argv arguments. Can be set for testing purposes.
returns -- the proposed exit code for the program.
|
def main(argv=None):
parser = argparse.ArgumentParser(
description='Event storage and event proxy.',
usage='%(prog)s <configfile>'
)
parser.add_argument('--exit-codeword', metavar="MSG", dest="exit_message",
default=None, help="An incoming message that makes"
" Rewind quit. Used for testing.")
parser.add_argument('configfile')
args = argv if argv is not None else sys.argv[1:]
args = parser.parse_args(args)
config = configparser.SafeConfigParser()
with open(args.configfile) as f:
config.readfp(f)
exitcode = run(config, args.exit_message)
return exitcode
| 839,839 |
Inserts or updates an existing index within the vector.
Args:
- insert_index (int): The index at which the element should be
inserted.
- val (int|float): The value to be inserted into the vector.
- fn (callable, optional): An optional callable taking two
arguments, the current value and the passed value to generate
the final inserted value at the position in case of collision.
|
def upsert(self, insert_index, val, fn=None):
fn = fn or (lambda current, passed: passed)
self._magnitude = 0
position = self.position_for_index(insert_index)
if position < len(self.elements) and self.elements[position] == insert_index:
self.elements[position + 1] = fn(self.elements[position + 1], val)
else:
self.elements.insert(position, val)
self.elements.insert(position, insert_index)
| 839,900 |
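A short sketch of the collision callback, assuming the sparse Vector class from lunr that this method belongs to:
v = Vector()
v.upsert(5, 2)                                    # insert value 2 at index 5
v.upsert(5, 3, lambda current, passed: current + passed)
# index 5 now holds 2 + 3 = 5 instead of being overwritten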
Instantiate a `LogEventStore` from config.
Parameters:
_config -- the configuration file options read from file(s).
**options -- various options given to the specific event store. Shall
not be used with this event store. Warning will be logged
for every extra non-recognized option. The only required
key to this function is 'path'.
returns -- a newly instantiated `LogEventStore`.
|
def from_config(config, **options):
expected_args = ('path',)
rconfig.check_config_options("LogEventStore", expected_args, tuple(),
options)
return LogEventStore(options['path'])
| 840,000 |
Instantiate a `RotatedEventStore` from config.
Parameters:
_config -- the configuration file options read from file(s).
**options -- various options given to the specific event store. Shall
not be used with this event store. Warning will be logged
for every extra non-recognized option. The required
keys to this function are 'prefix', 'realclass' and 'path'.
returns -- a newly instantiated `RotatedEventStore`.
|
def from_config(config, **options):
expected_args = ('prefix', 'realclass')
for arg in expected_args:
if arg not in options:
msg = "Required option missing: {0}"
raise rconfig.ConfigurationError(msg.format(arg))
# Not logging unrecognized options here, because they might be used
# by the real event store instantiated below.
classpath = options['realclass']
classpath_pieces = classpath.split('.')
classname = classpath_pieces[-1]
modulepath = '.'.join(classpath_pieces[0:-1])
module = importlib.import_module(modulepath)
estore_class = getattr(module, classname)
return RotatedEventStore(lambda fname: estore_class(fname),
options['path'], options['prefix'])
| 840,008 |
Construct a filename for a database.
Parameters:
batchno -- batch number for the rotated database.
Returns the constructed path as a string.
|
def _construct_filename(self, batchno):
return os.path.join(self.dirpath,
"{0}.{1}".format(self.prefix, batchno))
| 840,011 |
Find the batch number that contains a certain event.
Parameters:
uuid -- the event uuid to search for.
returns -- a batch number, or None if not found.
|
def _find_batch_containing_event(self, uuid):
if self.estore.key_exists(uuid):
# Reusing already opened DB if possible
return self.batchno
else:
for batchno in range(self.batchno - 1, -1, -1):
# Iterating backwards here because we are more likely to find
# the event in a later archive than in an earlier one.
db = self._open_event_store(batchno)
with contextlib.closing(db):
if db.key_exists(uuid):
return batchno
return None
| 840,013 |
Construct a persisted event store that is stored on disk.
Parameters:
events_per_batch -- number of events stored in a batch before rotating
the files. Defaults to 25000. That number is
arbitrary and should probably be configured so that
files do not grow out of proportion.
|
def __init__(self, events_per_batch=25000):
assert isinstance(events_per_batch, int), \
"Events per batch must be integer."
assert events_per_batch > 0, "Events per batch must be positive"
self.events_per_batch = events_per_batch
self.count = 0
self.stores = []
| 840,015 |
Retrieves the SnowballStemmer for a particular language.
Args:
language (str): ISO-639-1 code of the language.
|
def get_language_stemmer(language):
from lunr.languages import SUPPORTED_LANGUAGES
from nltk.stem.snowball import SnowballStemmer
return SnowballStemmer(SUPPORTED_LANGUAGES[language])
| 840,026 |
Wrapper around a NLTK SnowballStemmer, which includes stop words for
each language.
Args:
stemmer (SnowballStemmer): Stemmer instance that performs the stemming.
token (lunr.Token): The token to stem.
i (int): The index of the token in a set.
tokens (list): A list of tokens representing the set.
|
def nltk_stemmer(stemmer, token, i=None, tokens=None):
def wrapped_stem(token, metadata=None):
return stemmer.stem(token)
return token.update(wrapped_stem)
| 840,027 |
Sets the logging level of the script based on command line options.
Arguments:
- `verbose`:
- `quiet`:
|
def set_log_level(verbose, quiet):
if quiet:
verbose = -1
if verbose < 0:
verbose = logging.CRITICAL
elif verbose == 0:
verbose = logging.WARNING
elif verbose == 1:
verbose = logging.INFO
elif 1 < verbose:
verbose = logging.DEBUG
LOGGER.setLevel(verbose)
| 840,077 |
Automatically detects the pattern file format, and determines
whether the Aho-Corasick string matching should pay attention to
word boundaries or not.
Arguments:
- `pattern_filename`:
- `encoding`:
- `on_word_boundaries`:
|
def detect_pattern_format(pattern_filename, encoding, on_word_boundaries):
tsv = True
boundaries = on_word_boundaries
with open_file(pattern_filename) as input_file:
for line in input_file:
line = line.decode(encoding)
if line.count('\t') != 1:
tsv = False
if '\\b' in line:
boundaries = True
if boundaries and not tsv:
break
return tsv, boundaries
| 840,078 |
Process escaped characters in ``sval``.
Arguments:
- `sval`:
|
def sub_escapes(sval):
sval = sval.replace('\\a', '\a')
sval = sval.replace('\\b', '\x00')
sval = sval.replace('\\f', '\f')
sval = sval.replace('\\n', '\n')
sval = sval.replace('\\r', '\r')
sval = sval.replace('\\t', '\t')
sval = sval.replace('\\v', '\v')
sval = sval.replace('\\\\', '\\')
return sval
| 840,079 |
Constructs a finite state machine for performing string rewriting.
Arguments:
- `pattern_filename`:
- `pattern_format`:
- `encoding`:
- `on_word_boundaries`:
|
def build_trie(pattern_filename, pattern_format, encoding, on_word_boundaries):
boundaries = on_word_boundaries
if pattern_format == 'auto' or not on_word_boundaries:
tsv, boundaries = detect_pattern_format(pattern_filename, encoding,
on_word_boundaries)
if pattern_format == 'auto':
if tsv:
pattern_format = 'tsv'
else:
pattern_format = 'sed'
trie = fsed.ahocorasick.AhoCorasickTrie()
num_candidates = 0
with open_file(pattern_filename) as pattern_file:
for lineno, line in enumerate(pattern_file):
line = line.decode(encoding).rstrip('\n')
if not line.strip():
continue
# decode the line
if pattern_format == 'tsv':
fields = line.split('\t')
if len(fields) != 2:
LOGGER.warning(('skipping line {} of pattern file (not '
'in tab-separated format): {}').format(lineno, line))
continue
before, after = fields
elif pattern_format == 'sed':
before = after = None
line = line.lstrip()
if line[0] == 's':
delim = line[1]
# delim might be a regex special character;
# escape it if necessary
if delim in '.^$*+?[](){}|\\':
delim = '\\' + delim
fields = re.split(r'(?<!\\){}'.format(delim), line)
if len(fields) == 4:
before, after = fields[1], fields[2]
before = re.sub(r'(?<!\\)\\{}'.format(delim), delim, before)
after = re.sub(r'(?<!\\)\\{}'.format(delim), delim, after)
if before is None or after is None:
LOGGER.warning(('skipping line {} of pattern file (not '
'in sed format): {}').format(lineno, line))
continue
num_candidates += 1
if on_word_boundaries and before != before.strip():
LOGGER.warning(('before pattern on line {} is padded with whitespace; '
'this may interact strangely with the --words '
'option: {}').format(lineno, line))
before = sub_escapes(before)
after = sub_escapes(after)
if boundaries:
before = fsed.ahocorasick.boundary_transform(before, on_word_boundaries)
trie[before] = after
LOGGER.info('{} patterns loaded from {}'.format(num_candidates,
pattern_filename))
return trie, boundaries
| 840,080 |
Rewrites a string using the given trie object.
Arguments:
- `sval`:
- `trie`:
- `boundaries`:
- `slow`:
|
def rewrite_str_with_trie(sval, trie, boundaries = False, slow = False):
if boundaries:
sval = fsed.ahocorasick.boundary_transform(sval)
if slow:
sval = trie.replace(sval)
else:
sval = trie.greedy_replace(sval)
if boundaries:
sval = ''.join(fsed.ahocorasick.boundary_untransform(sval))
return sval
| 840,082 |
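A hedged sketch of the two pattern formats `build_trie` accepts and how they feed `rewrite_str_with_trie` (file contents shown as comments; the path is a placeholder):
# patterns.tsv (tab-separated):   colour<TAB>color
# patterns.sed (sed-style):       s/colour/color/
trie, boundaries = build_trie('patterns.tsv', 'auto',
                              encoding='utf-8', on_word_boundaries=True)
print(rewrite_str_with_trie('my favourite colour', trie, boundaries))
# expected: 'my favourite color'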
Add/replace FITS key
Add/replace the key keyname with value keyval of type keytype in filename.
Parameters:
----------
keyname : str
FITS Keyword name.
keyval : str
FITS keyword value.
keytype: str
FITS keyword type: int, float, str or bool.
filename : str
FITS filename.
extnum : int
Extension number where the keyword will be inserted. Note that
the first extension is number 1 (and not zero).
|
def add_key_val(keyname, keyval, keytype, filename, extnum):
funtype = {'int': int, 'float': float, 'str': str, 'bool': bool}
if keytype not in funtype:
raise ValueError('Undefined keyword type: ', keytype)
with fits.open(filename, "update") as hdulist:
hdulist[extnum].header[keyname] = funtype[keytype](keyval)
print('>>> Inserting ' + keyname + '=' + keyval + ' in ' + filename)
| 840,260 |
Normalize ``value`` to an Inspire category.
Args:
value(str): an Inspire category to properly case, or an arXiv category
to translate to the corresponding Inspire category.
Returns:
str: ``None`` if ``value`` is not a non-empty string,
otherwise the corresponding Inspire category.
|
def classify_field(value):
if not (isinstance(value, six.string_types) and value):
return
schema = load_schema('elements/inspire_field')
inspire_categories = schema['properties']['term']['enum']
for inspire_category in inspire_categories:
if value.upper() == inspire_category.upper():
return inspire_category
category = normalize_arxiv_category(value)
return ARXIV_TO_INSPIRE_CATEGORY_MAPPING.get(category, 'Other')
| 840,463 |
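Two illustrative calls (the expected results assume 'Astrophysics' is in the Inspire field enum and that ARXIV_TO_INSPIRE_CATEGORY_MAPPING maps 'hep-ph' to 'Phenomenology-HEP'):
classify_field('astrophysics')   # expected: 'Astrophysics' (case-normalised Inspire term)
classify_field('hep-ph')         # expected: 'Phenomenology-HEP' (via the arXiv mapping)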
Load the given schema from wherever it's installed.
Args:
schema_name(str): Name of the schema to load, for example 'authors'.
resolved(bool): If True will return the resolved schema, that is with
all the $refs replaced by their targets.
Returns:
dict: the schema with the given name.
|
def load_schema(schema_name, resolved=False):
schema_data = ''
with open(get_schema_path(schema_name, resolved)) as schema_fd:
schema_data = json.loads(schema_fd.read())
return schema_data
| 840,468 |
Load the schema from a given record.
Args:
data (dict): record data.
schema (Union[dict, str]): schema to validate against.
Returns:
dict: the loaded schema.
Raises:
SchemaNotFound: if the given schema was not found.
SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
found in ``data``.
jsonschema.SchemaError: if the schema is invalid.
|
def _load_schema_for_record(data, schema=None):
if schema is None:
if '$schema' not in data:
raise SchemaKeyNotFound(data=data)
schema = data['$schema']
if isinstance(schema, six.string_types):
schema = load_schema(schema_name=schema)
return schema
| 840,469 |
Normalize collaboration string.
Args:
collaboration: a string containing collaboration(s) or None
Returns:
list: List of extracted and normalized collaborations
Examples:
>>> from inspire_schemas.utils import normalize_collaboration
>>> normalize_collaboration('for the CMS and ATLAS Collaborations')
['CMS', 'ATLAS']
|
def normalize_collaboration(collaboration):
if not collaboration:
return []
collaboration = collaboration.strip()
if collaboration.startswith('(') and collaboration.endswith(')'):
collaboration = collaboration[1:-1]
collaborations = _RE_AND.split(collaboration)
collaborations = (_RE_COLLABORATION_LEADING.sub('', collab)
for collab in collaborations)
collaborations = (_RE_COLLABORATION_TRAILING.sub('', collab)
for collab in collaborations)
return [collab.strip() for collab in collaborations]
| 840,472 |
Get the license abbreviation from an URL.
Args:
url(str): canonical url of the license.
Returns:
str: the corresponding license abbreviation.
Raises:
ValueError: when the url is not recognized
|
def get_license_from_url(url):
if not url:
return
split_url = urlsplit(url, scheme='http')
if split_url.netloc.lower() == 'creativecommons.org':
if 'publicdomain' in split_url.path:
match = _RE_PUBLIC_DOMAIN_URL.match(split_url.path)
if match is None:
license = ['public domain']
else:
license = ['CC0']
license.extend(part for part in match.groups() if part)
else:
license = ['CC']
match = _RE_LICENSE_URL.match(split_url.path)
license.extend(part.upper() for part in match.groups() if part)
elif split_url.netloc == 'arxiv.org':
license = ['arXiv']
match = _RE_LICENSE_URL.match(split_url.path)
license.extend(part for part in match.groups() if part)
else:
raise ValueError('Unknown license URL')
return u' '.join(license)
| 840,473 |
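A couple of illustrative inputs; the expected outputs depend on the module's URL regexes, so treat them as a sketch:
get_license_from_url('http://creativecommons.org/licenses/by/4.0/')
# expected: 'CC BY 4.0'
get_license_from_url('http://creativecommons.org/publicdomain/zero/1.0/')
# expected: 'CC0 1.0'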
Convert back a ``publication_info`` value from the new format to the old.
Does the inverse transformation of :func:`convert_old_publication_info_to_new`,
to be used whenever we are sending back records from Labs to Legacy.
Args:
publication_infos: a ``publication_info`` in the new format.
Returns:
list(dict): a ``publication_info`` in the old format.
|
def convert_new_publication_info_to_old(publication_infos):
def _needs_a_hidden_pubnote(journal_title, journal_volume):
return (
journal_title in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE and
journal_volume in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE[journal_title]
)
result = []
for publication_info in publication_infos:
_publication_info = copy.deepcopy(publication_info)
journal_title = _publication_info.get('journal_title')
try:
journal_title = _JOURNALS_RENAMED_NEW_TO_OLD[journal_title]
_publication_info['journal_title'] = journal_title
result.append(_publication_info)
continue
except KeyError:
pass
journal_volume = _publication_info.get('journal_volume')
year = _publication_info.get('year')
if (journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME and year and
journal_volume and len(journal_volume) == 2):
two_digit_year = str(year)[2:]
_publication_info['journal_volume'] = ''.join([two_digit_year, journal_volume])
result.append(_publication_info)
continue
if journal_title and journal_volume:
match = _RE_TITLE_ENDS_WITH_A_LETTER.match(journal_title)
if match and _needs_a_hidden_pubnote(journal_title, journal_volume):
_publication_info['journal_title'] = match.group('title')
_publication_info['journal_volume'] = journal_volume + match.group('letter')
result.append(_publication_info)
_publication_info = copy.deepcopy(publication_info)
_publication_info['hidden'] = True
_publication_info['journal_title'] = match.group('title')
_publication_info['journal_volume'] = match.group('letter') + journal_volume
elif match and journal_title not in _JOURNALS_ALREADY_ENDING_WITH_A_LETTER:
_publication_info['journal_title'] = match.group('title')
_publication_info['journal_volume'] = match.group('letter') + journal_volume
result.append(_publication_info)
return result
| 840,475 |
Add an affiliation.
Args:
value (string): affiliation value
curated_relation (bool): is relation curated
record (dict): affiliation JSON reference
|
def add_affiliation(self, value, curated_relation=None, record=None):
if value:
affiliation = {
'value': value
}
if record:
affiliation['record'] = record
if curated_relation is not None:
affiliation['curated_relation'] = curated_relation
self._ensure_list_field('affiliations', affiliation)
| 840,497 |
Set a unique ID.
If a UID of a given schema already exists in a record it will
be overwritten, otherwise it will be appended to the record.
Args:
uid (string): unique identifier.
schema (Optional[string]): schema of the unique identifier. If
``None``, the schema will be guessed based on the shape of
``uid``.
Raises:
SchemaUIDConflict: if UID and schema do not match
|
def set_uid(self, uid, schema=None):
try:
uid, schema = author_id_normalize_and_schema(uid, schema)
except UnknownUIDSchema:
# Explicit schema wasn't provided, and the UID is too little
# to figure out the schema of it, this however doesn't mean
# the UID is invalid
pass
self._ensure_field('ids', [])
self.obj['ids'] = [id_ for id_ in self.obj['ids'] if id_.get('schema') != schema]
self._add_uid(uid, schema)
| 840,498 |
Return the page range or the article id of a publication_info entry.
Args:
publication_info(dict): a publication_info field entry of a record
separator(basestring): optional page range symbol, defaults to a single dash
Returns:
string: the page range or the article id of the record.
Examples:
>>> publication_info = {'artid': '054021'}
>>> get_page_artid(publication_info)
'054021'
|
def get_page_artid_for_publication_info(publication_info, separator):
if 'artid' in publication_info:
return publication_info['artid']
elif 'page_start' in publication_info and 'page_end' in publication_info:
page_start = publication_info['page_start']
page_end = publication_info['page_end']
return text_type('{}{}{}').format(
page_start, text_type(separator), page_end
)
return ''
| 840,691 |
Return the page range or the article id of a record.
Args:
separator(basestring): optional page range symbol, defaults to a single dash
Returns:
string: the page range or the article id of the record.
Examples:
>>> record = {
... 'publication_info': [
... {'artid': '054021'},
... ],
... }
>>> LiteratureReader(record).get_page_artid()
'054021'
|
def get_page_artid(self, separator='-'):
publication_info = get_value(
self.record,
'publication_info[0]',
default={}
)
return LiteratureReader.get_page_artid_for_publication_info(
publication_info,
separator
)
| 840,692 |
Add a keyword.
Args:
keyword(str): keyword to add.
schema(str): schema to which the keyword belongs.
source(str): source for the keyword.
|
def add_keyword(self, keyword, schema=None, source=None):
keyword_dict = self._sourced_dict(source, value=keyword)
if schema is not None:
keyword_dict['schema'] = schema
self._append_to('keywords', keyword_dict)
| 840,773 |
Add a figure.
Args:
key (string): document key
url (string): document url
Keyword Args:
caption (string): simple description
label (string):
material (string):
original_url (string): original url
filename (string): current url
Returns: None
|
def add_figure(self, key, url, **kwargs):
figure = self._check_metadata_for_file(key=key, url=url, **kwargs)
for dict_key in (
'caption',
'label',
'material',
'filename',
'url',
'original_url',
):
if kwargs.get(dict_key) is not None:
figure[dict_key] = kwargs[dict_key]
if key_already_there(figure, self.record.get('figures', ())):
raise ValueError(
'There\'s already a figure with the key %s.'
% figure['key']
)
self._append_to('figures', figure)
| 840,786 |
Adds document to record
Args:
key (string): document key
url (string): document url
Keyword Args:
description (string): simple description
fulltext (bool): mark if this is a full text
hidden (bool): whether the document should be hidden
material (string):
original_url (string): original url
filename (string): current url
Returns: None
|
def add_document(self, key, url, **kwargs):
document = self._check_metadata_for_file(key=key, url=url, **kwargs)
for dict_key in (
'description',
'fulltext',
'hidden',
'material',
'original_url',
'url',
'filename',
):
if kwargs.get(dict_key):
document[dict_key] = kwargs[dict_key]
if key_already_there(document, self.record.get('documents', ())):
raise ValueError(
'There\'s already a document with the key %s.'
% document['key']
)
self._append_to('documents', document)
| 840,787 |
Exception raised when a UID is not matching provided schema.
Args:
schema (string): given schema
uid (string): UID which conflicts the schema
|
def __init__(self, schema, uid):
message = 'UID "{}" is not of the schema "{}".'.format(uid, schema)
super(SchemaUIDConflict, self).__init__(message)
| 841,118 |
Exception raised when a schema of a UID is unknown.
Args:
uid (string): given UID
|
def __init__(self, uid):
message = 'Schema of UID "{}" is unrecognized.'.format(uid)
super(UnknownUIDSchema, self).__init__(message)
| 841,119 |
Function to produce a general 2D plot.
Args:
x (list): x points.
y (list): y points.
filename (str): Filename of the output image.
title (str): Title of the plot. Default is '' (no title).
x_label (str): x-axis label.
y_label (str): y-axis label.
|
def gnuplot_2d(x, y, filename, title='', x_label='', y_label=''):
_, ext = os.path.splitext(filename)
if ext != '.png':
filename += '.png'
gnuplot_cmds = \
scr = _GnuplotScriptTemp(gnuplot_cmds)
data = _GnuplotDataTemp(x, y)
args_dict = {
'filename': filename,
'filename_data': data.name,
'title': title,
'x_label': x_label,
'y_label': y_label
}
gnuplot(scr.name, args_dict)
| 841,204 |
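A brief usage sketch of the intended call; the data, filename and labels are placeholders (the output filename gets a .png suffix if missing):
import math
x = [i / 10.0 for i in range(100)]
y = [math.sin(v) for v in x]
gnuplot_2d(x, y, 'sine', title='sin(x)', x_label='x', y_label='sin(x)')
# writes sine.png via gnuplot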
Function to produce a general 3D plot from a 2D matrix.
Args:
z_matrix (list): 2D matrix.
filename (str): Filename of the output image.
title (str): Title of the plot. Default is '' (no title).
x_label (str): x-axis label.
y_label (str): y-axis label.
|
def gnuplot_3d_matrix(z_matrix, filename, title='', x_label='', y_label=''):
_, ext = os.path.splitext(filename)
if ext != '.png':
filename += '.png'
# the original triple-quoted gnuplot script was lost in extraction; below is a
# plausible minimal reconstruction, not the original commands
gnuplot_cmds = \
'''
set terminal pngcairo
set output filename
set title title
set xlabel x_label
set ylabel y_label
plot filename_data matrix with image notitle
'''
scr = _GnuplotScriptTemp(gnuplot_cmds)
data = _GnuplotDataZMatrixTemp(z_matrix)
args_dict = {
'filename': filename,
'filename_data': data.name,
'title': title,
'x_label': x_label,
'y_label': y_label
}
gnuplot(scr.name, args_dict)
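A corresponding sketch for the matrix variant, again with made-up data:
z = [[i * j for j in range(50)] for i in range(50)]
gnuplot_3d_matrix(z, 'surface', title='i*j', x_label='column', y_label='row')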
| 841,205 |
Append the ``element`` to the ``field`` of the record.
This method is smart: it does nothing if ``element`` is empty and
creates ``field`` if it does not exist yet.
Args:
:param field: the name of the field of the record to append to
:type field: string
:param element: the element to append
|
def _append_to(self, field, element):
if element not in EMPTIES:
self.obj.setdefault(field, [])
self.obj.get(field).append(element)
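A small behavioural sketch, assuming EMPTIES holds the usual empty values (None, '', [], {}) and `builder` is an instance of the enclosing class; the field name is invented:
builder._append_to('arxiv_eprints', {'value': '1234.5678'})   # creates the list on first use, then appends
builder._append_to('arxiv_eprints', {})                       # empty element, silently ignored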
| 841,367 |
Add name variant.
Args:
:param name: name variant for the current author.
:type name: string
|
def add_name_variant(self, name):
self._ensure_field('name', {})
self.obj['name'].setdefault('name_variants', []).append(name)
| 841,368 |
Add native name.
Args:
:param name: native name for the current author.
:type name: string
|
def add_native_name(self, name):
self._ensure_field('name', {})
self.obj['name'].setdefault('native_names', []).append(name)
| 841,369 |
Add previous name.
Args:
:param name: previous name for the current author.
:type name: string
|
def add_previous_name(self, name):
self._ensure_field('name', {})
self.obj['name'].setdefault('previous_names', []).append(name)
| 841,370 |
Add email address.
Args:
:param email: email of the author.
:type email: string
:param hidden: whether the email should be hidden (not shown publicly).
:type hidden: boolean
|
def add_email_address(self, email, hidden=None):
existing_emails = get_value(self.obj, 'email_addresses', [])
found_email = next(
(existing_email for existing_email in existing_emails if existing_email.get('value') == email),
None
)
if found_email is None:
new_email = {'value': email}
if hidden is not None:
new_email['hidden'] = hidden
self._append_to('email_addresses', new_email)
elif hidden is not None:
found_email['hidden'] = hidden
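Illustrative calls on an assumed author-builder instance; a repeated address only updates its hidden flag:
builder.add_email_address('j.doe@example.org')               # appends {'value': 'j.doe@example.org'}
builder.add_email_address('j.doe@example.org', hidden=True)  # same address: only sets 'hidden' on the existing entry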
| 841,371 |
Add a personal website.
Args:
:param url: url to the person's website.
:type url: string
:param description: short description of the website.
:type description: string
|
def add_url(self, url, description=None):
url = {
'value': url,
}
if description:
url['description'] = description
self._append_to('urls', url)
| 841,372 |
Add a private note.
Args:
:param note: comment about the author.
:type note: string
:param source: the source of the comment.
:type source: string
|
def add_private_note(self, note, source=None):
note = {
'value': note,
}
if source:
note['source'] = source
self._append_to('_private_notes', note)
| 841,377 |
Add artid, start, end pages to publication info of a reference.
Args:
page_start(Optional[string]): value for the field page_start
page_end(Optional[string]): value for the field page_end
artid(Optional[string]): value for the field artid
Raises:
ValueError: when an end_page is given without a start_page
|
def set_page_artid(self, page_start=None, page_end=None, artid=None):
if page_end and not page_start:
raise ValueError('End_page provided without start_page')
self._ensure_reference_field('publication_info', {})
publication_info = self.obj['reference']['publication_info']
if page_start:
publication_info['page_start'] = page_start
if page_end:
publication_info['page_end'] = page_end
if artid:
publication_info['artid'] = artid
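A sketch of the expected behaviour; all values are placeholders:
builder.set_page_artid(page_start='12', page_end='18', artid='CONF-042')
try:
    builder.set_page_artid(page_end='18')
except ValueError:
    pass   # an end page without a start page is rejected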
| 841,934 |
Reload specific NApps or all NApps.
Args:
napps (list): list of NApps to be reloaded.
Raises:
requests.HTTPError: When there's a server error.
|
def reload_napps(self, napps=None):
if napps is None:
napps = []
api = self._config.get('kytos', 'api')
endpoint = os.path.join(api, 'api', 'kytos', 'core', 'reload',
'all')
response = self.make_request(endpoint)
for napp in napps:
api = self._config.get('kytos', 'api')
endpoint = os.path.join(api, 'api', 'kytos', 'core', 'reload',
napp[0], napp[1])
response = self.make_request(endpoint)
if response.status_code != 200:
raise KytosException('Error reloading the napp: module not found'
                     ' or could not be imported')
return response.content
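A hypothetical call, assuming `mgr` is a configured manager exposing this method; the NApp name is invented:
mgr.reload_napps()                          # hits the 'reload/all' endpoint only
mgr.reload_napps([('kytos', 'of_core')])    # 'reload/all' first, then one request per (username, name) pair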
| 842,500 |
Send a user_dict to the NApps server using a POST request.
Args:
user_dict(dict): Dictionary with user attributes.
Returns:
result(string): the response from the NApps server.
|
def register(self, user_dict):
endpoint = os.path.join(self._config.get('napps', 'api'), 'users', '')
res = self.make_request(endpoint, method='POST', json=user_dict)
return res.content.decode('utf-8')
| 842,503 |
Enable a list of NApps.
Args:
napps (list): List of NApps.
|
def enable_napps(cls, napps):
mgr = NAppsManager()
for napp in napps:
mgr.set_napp(*napp)
LOG.info('NApp %s:', mgr.napp_id)
cls.enable_napp(mgr)
| 842,602 |
The group index with respect to wavelength.
Args:
wavelength (float, list, None): The wavelength(s) the group
index will be evaluated at.
Returns:
float, list: The group index at the target wavelength(s).
|
def ng(self, wavelength):
return self.n(wavelength) - (wavelength*1.e-9)*self.nDer1(wavelength)
| 842,657 |
The group velocity dispersion (GVD) with respect to wavelength.
Args:
wavelength (float, list, None): The wavelength(s) the GVD will
be evaluated at.
Returns:
float, list: The GVD at the target wavelength(s).
|
def gvd(self, wavelength):
g = (wavelength*1.e-9)**3./(2.*spc.pi*spc.c**2.) * self.nDer2(wavelength)
return g
| 842,658 |
Helpful function to evaluate Cauchy equations.
Args:
wavelength (float, list, None): The wavelength(s) the
Cauchy equation will be evaluated at.
coefficients (list): A list of the coefficients of
the Cauchy equation.
Returns:
float, list: The refractive index at the target wavelength(s).
|
def _cauchy_equation(wavelength, coefficients):
n = 0.
for i, c in enumerate(coefficients):
exponent = 2*i
n += c / wavelength**exponent
return n
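A quick sketch with made-up coefficients (not real material data); the wavelength must be in whatever units the coefficients were fitted for:
n = _cauchy_equation(1.550, [1.4580, 0.00354])   # n = A + B/lambda**2, lambda in micrometres here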
| 842,659 |
Checks if the specified model instance matches the class model.
By default this method will raise a `ValueError` if the model is not of
expected type.
Args:
model (Model) : The instance to be type checked
raise_error (bool) : Flag to specify whether to raise error on
type check failure
Raises:
ValueError: If `model` is not an instance of the respective Model
class
|
def _isinstance(self, model, raise_error=True):
rv = isinstance(model, self.__model__)
if not rv and raise_error:
raise ValueError('%s is not of type %s' % (model, self.__model__))
return rv
| 842,727 |
Returns a preprocessed dictionary of parameters.
Use this to filter the kwargs passed to `new`, `create`,
`build` methods.
Args:
**kwargs: a dictionary of parameters
|
def _preprocess_params(cls, kwargs):
# kwargs.pop('csrf_token', None)
for attr, val in list(kwargs.items()):  # copy the items so keys can be deleted while iterating
if cls.is_the_primary_key(attr) and cls._prevent_primary_key_initialization_:
del kwargs[attr]
continue
if val == "":
# Making an assumption that there is no good usecase
# for setting an empty string. This will help prevent
# cases where empty string is sent because of client
# not clearing form fields to null
kwargs[attr] = None
continue
if attr in class_mapper(cls).relationships and attr not in cls._no_overwrite_:
rel = class_mapper(cls).relationships[attr]
if rel.uselist:
if isinstance(val, list):
if all(isinstance(v, dict) for v in val):
rel_cls = cls.mapped_rel_class(attr)
kwargs[attr] = rel_cls.update_or_new_all(
list_of_kwargs=val, keys=[rel_cls.primary_key_name()])
elif isinstance(val, dict):
rel_cls = cls.mapped_rel_class(attr)
mapping_col = rel.collection_class().keyfunc.name
list_of_kwargs = [merge(v, {mapping_col: k}) for k, v in val.items()]
kwargs[attr] = {getattr(obj, mapping_col): obj for obj in rel_cls.update_or_new_all(
list_of_kwargs=list_of_kwargs, keys=[rel_cls.primary_key_name()])}
elif isinstance(val, dict):
rel_cls = cls.mapped_rel_class(attr)
kwargs[attr] = rel_cls.update_or_new(
**merge(val, {'keys': [rel_cls.primary_key_name()]}))
return kwargs
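A hedged illustration of the preprocessing, reusing the Customer example model from the docstrings below; the relationship and values are invented:
params = Customer._preprocess_params({
    'id': 7,                    # dropped when primary-key initialisation is prevented
    'name': '',                 # empty strings are converted to None
    'orders': [{'item': 'x'}],  # lists of dicts on relationships become persisted instances
})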
| 842,728 |
Updates an instance.
Args:
**kwargs : Arbitrary keyword arguments. Column names are
keywords and their new values are the values.
Examples:
>>> customer.update(email="new.email@example.com", name="new")
|
def update(self, **kwargs):
kwargs = self._preprocess_params(kwargs)
kwargs = self.preprocess_kwargs_before_update(kwargs)
for key, value in kwargs.items():
cls = type(self)
if not hasattr(cls, key) or isinstance(getattr(cls, key), property):
continue
if key not in self._no_overwrite_:
setattr(self, key, value)
if isinstance(getattr(self, key), OrderingList):
getattr(self, key).reorder()
elif isinstance(getattr(cls, key), AssociationProxyInstance):
target_name = getattr(cls, key).target_collection
target_rel = getattr(self, target_name)
if isinstance(target_rel, OrderingList):
target_rel.reorder()
try:
self.session.commit()
return self
except Exception as e:
self.session.rollback()
raise e
| 842,730 |
Same as SQLAlchemy's filter_by. Additionally this accepts
two special keyword arguments `limit` and `reverse` for limiting
the results and reversing the order respectively.
Args:
**kwargs: filter parameters
Examples:
>>> user = User.filter_by(email="some.user@example.com")
>>> shipments = Shipment.filter_by(country="India", limit=3, reverse=True)
|
def filter_by(cls, **kwargs):
limit = kwargs.pop('limit', None)
reverse = kwargs.pop('reverse', False)
q = cls.query.filter_by(**kwargs)
if reverse:
q = q.order_by(cls.id.desc())
if limit:
q = q.limit(limit)
return q
| 842,731 |
Adds a model instance to session and commits the
transaction.
Args:
model: The instance to add.
Examples:
>>> customer = Customer.new(name="hari", email="hari@example.com")
>>> Customer.add(customer)
hari@example.com
|
def add(cls, model, commit=True):
if not isinstance(model, cls):
raise ValueError('%s is not of type %s' % (model, cls))
cls.session.add(model)
try:
if commit:
cls.session.commit()
return model
except:
cls.session.rollback()
raise
| 842,735 |