<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_regex(data, position, dummy0, dummy1):
"""Decode a BSON regex to bson.regex.Regex or a python pattern object."""
|
pattern, position = _get_c_string(data, position)
bson_flags, position = _get_c_string(data, position)
bson_re = Regex(pattern, bson_flags)
return bson_re, position
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _encode_mapping(name, value, check_keys, opts):
"""Encode a mapping type."""
|
data = b"".join([_element_to_bson(key, val, check_keys, opts)
for key, val in iteritems(value)])
return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00"
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _encode_code(name, value, dummy, opts):
"""Encode bson.code.Code."""
|
cstring = _make_c_string(value)
cstrlen = len(cstring)
if not value.scope:
return b"\x0D" + name + _PACK_INT(cstrlen) + cstring
scope = _dict_to_bson(value.scope, False, opts, False)
full_length = _PACK_INT(8 + cstrlen + len(scope))
return b"\x0F" + name + full_length + _PACK_INT(cstrlen) + cstring + scope
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def simToReg(self, sim):
"""Convert simplified domain expression to regular expression"""
|
# strip a leading and a trailing slash if present
res = re.sub('^/', '', sim)
res = re.sub('/$', '', res)
return '^/?' + re.sub(r'\*', '[^/]+', res) + '/?$'
|
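A brief usage sketch for the simToReg logic above, applied standalone (the helper name sim_to_reg and the sample paths are illustrative assumptions):

import re

def sim_to_reg(sim):
    # strip a leading and a trailing slash, then turn '*' into "one path segment"
    res = re.sub('^/', '', sim)
    res = re.sub('/$', '', res)
    return '^/?' + re.sub(r'\*', '[^/]+', res) + '/?$'

pattern = sim_to_reg('/books/*/chapters/')      # '^/?books/[^/]+/chapters/?$'
print(bool(re.match(pattern, '/books/42/chapters')))        # True
print(bool(re.match(pattern, '/books/42/extra/chapters')))  # False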
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def match(self, dom, act):
""" Check if the given `domain` and `act` are allowed by this capability """
|
return self.match_domain(dom) and self.match_action(act)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def to_list(self):
'''convert an actions bitmask into a list of action strings'''
res = []
for a in self.__class__.ACTIONS:
aBit = self.__class__.action_bitmask(a)
if ((self & aBit) == aBit):
res.append(a)
return res
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def from_list(cls, actions):
'''convert list of actions into the corresponding bitmask'''
bitmask = 0
for a in actions:
bitmask |= cls.action_bitmask(a)
return Action(bitmask)
|
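A minimal sketch tying to_list and from_list together; the ACTIONS list and the action_bitmask classmethod are assumptions inferred from the two methods above:

class Action(int):
    ACTIONS = ['read', 'write', 'delete']

    @classmethod
    def action_bitmask(cls, action):
        # one bit per known action, in ACTIONS order
        return 1 << cls.ACTIONS.index(action)

    def to_list(self):
        '''convert an actions bitmask into a list of action strings'''
        return [a for a in self.ACTIONS
                if (self & self.action_bitmask(a)) == self.action_bitmask(a)]

    @classmethod
    def from_list(cls, actions):
        '''convert a list of actions into the corresponding bitmask'''
        bitmask = 0
        for a in actions:
            bitmask |= cls.action_bitmask(a)
        return cls(bitmask)

print(Action.from_list(['read', 'delete']).to_list())  # ['read', 'delete']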
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def str_val(val):
""" Format the value of a metric value to a string :param val: number to be formatted :return: a string with the formatted value """
|
str_val = val
if val is None:
str_val = "NA"
elif isinstance(val, float):
str_val = '%0.2f' % val
else:
str_val = str(val)
return str_val
|
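Example calls for str_val above:

print(str_val(None))     # 'NA'
print(str_val(3.14159))  # '3.14'
print(str_val(42))       # '42'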
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def load_cfg(path, envvar_prefix='LIBREANT_', debug=False):
'''wrapper of config_utils.load_configs'''
try:
return load_configs(envvar_prefix, path=path)
except Exception as e:
if debug:
raise
else:
die(str(e))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def files_in_subdir(dir, subdir):
"""Find all files in a directory."""
|
paths = []
for (path, dirs, files) in os.walk(os.path.join(dir, subdir)):
for file in files:
paths.append(os.path.relpath(os.path.join(path, file), dir))
return paths
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def overview(index, start, end):
"""Compute metrics in the overview section for enriched git indexes. Returns a dictionary. Each key in the dictionary is the name of a metric, the value is the value of that metric. Value can be a complex object (eg, a time series). :param index: index object :param start: start date to get the data from :param end: end date to get the data upto :return: dictionary with the value of the metrics """
|
results = {
"activity_metrics": [Commits(index, start, end)],
"author_metrics": [Authors(index, start, end)],
"bmi_metrics": [],
"time_to_close_metrics": [],
"projects_metrics": []
}
return results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def project_activity(index, start, end):
"""Compute the metrics for the project activity section of the enriched git index. Returns a dictionary containing a "metric" key. This key contains the metrics for this section. :param index: index object :param start: start date to get the data from :param end: end date to get the data upto :return: dictionary with the value of the metrics """
|
results = {
"metrics": [Commits(index, start, end),
Authors(index, start, end)]
}
return results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def project_community(index, start, end):
"""Compute the metrics for the project community section of the enriched git index. Returns a dictionary containing "author_metrics", "people_top_metrics" and "orgs_top_metrics" as the keys and the related Metrics as the values. :param index: index object :param start: start date to get the data from :param end: end date to get the data upto :return: dictionary with the value of the metrics """
|
results = {
"author_metrics": [Authors(index, start, end)],
"people_top_metrics": [Authors(index, start, end)],
"orgs_top_metrics": [Organizations(index, start, end)],
}
return results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def aggregations(self):
""" Override parent method. Obtain list of the terms and their corresponding values using "terms" aggregations for the previous time period. :returns: a data frame containing terms and their corresponding values """
|
prev_month_start = get_prev_month(self.end, self.query.interval_)
self.query.since(prev_month_start)
self.query.get_terms("author_name")
return self.query.get_list(dataframe=True)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def project_activity(index, start, end):
"""Compute the metrics for the project activity section of the enriched github issues index. Returns a dictionary containing a "metric" key. This key contains the metrics for this section. :param index: index object :param start: start date to get the data from :param end: end date to get the data upto :return: dictionary with the value of the metrics """
|
results = {
"metrics": [OpenedIssues(index, start, end),
ClosedIssues(index, start, end)]
}
return results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def aggregations(self):
"""Get the aggregation value for BMI with respect to the previous time interval."""
|
prev_month_start = get_prev_month(self.end,
self.closed.query.interval_)
self.closed.query.since(prev_month_start,
field="closed_at")
closed_agg = self.closed.aggregations()
self.opened.query.since(prev_month_start)
opened_agg = self.opened.aggregations()
if opened_agg == 0:
bmi = 1.0 # if no submitted issues/prs, bmi is at 100%
else:
bmi = closed_agg / opened_agg
return bmi
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def c_log(level, message):
"""python logger to be called from fortran"""
|
c_level = level
level = LEVELS_F2PY[c_level]
logger.log(level, message)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def struct2dict(struct):
"""convert a ctypes structure to a dictionary"""
|
return {x: getattr(struct, x) for x in dict(struct._fields_).keys()}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def structs2records(structs):
"""convert one or more structs and generate dictionaries"""
|
try:
n = len(structs)
except TypeError:
# no array
yield struct2dict(structs)
# just 1
return
for i in range(n):
struct = structs[i]
yield struct2dict(struct)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def structs2pandas(structs):
"""convert ctypes structure or structure array to pandas data frame"""
|
try:
import pandas
records = list(structs2records(structs))
df = pandas.DataFrame.from_records(records)
# TODO: do this for string columns, for now just for id
# How can we check for string columns, this is not nice:
# df.columns[df.dtypes == object]
if 'id' in df:
df["id"] = df["id"].apply(str.rstrip)
return df
except ImportError:
# pandas not found, that's ok
return structs
|
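A small sketch exercising struct2dict, structs2records and structs2pandas above on a ctypes structure array (the Point structure and its fields are illustrative assumptions):

from ctypes import Structure, c_double

class Point(Structure):
    _fields_ = [('x', c_double), ('y', c_double)]

points = (Point * 2)(Point(0.0, 1.0), Point(2.0, 3.0))
print(struct2dict(points[0]))         # {'x': 0.0, 'y': 1.0}
print(list(structs2records(points)))  # [{'x': 0.0, 'y': 1.0}, {'x': 2.0, 'y': 3.0}]
print(structs2pandas(points))         # a pandas DataFrame if pandas is installed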
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wrap(func):
"""Return wrapped function with type conversion and sanity checks. """
|
@functools.wraps(func, assigned=('restype', 'argtypes'))
def wrapped(*args):
if len(args) != len(func.argtypes):
logger.warn("{} {} not of same length",
args, func.argtypes)
typed_args = []
for (arg, argtype) in zip(args, func.argtypes):
if argtype == c_char_p:
# create a string buffer for strings
typed_arg = create_string_buffer(arg)
else:
# for other types, use the type to do the conversion
if hasattr(argtype, 'contents'):
# type is a pointer
typed_arg = argtype(argtype._type_(arg))
else:
typed_arg = argtype(arg)
typed_args.append(typed_arg)
result = func(*typed_args)
if hasattr(result, 'contents'):
return result.contents
else:
return result
return wrapped
|
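A hedged, platform-dependent sketch of wrap() applied to libc's strlen (assumes the ctypes imports and logger used by wrap above are in scope; on Windows another library would be needed):

import ctypes.util
from ctypes import CDLL, c_char_p, c_size_t

libc = CDLL(ctypes.util.find_library('c'))
libc.strlen.argtypes = [c_char_p]
libc.strlen.restype = c_size_t

strlen = wrap(libc.strlen)
print(strlen(b'hello'))  # 5; the bytes argument is copied into a string buffer first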
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _libname(self):
"""Return platform-specific modelf90 shared library name."""
|
prefix = 'lib'
suffix = '.so'
if platform.system() == 'Darwin':
suffix = '.dylib'
if platform.system() == 'Windows':
prefix = ''
suffix = '.dll'
return prefix + self.engine + suffix
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _library_path(self):
"""Return full path to the shared library. A couple of regular unix paths like ``/usr/lib/`` is searched by default. If your library is not in one of those, set a ``LD_LIBRARY_PATH`` environment variable to the directory with your shared library. If the library cannot be found, a ``RuntimeError`` with debug information is raised. """
|
# engine is an existing library name
# TODO change add directory to library path
if os.path.isfile(self.engine):
return self.engine
pathname = 'LD_LIBRARY_PATH'
separator = ':'
if platform.system() == 'Darwin':
pathname = 'DYLD_LIBRARY_PATH'
separator = ':'
if platform.system() == 'Windows':
# windows does not separate between dll path's and exe paths
pathname = 'PATH'
separator = ';'
lib_path_from_environment = os.environ.get(pathname, '')
# Expand the paths with the system path if it exists
if lib_path_from_environment:
known_paths = [
path for path in lib_path_from_environment.split(separator)] + self.known_paths
else:
known_paths = self.known_paths
# expand ~
known_paths = [os.path.expanduser(path) for path in known_paths]
possible_libraries = [os.path.join(path, self._libname())
for path in known_paths]
for library in possible_libraries:
if os.path.exists(library):
logger.info("Using model fortran library %s", library)
return library
msg = "Library not found, looked in %s" % ', '.join(possible_libraries)
raise RuntimeError(msg)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_library(self):
"""Return the fortran library, loaded with """
|
path = self._library_path()
logger.info("Loading library from path {}".format(path))
library_dir = os.path.dirname(path)
if platform.system() == 'Windows':
import win32api
olddir = os.getcwd()
os.chdir(library_dir)
win32api.SetDllDirectory('.')
result = cdll.LoadLibrary(path)
if platform.system() == 'Windows':
os.chdir(olddir)
return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def finalize(self):
"""Shutdown the library and clean up the model. Note that the Fortran library's cleanup code is not up to snuff yet, so the cleanup is not perfect. Note also that the working directory is changed back to the original one. """
|
self.library.finalize.argtypes = []
self.library.finalize.restype = c_int
ierr = wrap(self.library.finalize)()
# always go back to previous directory
logger.info('cd {}'.format(self.original_dir))
# This one doesn't work.
os.chdir(self.original_dir)
if ierr:
errormsg = "Finalizing model {engine} failed with exit code {code}"
raise RuntimeError(errormsg.format(engine=self.engine, code=ierr))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_var_count(self):
""" Return number of variables """
|
n = c_int()
self.library.get_var_count.argtypes = [POINTER(c_int)]
self.library.get_var_count(byref(n))
return n.value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inq_compound_field(self, name, index):
""" Lookup the type,rank and shape of a compound field """
|
typename = create_string_buffer(name)
index = c_int(index + 1)
fieldname = create_string_buffer(MAXSTRLEN)
fieldtype = create_string_buffer(MAXSTRLEN)
rank = c_int()
arraytype = ndpointer(dtype='int32',
ndim=1,
shape=(MAXDIMS, ),
flags='F')
shape = np.empty((MAXDIMS, ), dtype='int32', order='F')
self.library.inq_compound_field.argtypes = [c_char_p,
POINTER(c_int),
c_char_p,
c_char_p,
POINTER(c_int),
arraytype]
self.library.inq_compound_field.restype = None
self.library.inq_compound_field(typename,
byref(index),
fieldname,
fieldtype,
byref(rank),
shape)
return (fieldname.value,
fieldtype.value,
rank.value,
tuple(shape[:rank.value]))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_compound_ctype(self, varname):
""" Create a ctypes type that corresponds to a compound type in memory. """
|
# look up the type name
compoundname = self.get_var_type(varname)
nfields = self.inq_compound(compoundname)
# for all the fields look up the type, rank and shape
fields = []
for i in range(nfields):
(fieldname, fieldtype,
fieldrank, fieldshape) = self.inq_compound_field(compoundname, i)
assert fieldrank <= 1
fieldctype = CTYPESMAP[fieldtype]
if fieldrank == 1:
fieldctype = fieldctype * fieldshape[0]
fields.append((fieldname, fieldctype))
# create a new structure
class COMPOUND(Structure):
_fields_ = fields
# if we have a rank 1 array, create an array
rank = self.get_var_rank(varname)
assert rank <= 1, "we can't handle >=2 dimensional compounds yet"
if rank == 1:
shape = self.get_var_shape(varname)
valtype = POINTER(ARRAY(COMPOUND, shape[0]))
else:
valtype = POINTER(COMPOUND)
# return the custom type
return valtype
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_var_rank(self, name):
""" Return array rank or 0 for scalar. """
|
name = create_string_buffer(name)
rank = c_int()
self.library.get_var_rank.argtypes = [c_char_p, POINTER(c_int)]
self.library.get_var_rank.restype = None
self.library.get_var_rank(name, byref(rank))
return rank.value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_var_shape(self, name):
""" Return shape of the array. """
|
rank = self.get_var_rank(name)
name = create_string_buffer(name)
arraytype = ndpointer(dtype='int32',
ndim=1,
shape=(MAXDIMS, ),
flags='F')
shape = np.empty((MAXDIMS, ), dtype='int32', order='F')
self.library.get_var_shape.argtypes = [c_char_p, arraytype]
self.library.get_var_shape(name, shape)
return tuple(shape[:rank])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_start_time(self):
""" returns start time """
|
start_time = c_double()
self.library.get_start_time.argtypes = [POINTER(c_double)]
self.library.get_start_time.restype = None
self.library.get_start_time(byref(start_time))
return start_time.value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_end_time(self):
""" returns end time of simulation """
|
end_time = c_double()
self.library.get_end_time.argtypes = [POINTER(c_double)]
self.library.get_end_time.restype = None
self.library.get_end_time(byref(end_time))
return end_time.value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_current_time(self):
""" returns current time of simulation """
|
current_time = c_double()
self.library.get_current_time.argtypes = [POINTER(c_double)]
self.library.get_current_time.restype = None
self.library.get_current_time(byref(current_time))
return current_time.value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_time_step(self):
""" returns current time step of simulation """
|
time_step = c_double()
self.library.get_time_step.argtypes = [POINTER(c_double)]
self.library.get_time_step.restype = None
self.library.get_time_step(byref(time_step))
return time_step.value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_var(self, name):
"""Return an nd array from model library"""
|
# How many dimensions.
rank = self.get_var_rank(name)
# The shape array is fixed size
shape = np.empty((MAXDIMS, ), dtype='int32', order='F')
shape = self.get_var_shape(name)
# there should be nothing here...
assert sum(shape[rank:]) == 0
# variable type name
type_ = self.get_var_type(name)
is_numpytype = type_ in TYPEMAP
if is_numpytype:
# Store the data in this type
arraytype = ndpointer(dtype=TYPEMAP[type_],
ndim=rank,
shape=shape,
flags='F')
# '' or b''
elif not type_:
raise ValueError('type not found for variable {}'.format(name))
else:
arraytype = self.make_compound_ctype(name)
# Create a pointer to the array type
data = arraytype()
# The functions get_var_type/_shape/_rank are already wrapped with
# python function converter, get_var isn't.
c_name = create_string_buffer(name)
get_var = self.library.get_var
get_var.argtypes = [c_char_p, POINTER(arraytype)]
get_var.restype = None
# Get the array
get_var(c_name, byref(data))
if not data:
logger.info("NULL pointer returned")
return None
if is_numpytype:
# for now always a pointer, see python-subgrid for advanced examples
array = np.ctypeslib.as_array(data)
else:
array = structs2pandas(data.contents)
return array
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_logger(self, logger):
"""subscribe to fortran log messages"""
|
# we don't expect anything back
try:
self.library.set_logger.restype = None
except AttributeError:
logger.warn("Tried to set logger but method is not implemented in %s", self.engine)
return
# as an argument we need a pointer to a fortran log func...
self.library.set_logger.argtypes = [
(fortran_log_functype)]
self.library.set_logger(fortran_log_func)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_current_time(self, current_time):
""" sets current time of simulation """
|
current_time = c_double(current_time)
try:
self.library.set_current_time.argtypes = [POINTER(c_double)]
self.library.set_current_time.restype = None
self.library.set_current_time(byref(current_time))
except AttributeError:
logger.warn("Tried to set current time but method is not implemented in %s", self.engine)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def setup_db(self, wait_for_ready=True):
''' Create and configure index
If `wait_for_ready` is True, this function will block until
the status of `self.index_name` is `yellow`
'''
if self.es.indices.exists(self.index_name):
try:
self.update_mappings()
except MappingsException as ex:
log.error(ex)
log.warn('An old or wrong properties mapping has been found for index: "{0}",\
this could lead to some errors. It is recommended to run "libreant-db upgrade"'.format(self.index_name))
else:
log.debug("Index is missing: '{0}'".format(self.index_name))
self.create_index()
if wait_for_ready:
log.debug('waiting for index "{}" to be ready'.format(self.index_name))
self.es.cluster.health(index=self.index_name, level='index', wait_for_status='yellow')
log.debug('index "{}" is now ready'.format(self.index_name))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def create_index(self, indexname=None, index_conf=None):
''' Create the index
Create the index with given configuration.
If `indexname` is provided it will be used as the new index name
instead of the class one (:py:attr:`DB.index_name`)
:param index_conf: configuration to be used in index creation. If this
is not specified the default index configuration will be used.
:raises Exception: if the index already exists.
'''
if indexname is None:
indexname = self.index_name
log.debug("Creating new index: '{0}'".format(indexname))
if index_conf is None:
index_conf = {'settings': self.settings,
'mappings': {'book': {'properties': self.properties}}}
try:
self.es.indices.create(index=indexname, body=index_conf)
except TransportError as te:
if te.error.startswith("IndexAlreadyExistsException"):
raise Exception("Cannot create index '{}', already exists".format(indexname))
else:
raise
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def clone_index(self, new_indexname, index_conf=None):
'''Clone current index
All entries of the current index will be copied into the newly
created one named `new_indexname`
:param index_conf: Configuration to be used in the new index creation.
This param will be passed directly to :py:func:`DB.create_index`
'''
log.debug("Cloning index '{}' into '{}'".format(self.index_name, new_indexname))
self.create_index(indexname=new_indexname, index_conf=index_conf)
reindex(self.es, self.index_name, new_indexname)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def reindex(self, new_index=None, index_conf=None):
'''Rebuild the current index
This function could be useful in the case you want to change some index settings/mappings
and you don't want to lose all the entries belonging to that index.
This function is built in such a way that you can continue to use the old index name,
this is achieved using index aliases.
The old index will be cloned into a new one with the given `index_conf`.
If we are working on an alias, it is redirected to the new index.
Otherwise a brand new alias with the old index name is created in such a way that
points to the newly created index.
Keep in mind that even if you can continue to use the same index name,
the old index will be deleted.
:param index_conf: Configuration to be used in the new index creation.
This param will be passed directly to :py:func:`DB.create_index`
'''
alias = self.index_name if self.es.indices.exists_alias(name=self.index_name) else None
if alias:
original_index=self.es.indices.get_alias(self.index_name).popitem()[0]
else:
original_index=self.index_name
if new_index is None:
mtc = re.match(r"^.*_v(\d)*$", original_index)
if mtc:
new_index = original_index[:mtc.start(1)] + str(int(mtc.group(1)) + 1)
else:
new_index = original_index + '_v1'
log.debug("Reindexing {{ alias: '{}', original_index: '{}', new_index: '{}'}}".format(alias, original_index, new_index))
self.clone_index(new_index, index_conf=index_conf)
if alias:
log.debug("Moving alias from ['{0}' -> '{1}'] to ['{0}' -> '{2}']".format(alias, original_index, new_index))
self.es.indices.update_aliases(body={
"actions" : [
{ "remove" : { "alias": alias, "index" : original_index} },
{ "add" : { "alias": alias, "index" : new_index } }
]})
log.debug("Deleting old index: '{}'".format(original_index))
self.es.indices.delete(original_index)
if not alias:
log.debug("Crating new alias: ['{0}' -> '{1}']".format(original_index, new_index))
self.es.indices.update_aliases(body={
"actions" : [
{ "add" : { "alias": original_index, "index" : new_index } }
]})
|
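A short sketch of the index-name versioning used by reindex() above: names without a version suffix get '_v1', an existing '_vN' suffix is incremented (next_index_name is an illustrative helper, not part of the original code):

import re

def next_index_name(original_index):
    mtc = re.match(r"^.*_v(\d)*$", original_index)
    if mtc:
        return original_index[:mtc.start(1)] + str(int(mtc.group(1)) + 1)
    return original_index + '_v1'

print(next_index_name('libreant'))     # 'libreant_v1'
print(next_index_name('libreant_v1'))  # 'libreant_v2'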
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def mlt(self, _id):
'''
High-level method to do "more like this".
Its exact implementation can vary.
'''
query = {
'query': {'more_like_this': {
'like': {'_id': _id},
'min_term_freq': 1,
'min_doc_freq': 1,
}}
}
if es_version[0] <= 1:
mlt = query['query']['more_like_this']
mlt['ids'] = [_id]
del mlt['like']
return self._search(query)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def file_is_attached(self, url):
'''return true if at least one book has
file with the given url as attachment
'''
body = self._get_search_field('_attachments.url', url)
return self.es.count(index=self.index_name, body=body)['count'] > 0
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def delete_all(self):
'''Delete all books from the index'''
def delete_action_gen():
scanner = scan(self.es,
index=self.index_name,
query={'query': {'match_all':{}}})
for v in scanner:
yield { '_op_type': 'delete',
'_index': self.index_name,
'_type': v['_type'],
'_id': v['_id'],
}
bulk(self.es, delete_action_gen())
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def update_book(self, id, body, doc_type='book'):
''' Update a book
The "body" is merged with the current one.
Yes, it is NOT overwritten.
In case of concurrency conflict
this function could raise `elasticsearch.ConflictError`
'''
# note that we are NOT overwriting all the _source, just merging
book = self.get_book_by_id(id)
book['_source'].update(body)
validated = validate_book(book['_source'])
ret = self.es.index(index=self.index_name, id=id,
doc_type=doc_type, body=validated, version=book['_version'])
return ret
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def modify_book(self, id, body, doc_type='book', version=None):
''' replace the entire book body
Unlike `update_book`, this function
will overwrite the book content with param body
If param `version` is given, it will be checked that the
changes are applied upon that document version.
If the document version provided is different from the one actually found,
an `elasticsearch.ConflictError` will be raised
'''
validatedBody = validate_book(body)
params = dict(index=self.index_name, id=id, doc_type=doc_type, body=validatedBody)
if version:
params['version'] = version
ret = self.es.index(**params)
return ret
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def increment_download_count(self, id, attachmentID, doc_type='book'):
'''
Increment the download counter of a specific file
'''
body = self.es.get(index=self.index_name, id=id, doc_type='book', _source_include='_attachments')['_source']
for attachment in body['_attachments']:
if attachment['id'] == attachmentID:
attachment['download_count'] += 1
self.es.update(index=self.index_name,
id=id,
doc_type=doc_type,
body={"doc": {'_attachments': body['_attachments']}})
return
raise NotFoundError("No attachment could be found with id: {}".format(attachmentID))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __add_types(self, raw_conf):
""" Convert to int, boolean, list, None types config items """
|
typed_conf = {}
for s in raw_conf.keys():
typed_conf[s] = {}
for option in raw_conf[s]:
val = raw_conf[s][option]
if len(val) > 1 and (val[0] == '"' and val[-1] == '"'):
# It is a string
typed_conf[s][option] = val[1:-1]
# Check list
elif len(val) > 1 and (val[0] == '[' and val[-1] == ']'):
# List value
typed_conf[s][option] = val[1:-1].replace(' ', '').split(',')
# Check boolean
elif val.lower() in ['true', 'false']:
typed_conf[s][option] = True if val.lower() == 'true' else False
# Check None
elif val.lower() == 'none':
typed_conf[s][option] = None
else:
try:
# Check int
typed_conf[s][option] = int(val)
except ValueError:
# Is a string
typed_conf[s][option] = val
return typed_conf
|
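An illustrative before/after for the typing rules in __add_types above (section and option names are made up):

raw_conf = {'general': {'name': '"grimoire"',
                        'backends': '[git, github]',
                        'debug': 'true',
                        'sleep': 'none',
                        'retries': '3',
                        'path': '/tmp/data'}}
# self.__add_types(raw_conf) would return:
# {'general': {'name': 'grimoire',
#              'backends': ['git', 'github'],
#              'debug': True,
#              'sleep': None,
#              'retries': 3,
#              'path': '/tmp/data'}}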
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Elasticsearch(*args, **kwargs):
"""Elasticsearch wrapper function Wrapper function around the official Elasticsearch class that adds a simple version check upon initialization. In particular it checks if the major version of the library in use match the one of the cluster that we are tring to interact with. The check can be skipped by setting to false the check_version parameter. #note: Boyska didn't like subclassing :) """
|
check_version = kwargs.pop('check_version', True)
es = Elasticsearch_official(*args, **kwargs)
if check_version:
es_version = es.info()['version']['number'].split('.')
if(int(es_version[0]) != int(es_pylib_version[0])):
raise RuntimeError("The Elasticsearch python library version does not match the one of the running cluster: {} != {}. Please install the correct elasticsearch-py version".format(es_pylib_version[0], es_version[0]))
return es
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_envvars(prefix=None, environ=None, envvars=None, as_json=True):
"""Load environment variables in a dictionary Values are parsed as JSON. If parsing fails with a ValueError, values are instead used as verbatim strings. :param prefix: If ``None`` is passed as envvars, all variables from ``environ`` starting with this prefix are imported. The prefix is stripped upon import. :param envvars: A dictionary of mappings of environment-variable-names to Flask configuration names. If a list is passed instead, names are mapped 1:1. If ``None``, see prefix argument. :param environ: use this dictionary instead of os.environ; this is here mostly for mockability :param as_json: If False, values will not be parsed as JSON first. """
|
conf = {}
if environ is None:
environ = os.environ
if prefix is None and envvars is None:
raise RuntimeError('Must either give prefix or envvars argument')
# if it's a list, convert to dict
if isinstance(envvars, list):
envvars = {k: k for k in envvars}
if not envvars:
envvars = {k: k[len(prefix):] for k in environ.keys()
if k.startswith(prefix)}
for env_name, name in envvars.items():
if env_name not in environ:
continue
if as_json:
try:
conf[name] = json.loads(environ[env_name])
except ValueError:
conf[name] = environ[env_name]
else:
conf[name] = environ[env_name]
return conf
|
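Example usage of from_envvars above with a mock environ; JSON-parsable values become typed, everything else stays a string:

env = {'LIBREANT_PORT': '8080',
       'LIBREANT_DEBUG': 'true',
       'LIBREANT_ES_HOSTS': '["localhost:9200"]',
       'LIBREANT_FSDB_PATH': '/var/lib/libreant'}
conf = from_envvars(prefix='LIBREANT_', environ=env)
# {'PORT': 8080, 'DEBUG': True,
#  'ES_HOSTS': ['localhost:9200'], 'FSDB_PATH': '/var/lib/libreant'}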
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculate_bmi(closed, submitted):
""" BMI is the ratio of the number of closed items to the number of total items submitted in a particular period of analysis. The items can be issues, pull requests and such :param closed: dataframe returned from get_timeseries() containing closed items :param submitted: dataframe returned from get_timeseries() containing total items :returns: a dataframe with "date" and "bmi" columns where the date column is also the index. bmi is the ratio of the number of items closed by the total number of items submitted in a "period" of analysis """
|
if sorted(closed.keys()) != sorted(submitted.keys()):
raise AttributeError("The buckets supplied are not congruent!")
dates = closed.index.values
closed_values = closed['value']
submitted_values = submitted['value']
ratios = []
for x, y in zip(closed_values, submitted_values):
if y == 0:
ratios.append(0.0)
else:
ratios.append(float("%.2f" % (x / y)))
df = pd.DataFrame.from_records({"date": dates, "bmi": ratios}, index="date")
return df.fillna(0)
|
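A small usage sketch for calculate_bmi above with two aligned toy time series (values are invented):

import pandas as pd

dates = pd.to_datetime(['2018-01-01', '2018-02-01', '2018-03-01'])
closed = pd.DataFrame({'value': [5, 0, 9]}, index=dates)
submitted = pd.DataFrame({'value': [10, 0, 12]}, index=dates)
print(calculate_bmi(closed, submitted))
# roughly:
#              bmi
# date
# 2018-01-01  0.50
# 2018-02-01  0.00
# 2018-03-01  0.75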
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_query(self, key_val={}):
""" Add an es_dsl query object to the es_dsl Search object :param key_val: a key-value pair(dict) containing the query to be added to the search object :returns: self, which allows the method to be chainable with the other methods """
|
q = Q("match", **key_val)
self.search = self.search.query(q)
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_inverse_query(self, key_val={}):
""" Add an es_dsl inverse query object to the es_dsl Search object :param key_val: a key-value pair(dict) containing the query to be added to the search object :returns: self, which allows the method to be chainable with the other methods """
|
q = Q("match", **key_val)
self.search = self.search.query(~q)
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_sum(self, field=None):
""" Create a sum aggregation object and add it to the aggregation dict :param field: the field present in the index that is to be aggregated :returns: self, which allows the method to be chainable with the other methods """
|
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
agg = A("sum", field=field)
self.aggregations['sum_' + field] = agg
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_average(self, field=None):
""" Create an avg aggregation object and add it to the aggregation dict :param field: the field present in the index that is to be aggregated :returns: self, which allows the method to be chainable with the other methods """
|
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
agg = A("avg", field=field)
self.aggregations['avg_' + field] = agg
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_percentiles(self, field=None, percents=None):
""" Create a percentile aggregation object and add it to the aggregation dict :param field: the field present in the index that is to be aggregated :param percents: the specific percentiles to be calculated default: [1.0, 5.0, 25.0, 50.0, 75.0, 95.0, 99.0] :returns: self, which allows the method to be chainable with the other methods """
|
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
if not percents:
percents = [1.0, 5.0, 25.0, 50.0, 75.0, 95.0, 99.0]
agg = A("percentiles", field=field, percents=percents)
self.aggregations['percentiles_' + field] = agg
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_terms(self, field=None):
""" Create a terms aggregation object and add it to the aggregation dict :param field: the field present in the index that is to be aggregated :returns: self, which allows the method to be chainable with the other methods """
|
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
agg = A("terms", field=field, size=self.size, order={"_count": "desc"})
self.aggregations['terms_' + field] = agg
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_min(self, field=None):
""" Create a min aggregation object and add it to the aggregation dict :param field: the field present in the index that is to be aggregated :returns: self, which allows the method to be chainable with the other methods """
|
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
agg = A("min", field=field)
self.aggregations['min_' + field] = agg
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_max(self, field=None):
""" Create a max aggregation object and add it to the aggregation dict :param field: the field present in the index that is to be aggregated :returns: self, which allows the method to be chainable with the other methods """
|
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
agg = A("max", field=field)
self.aggregations['max_' + field] = agg
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_cardinality(self, field=None):
""" Create a cardinality aggregation object and add it to the aggregation dict :param field: the field present in the index that is to be aggregated :returns: self, which allows the method to be chainable with the other methods """
|
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
agg = A("cardinality", field=field, precision_threshold=self.precision_threshold)
self.aggregations['cardinality_' + field] = agg
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_extended_stats(self, field=None):
""" Create an extended_stats aggregation object and add it to the aggregation dict :param field: the field present in the index that is to be aggregated :returns: self, which allows the method to be chainable with the other methods """
|
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
agg = A("extended_stats", field=field)
self.aggregations['extended_stats_' + field] = agg
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_custom_aggregation(self, agg, name=None):
""" Takes in an es_dsl Aggregation object and adds it to the aggregation dict. Can be used to add custom aggregations such as moving averages :param agg: aggregation to be added to the es_dsl search object :param name: name of the aggregation object (optional) :returns: self, which allows the method to be chainable with the other methods """
|
agg_name = name if name else 'custom_agg'
self.aggregations[agg_name] = agg
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def since(self, start, field=None):
""" Add the start date to query data starting from that date sets the default start date for each query :param start: date to start looking at the fields (from date) :param field: specific field for the start date in range filter for the Search object :returns: self, which allows the method to be chainable with the other methods """
|
if not field:
field = "grimoire_creation_date"
self.start_date = start
date_dict = {field: {"gte": "{}".format(self.start_date.isoformat())}}
self.search = self.search.filter("range", **date_dict)
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def until(self, end, field=None):
""" Add the end date to query data upto that date sets the default end date for each query :param end: date to stop looking at the fields (to date) :param field: specific field for the end date in range filter for the Search object :returns: self, which allows the method to be chainable with the other methods """
|
if not field:
field = "grimoire_creation_date"
self.end_date = end
date_dict = {field: {"lte": "{}".format(self.end_date.isoformat())}}
self.search = self.search.filter("range", **date_dict)
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def by_organizations(self, field=None):
""" Used to seggregate the data acording to organizations. This method pops the latest aggregation from the self.aggregations dict and adds it as a nested aggregation under itself :param field: the field to create the parent agg (optional) default: author_org_name :returns: self, which allows the method to be chainable with the other methods """
|
# this function is currently only for issues and PRs
agg_field = field if field else "author_org_name"
agg_key = "terms_" + agg_field
if agg_key in self.aggregations.keys():
agg = self.aggregations[agg_key]
else:
agg = A("terms", field=agg_field, missing="others", size=self.size)
child_agg_counter = self.child_agg_counter_dict[agg_key] # 0 if not present because defaultdict
child_name, child_agg = self.aggregations.popitem()
agg.metric(child_agg_counter, child_agg)
self.aggregations[agg_key] = agg
self.child_agg_counter_dict[agg_key] += 1
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def by_period(self, field=None, period=None, timezone=None, start=None, end=None):
""" Create a date histogram aggregation using the last added aggregation for the current object. Add this date_histogram aggregation into self.aggregations :param field: the index field to create the histogram from :param period: the interval which elasticsearch supports, ex: "month", "week" and such :param timezone: custom timezone :param start: custom start date for the date histogram, default: start date under range :param end: custom end date for the date histogram, default: end date under range :returns: self, which allows the method to be chainable with the other methods """
|
hist_period = period if period else self.interval_
time_zone = timezone if timezone else "UTC"
start_ = start if start else self.start_date
end_ = end if end else self.end_date
bounds = self.get_bounds(start_, end_)
date_field = field if field else "grimoire_creation_date"
agg_key = "date_histogram_" + date_field
if agg_key in self.aggregations.keys():
agg = self.aggregations[agg_key]
else:
agg = A("date_histogram", field=date_field, interval=hist_period,
time_zone=time_zone, min_doc_count=0, **bounds)
child_agg_counter = self.child_agg_counter_dict[agg_key]
child_name, child_agg = self.aggregations.popitem()
agg.metric(child_agg_counter, child_agg)
self.aggregations[agg_key] = agg
self.child_agg_counter_dict[agg_key] += 1
return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_bounds(self, start=None, end=None):
""" Get bounds for the date_histogram method :param start: start date to set the extended_bounds min field :param end: end date to set the extended_bounds max field :returns bounds: a dictionary containing the min and max fields required to set the bounds in date_histogram aggregation """
|
bounds = {}
if start or end:
# Extend bounds so we have data until start and end
start_ts = None
end_ts = None
if start:
start = start.replace(microsecond=0)
start_ts = start.replace(tzinfo=timezone.utc).timestamp()
start_ts_ms = start_ts * 1000 # ES uses ms
if end:
end = end.replace(microsecond=0)
end_ts = end.replace(tzinfo=timezone.utc).timestamp()
end_ts_ms = end_ts * 1000 # ES uses ms
bounds_data = {}
if start:
bounds_data["min"] = start_ts_ms
if end:
bounds_data["max"] = end_ts_ms
bounds["extended_bounds"] = bounds_data
return bounds
|
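A short sketch of the extended_bounds values produced by get_bounds above: start and end datetimes become UTC epoch timestamps in milliseconds, as elasticsearch expects:

from datetime import datetime, timezone

start = datetime(2018, 1, 1)
start_ts_ms = start.replace(microsecond=0).replace(tzinfo=timezone.utc).timestamp() * 1000
print(start_ts_ms)  # 1514764800000.0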
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reset_aggregations(self):
""" Remove all aggregations added to the search object """
|
temp_search = self.search.to_dict()
if 'aggs' in temp_search.keys():
del temp_search['aggs']
self.search.from_dict(temp_search)
self.parent_agg_counter = 0
self.child_agg_counter = 0
self.child_agg_counter_dict = defaultdict(int)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_aggregation_results(self):
""" Loops though the self.aggregations dict and adds them to the Search object in order in which they were created. Queries elasticsearch and returns a dict containing the results :returns: a dictionary containing the response from elasticsearch """
|
self.reset_aggregations()
for key, val in self.aggregations.items():
self.search.aggs.bucket(self.parent_agg_counter, val)
self.parent_agg_counter += 1
self.search = self.search.extra(size=0)
response = self.search.execute()
self.flush_aggregations()
return response.to_dict()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_results_from_source(self, *fields, dataframe=False):
""" Get values for specific fields in the elasticsearch index, from source :param fields: a list of fields that have to be retrieved from the index :param dataframe: if true, will return the data in the form of a pandas.DataFrame :returns: a list of dicts(key_val pairs) containing the values for the applied fields if dataframe=True, will return the a dataframe containing the data in rows and the fields representing column names """
|
if not fields:
raise AttributeError("Please provide the fields to get from elasticsearch!")
self.reset_aggregations()
self.search = self.search.extra(_source=fields)
self.search = self.search.extra(size=self.size)
response = self.search.execute()
hits = response.to_dict()['hits']['hits']
data = [item["_source"] for item in hits]
if dataframe:
df = pd.DataFrame.from_records(data)
return df.fillna(0)
return data
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_timeseries(self, child_agg_count=0, dataframe=False):
""" Get time series data for the specified fields and period of analysis :param child_agg_count: the child aggregation count to be used default = 0 :param dataframe: if dataframe=True, return a pandas.DataFrame object :returns: dictionary containing "date", "value" and "unixtime" keys with lists as values containing data from each bucket in the aggregation """
|
res = self.fetch_aggregation_results()
ts = {"date": [], "value": [], "unixtime": []}
if 'buckets' not in res['aggregations'][str(self.parent_agg_counter - 1)]:
raise RuntimeError("Aggregation results have no buckets in time series results.")
for bucket in res['aggregations'][str(self.parent_agg_counter - 1)]['buckets']:
ts['date'].append(parser.parse(bucket['key_as_string']).date())
if str(child_agg_count) in bucket:
# We have a subaggregation with the value
# If it is percentiles we get the median
if 'values' in bucket[str(child_agg_count)]:
val = bucket[str(child_agg_count)]['values']['50.0']
if val == 'NaN':
# ES returns NaN. Convert to None for matplotlib graph
val = None
ts['value'].append(val)
else:
ts['value'].append(bucket[str(child_agg_count)]['value'])
else:
ts['value'].append(bucket['doc_count'])
# unixtime comes in ms from ElasticSearch
ts['unixtime'].append(bucket['key'] / 1000)
if dataframe:
df = pd.DataFrame.from_records(ts, index="date")
return df.fillna(0)
return ts
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_aggs(self):
""" Compute the values for single valued aggregations :returns: the single aggregation value """
|
res = self.fetch_aggregation_results()
if 'aggregations' in res and 'values' in res['aggregations'][str(self.parent_agg_counter - 1)]:
try:
agg = res['aggregations'][str(self.parent_agg_counter - 1)]['values']["50.0"]
if agg == 'NaN':
# ES returns NaN. Convert to None for matplotlib graph
agg = None
except Exception as e:
raise RuntimeError("Multivalue aggregation result not supported")
elif 'aggregations' in res and 'value' in res['aggregations'][str(self.parent_agg_counter - 1)]:
agg = res['aggregations'][str(self.parent_agg_counter - 1)]['value']
else:
agg = res['hits']['total']
return agg
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_list(self, dataframe=False):
""" Compute the value for multi-valued aggregations :returns: a dict containing 'keys' and their corresponding 'values' """
|
res = self.fetch_aggregation_results()
keys = []
values = []
for bucket in res['aggregations'][str(self.parent_agg_counter - 1)]['buckets']:
keys.append(bucket['key'])
values.append(bucket['doc_count'])
result = {"keys": keys, "values": values}
if dataframe:
result = pd.DataFrame.from_records(result)
return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def upgrade(check_only, yes):
'''
Upgrade libreant database.
This command can be used after an update of libreant
in order to upgrade the database and make it aligned with the new version.
'''
from utils.es import Elasticsearch
from libreantdb import DB, migration
from libreantdb.exceptions import MappingsException
try:
db = DB(Elasticsearch(hosts=conf['ES_HOSTS']),
index_name=conf['ES_INDEXNAME'])
if not db.es.indices.exists(db.index_name):
die("The specified index does not exists: {}".format(db.index_name))
# Migrate old special `_timestamp` field into the new `_insertion_date`
num_to_update = migration.elements_without_insertion_date(db.es, db.index_name)
if num_to_update > 0:
if check_only:
exit(123)
if yes or click.confirm("{} entries miss the '_insertion_date' field. Do you want to proceed and update those entries?".format(num_to_update),
prompt_suffix='',
default=False):
migration.migrate_timestamp(db.es, db.index_name)
else:
exit(0)
# Upgrade the index mappings and reindex if necessary
try:
db.update_mappings()
except MappingsException:
if check_only:
exit(123)
count = db.es.count(index=db.index_name)['count']
if yes or click.confirm("Some old or wrong mappings has been found for the index '"+ db.index_name +"'.\n"\
"In order to upgrade them it is necessary to reindex '"+ str(count) +"' entries.\n"\
"Are you sure you want to proceed?",
prompt_suffix='',
default=False):
db.reindex()
except Exception as e:
if conf.get('DEBUG', False):
raise
else:
die(str(e))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def insert_volume(language, filepath, notes, metadata):
'''
Add a new volume to libreant.
The metadata of the volume are taken from a json file whose path must be
passed as argument. Passing "-" as argument will read the file from stdin.
language is an exception, because it must be set using --language
For every attachment you must add a --file AND a --notes.
\b
Examples:
Adds a volume with no metadata. Yes, it makes no sense but you can
libreant-db insert-volume -l en - <<<'{}'
Adds a volume with no files attached
libreant-db insert-volume -l en - <<EOF
{
"title": "How to create volumes",
"actors": ["libreant devs", "open access conspiration"]
}
EOF
Adds a volume with one attachment but no metadata
libreant-db insert-volume -l en -f /path/book.epub --notes 'poor quality'
Adds a volume with two attachments but no metadata
libreant-db insert-volume -l en -f /path/book.epub --notes 'poor quality' -f /path/someother.epub --notes 'preprint'
'''
meta = {"_language": language}
if metadata:
meta.update(json.load(metadata))
attachments = attach_list(filepath, notes)
try:
out = arc.insert_volume(meta, attachments)
except Exception:
die('An upload error has occurred!', exit_code=4)
click.echo(out)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def attach_list(filepaths, notes):
'''
all the arguments are lists
returns a list of dictionaries; each dictionary "represent" an attachment
'''
assert type(filepaths) in (list, tuple)
assert type(notes) in (list, tuple)
# this if clause means "if those lists are not of the same length"
if len(filepaths) != len(notes):
die('The number of --filepath, and --notes must be the same')
attach_list = []
for fname, note in zip(filepaths, notes):
name = os.path.basename(fname)
assert os.path.exists(fname)
mime = mimetypes.guess_type(fname)[0]
if mime is not None and '/' not in mime:
mime = None
attach_list.append({
'file': fname,
'name': name,
'mime': mime,
'note': note
})
return attach_list
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _param_fields(kwargs, fields):
""" Normalize the "fields" argument to most find methods """
|
if fields is None:
return
if type(fields) in [list, set, frozenset, tuple]:
fields = {x: True for x in fields}
if type(fields) == dict:
fields.setdefault("_id", False)
kwargs["projection"] = fields
|
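Example of the normalization done by _param_fields above: a list of field names becomes a pymongo projection dict with "_id" excluded by default:

kwargs = {}
_param_fields(kwargs, ["name", "email"])
print(kwargs)  # {'projection': {'name': True, 'email': True, '_id': False}}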
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def patch_cursor(cursor, batch_size=None, limit=None, skip=None, sort=None, **kwargs):
""" Adds batch_size, limit, sort parameters to a DB cursor """
|
if type(batch_size) == int:
cursor.batch_size(batch_size)
if limit is not None:
cursor.limit(limit)
if sort is not None:
cursor.sort(sort)
if skip is not None:
cursor.skip(skip)
|
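A usage sketch for patch_cursor, assuming a running local MongoDB and a hypothetical 'items' collection:
from pymongo import MongoClient, ASCENDING

coll = MongoClient()["testdb"]["items"]
cursor = coll.find({"active": True})
patch_cursor(cursor, batch_size=100, limit=50, skip=10,
             sort=[("created_at", ASCENDING)])
docs = list(cursor)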
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def exists(self, query, **args):
""" Returns True if the search matches at least one document """
|
return bool(self.find(query, **args).limit(1).count())
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _collection_with_options(self, kwargs):
""" Returns a copy of the pymongo collection with various options set up """
|
# class DocumentClassWithFields(self.document_class):
# _fetched_fields = kwargs.get("projection")
# mongokat_collection = self
read_preference = kwargs.get("read_preference") or getattr(self.collection, "read_preference", None) or ReadPreference.PRIMARY
if "read_preference" in kwargs:
del kwargs["read_preference"]
# Simplified tag usage
if "read_use" in kwargs:
if kwargs["read_use"] == "primary":
read_preference = ReadPreference.PRIMARY
elif kwargs["read_use"] == "secondary":
read_preference = ReadPreference.SECONDARY
elif kwargs["read_use"] == "nearest":
read_preference = ReadPreference.NEAREST
elif kwargs["read_use"]:
read_preference = read_preferences.Secondary(tag_sets=[{"use": kwargs["read_use"]}])
del kwargs["read_use"]
write_concern = None
if kwargs.get("w") is 0:
write_concern = WriteConcern(w=0)
elif kwargs.get("write_concern"):
write_concern = kwargs.get("write_concern")
codec_options = CodecOptions(
document_class=(
self.document_class,
{
"fetched_fields": kwargs.get("projection"),
"mongokat_collection": self
}
)
)
return self.collection.with_options(
codec_options=codec_options,
read_preference=read_preference,
write_concern=write_concern
)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_by_b64id(self, _id, **kwargs):
""" Pass me a base64-encoded ObjectId """
|
return self.find_one({"_id": ObjectId(base64.b64decode(_id))}, **kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_by_b64ids(self, _ids, **kwargs):
""" Pass me a list of base64-encoded ObjectId """
|
return self.find_by_ids([ObjectId(base64.b64decode(_id)) for _id in _ids], **kwargs)
|
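A sketch of producing and decoding the base64-encoded ids these two helpers expect; only bson and the standard library are needed:
import base64
from bson import ObjectId

oid = ObjectId()
b64id = base64.b64encode(oid.binary)          # 12 raw bytes -> 16 base64 chars
assert ObjectId(base64.b64decode(b64id)) == oid
# find_by_b64id(b64id) / find_by_b64ids([b64id, ...]) perform the same decoding
# before querying on "_id".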
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_column(self, query=None, field="_id", **kwargs):
""" Return one field as an iterator. Beware that if your query returns records where the field is not set, it will raise a KeyError. """
|
find_kwargs = {
"projection": {"_id": False}
}
find_kwargs["projection"][field] = True
cursor = self._collection_with_options(kwargs).find(query, **find_kwargs) # We only want 1 field: bypass the ORM
patch_cursor(cursor, **kwargs)
return (dotdict(x)[field] for x in cursor)
|
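A wiring-up sketch for iter_column. The Collection/Document subclassing follows mongokat's README, but the exact constructor keyword and the running local MongoDB are assumptions:
from pymongo import MongoClient
from mongokat import Collection, Document

class UserDocument(Document):
    pass

class UserCollection(Collection):
    document_class = UserDocument

# `collection=` keyword assumed from mongokat's README
Users = UserCollection(collection=MongoClient()["appdb"]["users"])

# Stream a single field without building full document objects
for email in Users.iter_column({"active": True}, field="email", batch_size=500):
    print(email)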
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_random(self, **kwargs):
""" return one random document from the collection """
|
import random
max = self.count(**kwargs)
if max:
num = random.randint(0, max - 1)
return next(self.find(**kwargs).skip(num))
|
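A short usage sketch, reusing the hypothetical Users collection from the sketch above. The skip()-based selection is simple but still walks past the skipped documents server-side, so MongoDB's $sample aggregation stage tends to scale better on large collections:
doc = Users.find_random()
if doc is not None:
    print(doc["_id"])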
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def insert(self, data, return_object=False):
""" Inserts the data as a new document. """
|
obj = self(data) # pylint: disable=E1102
obj.save()
if return_object:
return obj
else:
return obj["_id"]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def trigger(self, event, filter=None, update=None, documents=None, ids=None, replacements=None):
""" Trigger the after_save hook on documents, if present. """
|
if not self.has_trigger(event):
return
if documents is not None:
pass
elif ids is not None:
documents = self.find_by_ids(ids, read_use="primary")
elif filter is not None:
documents = self.find(filter, read_use="primary")
else:
raise Exception("Trigger couldn't filter documents")
for doc in documents:
getattr(doc, event)(update=update, replacements=replacements)
|
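A hypothetical hook definition; the method name and keyword arguments mirror the getattr call above, so Collection.trigger('after_save', ids=[...]) would invoke it on each matched document (the import follows mongokat's README):
from mongokat import Document

class AuditedDocument(Document):
    def after_save(self, update=None, replacements=None):
        # react to the save, e.g. log it or invalidate caches
        print("saved:", self.get("_id"), "update:", update)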
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def usage(ecode, msg=''):
""" Print usage and msg and exit with given code. """
|
print >> sys.stderr, __doc__
if msg:
print >> sys.stderr, msg
sys.exit(ecode)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(msgid, transtr, fuzzy):
""" Add a non-fuzzy translation to the dictionary. """
|
global MESSAGES
if not fuzzy and transtr and not transtr.startswith('\0'):
MESSAGES[msgid] = transtr
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate():
""" Return the generated output. """
|
global MESSAGES
keys = MESSAGES.keys()
# the keys are sorted in the .mo file
keys.sort()
offsets = []
ids = strs = ''
for _id in keys:
# For each string, we need size and file offset. Each string is NUL
# terminated; the NUL does not count into the size.
offsets.append((len(ids), len(_id), len(strs), len(MESSAGES[_id])))
ids += _id + '\0'
strs += MESSAGES[_id] + '\0'
output = ''
# The header is 7 32-bit unsigned integers. We don't use hash tables, so
# the keys start right after the index tables.
# translated string.
keystart = 7 * 4 + 16 * len(keys)
# and the values start after the keys
valuestart = keystart + len(ids)
koffsets = []
voffsets = []
# The string table first has the list of keys, then the list of values.
# Each entry has first the size of the string, then the file offset.
for o1, l1, o2, l2 in offsets:
koffsets += [l1, o1 + keystart]
voffsets += [l2, o2 + valuestart]
offsets = koffsets + voffsets
output = struct.pack("Iiiiiii",
0x950412deL, # Magic
0, # Version
len(keys), # # of entries
7 * 4, # start of key index
7 * 4 + len(keys) * 8, # start of value index
0, 0) # size and offset of hash table
output += array.array("i", offsets).tostring()
output += ids
output += strs
return output
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def get_es_requirements(es_version):
'''Get the requirements string for elasticsearch-py library
Returns a suitable requirements string for the elasticsearch-py library
according to the elasticsearch version to be supported (es_version)'''
# accepts version range in the form `2.x`
es_version = es_version.replace('x', '0')
es_version = [int(v) for v in es_version.split('.')]  # a list, so the comparisons below are element-wise
if es_version >= [6]:
return ">=6.0.0, <7.0.0"
elif es_version >= [5]:
return ">=5.0.0, <6.0.0"
elif es_version >= [2]:
return ">=2.0.0, <3.0.0"
elif es_version >= [1]:
return ">=1.0.0, <2.0.0"
else:
return "<1.0.0"
|
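A few doctest-style checks of the mapping, assuming es_version is parsed into a list of ints as above so the comparisons are element-wise:
assert get_es_requirements("6.x") == ">=6.0.0, <7.0.0"
assert get_es_requirements("5.x") == ">=5.0.0, <6.0.0"
assert get_es_requirements("2.4") == ">=2.0.0, <3.0.0"
assert get_es_requirements("1.7.5") == ">=1.0.0, <2.0.0"
assert get_es_requirements("0.90.13") == "<1.0.0"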
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
""" Compile all message catalogs .po files into .mo files. Skips not changed file based on source mtime. """
|
# thanks to deluge guys ;)
po_dir = os.path.join(os.path.dirname(__file__), 'webant', 'translations')
print('Compiling po files from "{}"...'.format(po_dir))
for lang in os.listdir(po_dir):
sys.stdout.write("\tCompiling {}... ".format(lang))
sys.stdout.flush()
curr_lang_path = os.path.join(po_dir, lang)
for path, dirs, filenames in os.walk(curr_lang_path):
for f in filenames:
if f.endswith('.po'):
src = os.path.join(path, f)
dst = os.path.join(path, f[:-3] + ".mo")
if not os.path.exists(dst) or self.force:
msgfmt.make(src, dst)
print("ok.")
else:
src_mtime = os.stat(src)[8]
dst_mtime = os.stat(dst)[8]
if src_mtime > dst_mtime:
msgfmt.make(src, dst)
print("ok.")
else:
print("already up to date.")
print('Finished compiling translation files.')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __get_response_element_data(self, key1, key2):
""" For each origin an elements object is created in the ouput. For each destination, an object is created inside elements object. For example, if there are 2 origins and 1 destination, 2 element objects with 1 object each are created. If there are 2 origins and 2 destinations, 2 element objects with 2 objects each are created. """
|
if not self.dict_response[key1][key2]:
l = self.response
for i, orig in enumerate(self.origins):
self.dict_response[key1][key2][orig] = {}
for j, dest in enumerate(self.destinations):
if l[i]['elements'][j]['status'] == 'OK':
self.dict_response[key1][key2][orig][dest] = l[i]['elements'][j][key1][key2]
else:
self.dict_response[key1][key2][orig][dest] = l[i]['elements'][j]['status']
return self.dict_response[key1][key2]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_closest_points(self, max_distance=None, origin_index=0, origin_raw=None):
""" Get closest points to a given origin. Returns a list of 2 element tuples where first element is the destination and the second is the distance. """
|
if not self.dict_response['distance']['value']:
self.get_distance_values()
if origin_raw:
origin = copy.deepcopy(self.dict_response['distance']['value'][origin_raw])
else:
origin = copy.deepcopy(self.dict_response['distance']['value'][self.origins[origin_index]])
tmp_origin = copy.deepcopy(origin)
if max_distance is not None:
for k, v in tmp_origin.iteritems():
if v > max_distance or v == 'ZERO_RESULTS':
del(origin[k])
return origin
|
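A stand-alone illustration of the nested cache these methods fill and read; the place names and metre values are made up, real ones come from the Distance Matrix API response:
dict_response = {
    "distance": {"value": {
        "Berlin, DE": {"Potsdam, DE": 34700, "Leipzig, DE": 191000},
    }},
    "duration": {"value": {}},
}

origin = dict_response["distance"]["value"]["Berlin, DE"]
# get_closest_points(max_distance=50000, ...) would keep only nearby entries:
nearby = {dest: d for dest, d in origin.items() if d <= 50000}
# nearby == {"Potsdam, DE": 34700}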
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rename_node(self, prefix):
""" Rename AMR graph nodes to prefix + node_index to avoid nodes with the same name in two different AMRs. """
|
node_map_dict = {}
# map each node to its new name (e.g. "a1")
for i in range(0, len(self.nodes)):
node_map_dict[self.nodes[i]] = prefix + str(i)
# update node name
for i, v in enumerate(self.nodes):
self.nodes[i] = node_map_dict[v]
# update node name in relations
for node_relations in self.relations:
for i, l in enumerate(node_relations):
node_relations[i][1] = node_map_dict[l[1]]
|
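A stand-alone walk-through of the same renaming scheme outside the AMR class; the node names and relation layout are assumed from the method body above:
nodes = ["b", "w", "g"]
node_map = {n: "a" + str(i) for i, n in enumerate(nodes)}
# node_map == {"b": "a0", "w": "a1", "g": "a2"}

relations = [[["ARG0", "w"]], [["ARG1", "g"]], []]
for node_relations in relations:
    for edge in node_relations:
        edge[1] = node_map[edge[1]]
# relations == [[["ARG0", "a1"]], [["ARG1", "a2"]], []]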
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bar3_chart(self, title, labels, data1, file_name, data2, data3, legend=["", ""]):
""" Generate a bar plot with three columns in each x position and save it to file_name :param title: title to be used in the chart :param labels: list of labels for the x axis :param data1: values for the first columns :param file_name: name of the file in which to save the chart :param data2: values for the second columns :param data3: values for the third columns :param legend: legend to be shown in the chart :return: """
|
colors = ["orange", "grey"]
data1 = self.__convert_none_to_zero(data1)
data2 = self.__convert_none_to_zero(data2)
data3 = self.__convert_none_to_zero(data3)
fig, ax = plt.subplots(1)
xpos = np.arange(len(data1))
width = 0.28
plt.title(title)
y_pos = np.arange(len(data1))
ppl.bar(xpos + width + width, data3, color="orange", width=0.28, annotate=True)
ppl.bar(xpos + width, data1, color='grey', width=0.28, annotate=True)
ppl.bar(xpos, data2, grid='y', width=0.28, annotate=True)
plt.xticks(xpos + width, labels)
plt.legend(legend, loc=2)
os.makedirs(os.path.dirname(file_name), exist_ok=True)
plt.savefig(file_name)
plt.close()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sections(self):
""" Get the sections of the report and howto build them. :return: a dict with the method to be called to fill each section of the report """
|
secs = OrderedDict()
secs['Overview'] = self.sec_overview
secs['Communication Channels'] = self.sec_com_channels
secs['Detailed Activity by Project'] = self.sec_projects
return secs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replace_text(filepath, to_replace, replacement):
""" Replaces a string in a given file with another string :param file: the file in which the string has to be replaced :param to_replace: the string to be replaced in the file :param replacement: the string which replaces 'to_replace' in the file """
|
with open(filepath) as file:
s = file.read()
s = s.replace(to_replace, replacement)
with open(filepath, 'w') as file:
file.write(s)
|
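A self-contained check of replace_text using a temporary file; the marker string is arbitrary:
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".tex", delete=False) as tmp:
    tmp.write("Project: %PROJECT_NAME%\n")

replace_text(tmp.name, "%PROJECT_NAME%", "GrimoireLab")
with open(tmp.name) as f:
    print(f.read())        # Project: GrimoireLab
os.unlink(tmp.name)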
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replace_text_dir(self, directory, to_replace, replacement, file_type=None):
""" Replaces a string with its replacement in all the files in the directory :param directory: the directory in which the files have to be modified :param to_replace: the string to be replaced in the files :param replacement: the string which replaces 'to_replace' in the files :param file_type: file pattern to match the files in which the string has to be replaced """
|
if not file_type:
file_type = "*.tex"
for file in glob.iglob(os.path.join(directory, file_type)):
self.replace_text(file, to_replace, replacement)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pprint_table(table):
""" Print a table in pretty format """
|
col_paddings = []
for i in range(len(table[0])):
col_paddings.append(get_max_width(table, i))
for row in table:
print(row[0].ljust(col_paddings[0] + 1), end="")
for i in range(1, len(row)):
col = str(row[i]).rjust(col_paddings[i]+2)
print(col, end='')
print("\n")
|
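A usage sketch for pprint_table; it relies on a get_max_width helper that is not shown here, so a minimal assumed version is included:
def get_max_width(table, i):
    # assumed behaviour: width of the widest cell (as a string) in column i
    return max(len(str(row[i])) for row in table)

table = [["metric", "2019", "2020"],
         ["commits", 1204, 1534],
         ["authors", 40, 57]]
pprint_table(table)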
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cb(option, opt_str, value, parser):
""" Callback function to handle variable number of arguments in optparse """
|
arguments = [value]
for arg in parser.rargs:
if arg[0] != "-":
arguments.append(arg)
else:
del parser.rargs[:len(arguments)]
break
if getattr(parser.values, option.dest):
arguments.extend(getattr(parser.values, option.dest))
setattr(parser.values, option.dest, arguments)
|
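A usage sketch with optparse (option names are illustrative), assuming the four-argument callback signature above; values are collected until the next dash-prefixed token or the end of the argument list:
from optparse import OptionParser

parser = OptionParser()
parser.add_option("-f", "--files", dest="files", type="string",
                  action="callback", callback=cb)
opts, args = parser.parse_args(["-f", "a.txt", "b.txt", "c.txt"])
# opts.files == ["a.txt", "b.txt", "c.txt"]
# note: unless a later option triggers the `del parser.rargs[...]` branch above,
# the collected values also remain in the positional args list.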