text_prompt (string, 100-17.7k chars, nullable) | code_prompt (string, 7-9.86k chars, nullable) |
---|---|
<SYSTEM_TASK:>
Format the value of a metric as a string
<END_TASK>
<USER_TASK:>
Description:
def str_val(val):
"""
Format the value of a metric as a string
:param val: number to be formatted
:return: a string with the formatted value
""" |
if val is None:
str_val = "NA"
elif isinstance(val, float):
str_val = '%0.2f' % val
else:
str_val = str(val)
return str_val |
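A minimal usage sketch of the formatter above (illustrative only, assuming str_val is in scope): None becomes "NA", floats are formatted to two decimals, and anything else is passed through str().
assert str_val(None) == "NA"
assert str_val(3.14159) == "3.14"
assert str_val(42) == "42"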
<SYSTEM_TASK:>
Compute metrics in the overview section for enriched git indexes.
<END_TASK>
<USER_TASK:>
Description:
def overview(index, start, end):
"""Compute metrics in the overview section for enriched git indexes.
Returns a dictionary. Each key in the dictionary is the name of
a metric, the value is the value of that metric. Value can be
a complex object (eg, a time series).
:param index: index object
:param start: start date to get the data from
:param end: end date to get the data up to
:return: dictionary with the value of the metrics
""" |
results = {
"activity_metrics": [Commits(index, start, end)],
"author_metrics": [Authors(index, start, end)],
"bmi_metrics": [],
"time_to_close_metrics": [],
"projects_metrics": []
}
return results |
<SYSTEM_TASK:>
Compute the metrics for the project activity section of the enriched
<END_TASK>
<USER_TASK:>
Description:
def project_activity(index, start, end):
"""Compute the metrics for the project activity section of the enriched
git index.
Returns a dictionary containing a "metric" key. This key contains the
metrics for this section.
:param index: index object
:param start: start date to get the data from
:param end: end date to get the data up to
:return: dictionary with the value of the metrics
""" |
results = {
"metrics": [Commits(index, start, end),
Authors(index, start, end)]
}
return results |
<SYSTEM_TASK:>
Compute the metrics for the project community section of the enriched
<END_TASK>
<USER_TASK:>
Description:
def project_community(index, start, end):
"""Compute the metrics for the project community section of the enriched
git index.
Returns a dictionary containing "author_metrics", "people_top_metrics"
and "orgs_top_metrics" as the keys and the related Metrics as the values.
:param index: index object
:param start: start date to get the data from
:param end: end date to get the data up to
:return: dictionary with the value of the metrics
""" |
results = {
"author_metrics": [Authors(index, start, end)],
"people_top_metrics": [Authors(index, start, end)],
"orgs_top_metrics": [Organizations(index, start, end)],
}
return results |
<SYSTEM_TASK:>
Override parent method. Obtain list of the terms and their corresponding
<END_TASK>
<USER_TASK:>
Description:
def aggregations(self):
"""
Override parent method. Obtain list of the terms and their corresponding
values using "terms" aggregations for the previous time period.
:returns: a data frame containing terms and their corresponding values
""" |
prev_month_start = get_prev_month(self.end, self.query.interval_)
self.query.since(prev_month_start)
self.query.get_terms("author_name")
return self.query.get_list(dataframe=True) |
<SYSTEM_TASK:>
Compute the metrics for the project activity section of the enriched
<END_TASK>
<USER_TASK:>
Description:
def project_activity(index, start, end):
"""Compute the metrics for the project activity section of the enriched
github issues index.
Returns a dictionary containing a "metric" key. This key contains the
metrics for this section.
:param index: index object
:param start: start date to get the data from
:param end: end date to get the data up to
:return: dictionary with the value of the metrics
""" |
results = {
"metrics": [OpenedIssues(index, start, end),
ClosedIssues(index, start, end)]
}
return results |
<SYSTEM_TASK:>
Get the aggregation value for BMI with respect to the previous
<END_TASK>
<USER_TASK:>
Description:
def aggregations(self):
"""Get the aggregation value for BMI with respect to the previous
time interval.""" |
prev_month_start = get_prev_month(self.end,
self.closed.query.interval_)
self.closed.query.since(prev_month_start,
field="closed_at")
closed_agg = self.closed.aggregations()
self.opened.query.since(prev_month_start)
opened_agg = self.opened.aggregations()
if opened_agg == 0:
bmi = 1.0 # if no submitted issues/prs, bmi is at 100%
else:
bmi = closed_agg / opened_agg
return bmi |
<SYSTEM_TASK:>
python logger to be called from fortran
<END_TASK>
<USER_TASK:>
Description:
def c_log(level, message):
"""python logger to be called from fortran""" |
c_level = level
level = LEVELS_F2PY[c_level]
logger.log(level, message) |
<SYSTEM_TASK:>
convert a ctypes structure to a dictionary
<END_TASK>
<USER_TASK:>
Description:
def struct2dict(struct):
"""convert a ctypes structure to a dictionary""" |
return {x: getattr(struct, x) for x in dict(struct._fields_).keys()} |
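A small sketch of struct2dict in action; the Point structure below is hypothetical and only serves to illustrate the field-to-value mapping.
from ctypes import Structure, c_int, c_double

class Point(Structure):
    # hypothetical structure for illustration
    _fields_ = [("x", c_int), ("y", c_double)]

print(struct2dict(Point(3, 4.5)))  # {'x': 3, 'y': 4.5}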
<SYSTEM_TASK:>
convert one or more structs and generate dictionaries
<END_TASK>
<USER_TASK:>
Description:
def structs2records(structs):
"""convert one or more structs and generate dictionaries""" |
try:
n = len(structs)
except TypeError:
# no array
yield struct2dict(structs)
# just 1
return
for i in range(n):
struct = structs[i]
yield struct2dict(struct) |
<SYSTEM_TASK:>
convert ctypes structure or structure array to pandas data frame
<END_TASK>
<USER_TASK:>
Description:
def structs2pandas(structs):
"""convert ctypes structure or structure array to pandas data frame""" |
try:
import pandas
records = list(structs2records(structs))
df = pandas.DataFrame.from_records(records)
# TODO: do this for string columns, for now just for id
# How can we check for string columns, this is not nice:
# df.columns[df.dtypes == object]
if 'id' in df:
df["id"] = df["id"].apply(str.rstrip)
return df
except ImportError:
# pandas not found, that's ok
return structs |
<SYSTEM_TASK:>
Return wrapped function with type conversion and sanity checks.
<END_TASK>
<USER_TASK:>
Description:
def wrap(func):
"""Return wrapped function with type conversion and sanity checks.
""" |
@functools.wraps(func, assigned=('restype', 'argtypes'))
def wrapped(*args):
if len(args) != len(func.argtypes):
logger.warn("{} {} not of same length",
args, func.argtypes)
typed_args = []
for (arg, argtype) in zip(args, func.argtypes):
if argtype == c_char_p:
# create a string buffer for strings
typed_arg = create_string_buffer(arg)
else:
# for other types, use the type to do the conversion
if hasattr(argtype, 'contents'):
# type is a pointer
typed_arg = argtype(argtype._type_(arg))
else:
typed_arg = argtype(arg)
typed_args.append(typed_arg)
result = func(*typed_args)
if hasattr(result, 'contents'):
return result.contents
else:
return result
return wrapped |
<SYSTEM_TASK:>
Return platform-specific modelf90 shared library name.
<END_TASK>
<USER_TASK:>
Description:
def _libname(self):
"""Return platform-specific modelf90 shared library name.""" |
prefix = 'lib'
suffix = '.so'
if platform.system() == 'Darwin':
suffix = '.dylib'
if platform.system() == 'Windows':
prefix = ''
suffix = '.dll'
return prefix + self.engine + suffix |
<SYSTEM_TASK:>
Return full path to the shared library.
<END_TASK>
<USER_TASK:>
Description:
def _library_path(self):
"""Return full path to the shared library.
A couple of regular unix paths like ``/usr/lib/`` is searched by
default. If your library is not in one of those, set a
``LD_LIBRARY_PATH`` environment variable to the directory with your
shared library.
If the library cannot be found, a ``RuntimeError`` with debug
information is raised.
""" |
# engine is an existing library name
# TODO change add directory to library path
if os.path.isfile(self.engine):
return self.engine
pathname = 'LD_LIBRARY_PATH'
separator = ':'
if platform.system() == 'Darwin':
pathname = 'DYLD_LIBRARY_PATH'
separator = ':'
if platform.system() == 'Windows':
# windows does not separate between dll path's and exe paths
pathname = 'PATH'
separator = ';'
lib_path_from_environment = os.environ.get(pathname, '')
# Expand the paths with the system path if it exists
if lib_path_from_environment:
known_paths = [
path for path in lib_path_from_environment.split(separator)] + self.known_paths
else:
known_paths = self.known_paths
# expand ~
known_paths = [os.path.expanduser(path) for path in known_paths]
possible_libraries = [os.path.join(path, self._libname())
for path in known_paths]
for library in possible_libraries:
if os.path.exists(library):
logger.info("Using model fortran library %s", library)
return library
msg = "Library not found, looked in %s" % ', '.join(possible_libraries)
raise RuntimeError(msg) |
<SYSTEM_TASK:>
Return the fortran library, loaded with
<END_TASK>
<USER_TASK:>
Description:
def _load_library(self):
"""Return the fortran library, loaded with """ |
path = self._library_path()
logger.info("Loading library from path {}".format(path))
library_dir = os.path.dirname(path)
if platform.system() == 'Windows':
import win32api
olddir = os.getcwd()
os.chdir(library_dir)
win32api.SetDllDirectory('.')
result = cdll.LoadLibrary(path)
if platform.system() == 'Windows':
os.chdir(olddir)
return result |
<SYSTEM_TASK:>
Shutdown the library and clean up the model.
<END_TASK>
<USER_TASK:>
Description:
def finalize(self):
"""Shutdown the library and clean up the model.
Note that the Fortran library's cleanup code is not up to snuff yet,
so the cleanup is not perfect. Note also that the working directory is
changed back to the original one.
""" |
self.library.finalize.argtypes = []
self.library.finalize.restype = c_int
ierr = wrap(self.library.finalize)()
# always go back to previous directory
logger.info('cd {}'.format(self.original_dir))
# This one doesn't work.
os.chdir(self.original_dir)
if ierr:
errormsg = "Finalizing model {engine} failed with exit code {code}"
raise RuntimeError(errormsg.format(engine=self.engine, code=ierr)) |
<SYSTEM_TASK:>
Look up the type, rank and shape of a compound field
<END_TASK>
<USER_TASK:>
Description:
def inq_compound_field(self, name, index):
"""
Look up the type, rank and shape of a compound field
""" |
typename = create_string_buffer(name)
index = c_int(index + 1)
fieldname = create_string_buffer(MAXSTRLEN)
fieldtype = create_string_buffer(MAXSTRLEN)
rank = c_int()
arraytype = ndpointer(dtype='int32',
ndim=1,
shape=(MAXDIMS, ),
flags='F')
shape = np.empty((MAXDIMS, ), dtype='int32', order='F')
self.library.inq_compound_field.argtypes = [c_char_p,
POINTER(c_int),
c_char_p,
c_char_p,
POINTER(c_int),
arraytype]
self.library.inq_compound_field.restype = None
self.library.inq_compound_field(typename,
byref(index),
fieldname,
fieldtype,
byref(rank),
shape)
return (fieldname.value,
fieldtype.value,
rank.value,
tuple(shape[:rank.value])) |
<SYSTEM_TASK:>
Create a ctypes type that corresponds to a compound type in memory.
<END_TASK>
<USER_TASK:>
Description:
def make_compound_ctype(self, varname):
"""
Create a ctypes type that corresponds to a compound type in memory.
""" |
# look up the type name
compoundname = self.get_var_type(varname)
nfields = self.inq_compound(compoundname)
# for all the fields look up the type, rank and shape
fields = []
for i in range(nfields):
(fieldname, fieldtype,
fieldrank, fieldshape) = self.inq_compound_field(compoundname, i)
assert fieldrank <= 1
fieldctype = CTYPESMAP[fieldtype]
if fieldrank == 1:
fieldctype = fieldctype * fieldshape[0]
fields.append((fieldname, fieldctype))
# create a new structure
class COMPOUND(Structure):
_fields_ = fields
# if we have a rank 1 array, create an array
rank = self.get_var_rank(varname)
assert rank <= 1, "we can't handle >=2 dimensional compounds yet"
if rank == 1:
shape = self.get_var_shape(varname)
valtype = POINTER(ARRAY(COMPOUND, shape[0]))
else:
valtype = POINTER(COMPOUND)
# return the custom type
return valtype |
<SYSTEM_TASK:>
Return array rank or 0 for scalar.
<END_TASK>
<USER_TASK:>
Description:
def get_var_rank(self, name):
"""
Return array rank or 0 for scalar.
""" |
name = create_string_buffer(name)
rank = c_int()
self.library.get_var_rank.argtypes = [c_char_p, POINTER(c_int)]
self.library.get_var_rank.restype = None
self.library.get_var_rank(name, byref(rank))
return rank.value |
<SYSTEM_TASK:>
Return shape of the array.
<END_TASK>
<USER_TASK:>
Description:
def get_var_shape(self, name):
"""
Return shape of the array.
""" |
rank = self.get_var_rank(name)
name = create_string_buffer(name)
arraytype = ndpointer(dtype='int32',
ndim=1,
shape=(MAXDIMS, ),
flags='F')
shape = np.empty((MAXDIMS, ), dtype='int32', order='F')
self.library.get_var_shape.argtypes = [c_char_p, arraytype]
self.library.get_var_shape(name, shape)
return tuple(shape[:rank]) |
<SYSTEM_TASK:>
returns end time of simulation
<END_TASK>
<USER_TASK:>
Description:
def get_end_time(self):
"""
returns end time of simulation
""" |
end_time = c_double()
self.library.get_end_time.argtypes = [POINTER(c_double)]
self.library.get_end_time.restype = None
self.library.get_end_time(byref(end_time))
return end_time.value |
<SYSTEM_TASK:>
returns current time of simulation
<END_TASK>
<USER_TASK:>
Description:
def get_current_time(self):
"""
returns current time of simulation
""" |
current_time = c_double()
self.library.get_current_time.argtypes = [POINTER(c_double)]
self.library.get_current_time.restype = None
self.library.get_current_time(byref(current_time))
return current_time.value |
<SYSTEM_TASK:>
returns current time step of simulation
<END_TASK>
<USER_TASK:>
Description:
def get_time_step(self):
"""
returns current time step of simulation
""" |
time_step = c_double()
self.library.get_time_step.argtypes = [POINTER(c_double)]
self.library.get_time_step.restype = None
self.library.get_time_step(byref(time_step))
return time_step.value |
<SYSTEM_TASK:>
subscribe to fortran log messages
<END_TASK>
<USER_TASK:>
Description:
def set_logger(self, logger):
"""subscribe to fortran log messages""" |
# we don't expect anything back
try:
self.library.set_logger.restype = None
except AttributeError:
logger.warn("Tried to set logger but method is not implemented in %s", self.engine)
return
# as an argument we need a pointer to a fortran log func...
self.library.set_logger.argtypes = [
(fortran_log_functype)]
self.library.set_logger(fortran_log_func) |
<SYSTEM_TASK:>
sets current time of simulation
<END_TASK>
<USER_TASK:>
Description:
def set_current_time(self, current_time):
"""
sets current time of simulation
""" |
current_time = c_double(current_time)
try:
self.library.set_current_time.argtypes = [POINTER(c_double)]
self.library.set_current_time.restype = None
self.library.set_current_time(byref(current_time))
except AttributeError:
logger.warn("Tried to set current time but method is not implemented in %s", self.engine) |
<SYSTEM_TASK:>
Convert to int, boolean, list, None types config items
<END_TASK>
<USER_TASK:>
Description:
def __add_types(self, raw_conf):
""" Convert to int, boolean, list, None types config items """ |
typed_conf = {}
for s in raw_conf.keys():
typed_conf[s] = {}
for option in raw_conf[s]:
val = raw_conf[s][option]
if len(val) > 1 and (val[0] == '"' and val[-1] == '"'):
# It is a string
typed_conf[s][option] = val[1:-1]
# Check list
elif len(val) > 1 and (val[0] == '[' and val[-1] == ']'):
# List value
typed_conf[s][option] = val[1:-1].replace(' ', '').split(',')
# Check boolean
elif val.lower() in ['true', 'false']:
typed_conf[s][option] = True if val.lower() == 'true' else False
# Check None
elif val.lower() == 'none':
typed_conf[s][option] = None
else:
try:
# Check int
typed_conf[s][option] = int(val)
except ValueError:
# Is a string
typed_conf[s][option] = val
return typed_conf |
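To illustrate the conversions above, here is a hypothetical raw section as it might come out of an INI parser, and the typed result one would expect from __add_types (keys and values are invented for the example):
raw_conf = {
    "general": {
        "name": '"my-project"',       # quoted -> plain string
        "backends": "[git, github]",  # bracketed -> list of strings
        "debug": "true",              # -> True
        "workers": "4",               # -> 4 (int)
    }
}
# Expected: {"general": {"name": "my-project",
#                        "backends": ["git", "github"],
#                        "debug": True, "workers": 4}}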
<SYSTEM_TASK:>
Elasticsearch wrapper function
<END_TASK>
<USER_TASK:>
Description:
def Elasticsearch(*args, **kwargs):
"""Elasticsearch wrapper function
Wrapper function around the official Elasticsearch class that adds
a simple version check upon initialization.
In particular, it checks whether the major version of the library in use
matches that of the cluster we are trying to interact with.
The check can be skipped by setting the check_version parameter to False.
#note: Boyska didn't like subclassing :)
""" |
check_version = kwargs.pop('check_version', True)
es = Elasticsearch_official(*args, **kwargs)
if check_version:
es_version = es.info()['version']['number'].split('.')
if(int(es_version[0]) != int(es_pylib_version[0])):
raise RuntimeError("The Elasticsearch python library version does not match the one of the running cluster: {} != {}. Please install the correct elasticsearch-py version".format(es_pylib_version[0], es_version[0]))
return es |
<SYSTEM_TASK:>
Load environment variables in a dictionary
<END_TASK>
<USER_TASK:>
Description:
def from_envvars(prefix=None, environ=None, envvars=None, as_json=True):
"""Load environment variables in a dictionary
Values are parsed as JSON. If parsing fails with a ValueError,
values are instead used as verbatim strings.
:param prefix: If ``None`` is passed as envvars, all variables from
``environ`` starting with this prefix are imported. The
prefix is stripped upon import.
:param envvars: A dictionary of mappings of environment-variable-names
to Flask configuration names. If a list is passed
instead, names are mapped 1:1. If ``None``, see prefix
argument.
:param environ: use this dictionary instead of os.environ; this is here
mostly for mockability
:param as_json: If False, values will not be parsed as JSON first.
""" |
conf = {}
if environ is None:
environ = os.environ
if prefix is None and envvars is None:
raise RuntimeError('Must either give prefix or envvars argument')
# if it's a list, convert to dict
if isinstance(envvars, list):
envvars = {k: k for k in envvars}
if not envvars:
envvars = {k: k[len(prefix):] for k in environ.keys()
if k.startswith(prefix)}
for env_name, name in envvars.items():
if env_name not in environ:
continue
if as_json:
try:
conf[name] = json.loads(environ[env_name])
except ValueError:
conf[name] = environ[env_name]
else:
conf[name] = environ[env_name]
return conf |
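A quick usage sketch, passing a fake environ dict instead of os.environ (all names here are invented for the example):
fake_environ = {
    "MYAPP_DEBUG": "true",      # valid JSON -> True
    "MYAPP_WORKERS": "4",       # valid JSON -> 4
    "MYAPP_NAME": "reporting",  # not valid JSON -> kept verbatim
    "UNRELATED": "ignored",     # no prefix -> skipped
}
conf = from_envvars(prefix="MYAPP_", environ=fake_environ)
print(conf)  # {'DEBUG': True, 'WORKERS': 4, 'NAME': 'reporting'}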
<SYSTEM_TASK:>
BMI is the ratio of the number of closed items to the number of total items
<END_TASK>
<USER_TASK:>
Description:
def calculate_bmi(closed, submitted):
"""
BMI is the ratio of the number of closed items to the number of total items
submitted in a particular period of analysis. The items can be issues, pull
requests and such
:param closed: dataframe returned from get_timeseries() containing closed items
:param submitted: dataframe returned from get_timeseries() containing total items
:returns: a dataframe with "date" and "bmi" columns where the date column is also
the index.
bmi is the ratio of the number of items closed to the total
number of items submitted in a "period" of analysis
""" |
if sorted(closed.keys()) != sorted(submitted.keys()):
raise AttributeError("The buckets supplied are not congruent!")
dates = closed.index.values
closed_values = closed['value']
submitted_values = submitted['value']
ratios = []
for x, y in zip(closed_values, submitted_values):
if y == 0:
ratios.append(0.0)
else:
ratios.append(float("%.2f" % (x / y)))
df = pd.DataFrame.from_records({"date": dates, "bmi": ratios}, index="date")
return df.fillna(0) |
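A small worked example, assuming both inputs are dataframes indexed by date with a single "value" column, the shape that get_timeseries(dataframe=True) is described as returning:
import pandas as pd

dates = pd.to_datetime(["2018-01-01", "2018-02-01", "2018-03-01"])
closed = pd.DataFrame({"value": [5, 0, 9]}, index=dates)
submitted = pd.DataFrame({"value": [10, 0, 12]}, index=dates)
print(calculate_bmi(closed, submitted))
# bmi column (illustrative): 0.50, 0.00, 0.75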
<SYSTEM_TASK:>
Add an es_dsl query object to the es_dsl Search object
<END_TASK>
<USER_TASK:>
Description:
def add_query(self, key_val={}):
"""
Add an es_dsl query object to the es_dsl Search object
:param key_val: a key-value pair(dict) containing the query to be added to the search object
:returns: self, which allows the method to be chainable with the other methods
""" |
q = Q("match", **key_val)
self.search = self.search.query(q)
return self |
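Because every builder method returns self, calls can be chained; a hypothetical sketch (the query instance, field names and values are assumptions):
query = (query.add_query({"author_name": "jane"})        # only Jane's items
              .add_inverse_query({"state": "closed"}))   # ...that are not closed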
<SYSTEM_TASK:>
Add an es_dsl inverse query object to the es_dsl Search object
<END_TASK>
<USER_TASK:>
Description:
def add_inverse_query(self, key_val={}):
"""
Add an es_dsl inverse query object to the es_dsl Search object
:param key_val: a key-value pair(dict) containing the query to be added to the search object
:returns: self, which allows the method to be chainable with the other methods
""" |
q = Q("match", **key_val)
self.search = self.search.query(~q)
return self |
<SYSTEM_TASK:>
Create a sum aggregation object and add it to the aggregation dict
<END_TASK>
<USER_TASK:>
Description:
def get_sum(self, field=None):
"""
Create a sum aggregation object and add it to the aggregation dict
:param field: the field present in the index that is to be aggregated
:returns: self, which allows the method to be chainable with the other methods
""" |
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
agg = A("sum", field=field)
self.aggregations['sum_' + field] = agg
return self |
<SYSTEM_TASK:>
Create an avg aggregation object and add it to the aggregation dict
<END_TASK>
<USER_TASK:>
Description:
def get_average(self, field=None):
"""
Create an avg aggregation object and add it to the aggregation dict
:param field: the field present in the index that is to be aggregated
:returns: self, which allows the method to be chainable with the other methods
""" |
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
agg = A("avg", field=field)
self.aggregations['avg_' + field] = agg
return self |
<SYSTEM_TASK:>
Create a percentile aggregation object and add it to the aggregation dict
<END_TASK>
<USER_TASK:>
Description:
def get_percentiles(self, field=None, percents=None):
"""
Create a percentile aggregation object and add it to the aggregation dict
:param field: the field present in the index that is to be aggregated
:param percents: the specific percentiles to be calculated
default: [1.0, 5.0, 25.0, 50.0, 75.0, 95.0, 99.0]
:returns: self, which allows the method to be chainable with the other methods
""" |
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
if not percents:
percents = [1.0, 5.0, 25.0, 50.0, 75.0, 95.0, 99.0]
agg = A("percentiles", field=field, percents=percents)
self.aggregations['percentiles_' + field] = agg
return self |
<SYSTEM_TASK:>
Create a terms aggregation object and add it to the aggregation dict
<END_TASK>
<USER_TASK:>
Description:
def get_terms(self, field=None):
"""
Create a terms aggregation object and add it to the aggregation dict
:param field: the field present in the index that is to be aggregated
:returns: self, which allows the method to be chainable with the other methods
""" |
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
agg = A("terms", field=field, size=self.size, order={"_count": "desc"})
self.aggregations['terms_' + field] = agg
return self |
<SYSTEM_TASK:>
Create a min aggregation object and add it to the aggregation dict
<END_TASK>
<USER_TASK:>
Description:
def get_min(self, field=None):
"""
Create a min aggregation object and add it to the aggregation dict
:param field: the field present in the index that is to be aggregated
:returns: self, which allows the method to be chainable with the other methods
""" |
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
agg = A("min", field=field)
self.aggregations['min_' + field] = agg
return self |
<SYSTEM_TASK:>
Create a max aggregation object and add it to the aggregation dict
<END_TASK>
<USER_TASK:>
Description:
def get_max(self, field=None):
"""
Create a max aggregation object and add it to the aggregation dict
:param field: the field present in the index that is to be aggregated
:returns: self, which allows the method to be chainable with the other methods
""" |
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
agg = A("max", field=field)
self.aggregations['max_' + field] = agg
return self |
<SYSTEM_TASK:>
Create a cardinality aggregation object and add it to the aggregation dict
<END_TASK>
<USER_TASK:>
Description:
def get_cardinality(self, field=None):
"""
Create a cardinality aggregation object and add it to the aggregation dict
:param field: the field present in the index that is to be aggregated
:returns: self, which allows the method to be chainable with the other methods
""" |
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
agg = A("cardinality", field=field, precision_threshold=self.precision_threshold)
self.aggregations['cardinality_' + field] = agg
return self |
<SYSTEM_TASK:>
Create an extended_stats aggregation object and add it to the aggregation dict
<END_TASK>
<USER_TASK:>
Description:
def get_extended_stats(self, field=None):
"""
Create an extended_stats aggregation object and add it to the aggregation dict
:param field: the field present in the index that is to be aggregated
:returns: self, which allows the method to be chainable with the other methods
""" |
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
agg = A("extended_stats", field=field)
self.aggregations['extended_stats_' + field] = agg
return self |
<SYSTEM_TASK:>
Takes in an es_dsl Aggregation object and adds it to the aggregation dict.
<END_TASK>
<USER_TASK:>
Description:
def add_custom_aggregation(self, agg, name=None):
"""
Takes in an es_dsl Aggregation object and adds it to the aggregation dict.
Can be used to add custom aggregations such as moving averages
:param agg: aggregation to be added to the es_dsl search object
:param name: name of the aggregation object (optional)
:returns: self, which allows the method to be chainable with the other methods
""" |
agg_name = name if name else 'custom_agg'
self.aggregations[agg_name] = agg
return self |
<SYSTEM_TASK:>
Add the start date to query data starting from that date
<END_TASK>
<USER_TASK:>
Description:
def since(self, start, field=None):
"""
Add the start date to query data starting from that date
sets the default start date for each query
:param start: date to start looking at the fields (from date)
:param field: specific field for the start date in range filter
for the Search object
:returns: self, which allows the method to be chainable with the other methods
""" |
if not field:
field = "grimoire_creation_date"
self.start_date = start
date_dict = {field: {"gte": "{}".format(self.start_date.isoformat())}}
self.search = self.search.filter("range", **date_dict)
return self |
<SYSTEM_TASK:>
Add the end date to query data up to that date
<END_TASK>
<USER_TASK:>
Description:
def until(self, end, field=None):
"""
Add the end date to query data up to that date
sets the default end date for each query
:param end: date to stop looking at the fields (to date)
:param field: specific field for the end date in range filter
for the Search object
:returns: self, which allows the method to be chainable with the other methods
""" |
if not field:
field = "grimoire_creation_date"
self.end_date = end
date_dict = {field: {"lte": "{}".format(self.end_date.isoformat())}}
self.search = self.search.filter("range", **date_dict)
return self |
<SYSTEM_TASK:>
Used to segregate the data according to organizations. This method
<END_TASK>
<USER_TASK:>
Description:
def by_organizations(self, field=None):
"""
Used to segregate the data according to organizations. This method
pops the latest aggregation from the self.aggregations dict and
adds it as a nested aggregation under itself
:param field: the field to create the parent agg (optional)
default: author_org_name
:returns: self, which allows the method to be chainable with the other methods
""" |
# this function is currently only for issues and PRs
agg_field = field if field else "author_org_name"
agg_key = "terms_" + agg_field
if agg_key in self.aggregations.keys():
agg = self.aggregations[agg_key]
else:
agg = A("terms", field=agg_field, missing="others", size=self.size)
child_agg_counter = self.child_agg_counter_dict[agg_key] # 0 if not present because defaultdict
child_name, child_agg = self.aggregations.popitem()
agg.metric(child_agg_counter, child_agg)
self.aggregations[agg_key] = agg
self.child_agg_counter_dict[agg_key] += 1
return self |
<SYSTEM_TASK:>
Create a date histogram aggregation using the last added aggregation for the
<END_TASK>
<USER_TASK:>
Description:
def by_period(self, field=None, period=None, timezone=None, start=None, end=None):
"""
Create a date histogram aggregation using the last added aggregation for the
current object. Add this date_histogram aggregation into self.aggregations
:param field: the index field to create the histogram from
:param period: the interval which elasticsearch supports, ex: "month", "week" and such
:param timezone: custom timezone
:param start: custom start date for the date histogram, default: start date under range
:param end: custom end date for the date histogram, default: end date under range
:returns: self, which allows the method to be chainable with the other methods
""" |
hist_period = period if period else self.interval_
time_zone = timezone if timezone else "UTC"
start_ = start if start else self.start_date
end_ = end if end else self.end_date
bounds = self.get_bounds(start_, end_)
date_field = field if field else "grimoire_creation_date"
agg_key = "date_histogram_" + date_field
if agg_key in self.aggregations.keys():
agg = self.aggregations[agg_key]
else:
agg = A("date_histogram", field=date_field, interval=hist_period,
time_zone=time_zone, min_doc_count=0, **bounds)
child_agg_counter = self.child_agg_counter_dict[agg_key]
child_name, child_agg = self.aggregations.popitem()
agg.metric(child_agg_counter, child_agg)
self.aggregations[agg_key] = agg
self.child_agg_counter_dict[agg_key] += 1
return self |
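Putting the chainable pieces together, a hedged sketch of a typical monthly time series build (the query instance and the author_uuid field are assumptions; get_timeseries is shown further below):
from datetime import datetime

query.since(datetime(2018, 1, 1)).until(datetime(2018, 12, 31))
query.get_cardinality("author_uuid")   # child metric: unique authors
query.by_period(period="month")        # wrap it in a monthly date_histogram
monthly_authors = query.get_timeseries(dataframe=True)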
<SYSTEM_TASK:>
Get bounds for the date_histogram method
<END_TASK>
<USER_TASK:>
Description:
def get_bounds(self, start=None, end=None):
"""
Get bounds for the date_histogram method
:param start: start date to set the extended_bounds min field
:param end: end date to set the extended_bounds max field
:returns bounds: a dictionary containing the min and max fields
required to set the bounds in date_histogram aggregation
""" |
bounds = {}
if start or end:
# Extend bounds so we have data until start and end
start_ts = None
end_ts = None
if start:
start = start.replace(microsecond=0)
start_ts = start.replace(tzinfo=timezone.utc).timestamp()
start_ts_ms = start_ts * 1000 # ES uses ms
if end:
end = end.replace(microsecond=0)
end_ts = end.replace(tzinfo=timezone.utc).timestamp()
end_ts_ms = end_ts * 1000 # ES uses ms
bounds_data = {}
if start:
bounds_data["min"] = start_ts_ms
if end:
bounds_data["max"] = end_ts_ms
bounds["extended_bounds"] = bounds_data
return bounds |
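A quick check of the millisecond conversion performed above, for a start date of 2018-01-01 UTC (the value that would land in extended_bounds["min"]):
from datetime import datetime, timezone

start = datetime(2018, 1, 1).replace(tzinfo=timezone.utc)
print(start.timestamp() * 1000)  # 1514764800000.0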
<SYSTEM_TASK:>
Remove all aggregations added to the search object
<END_TASK>
<USER_TASK:>
Description:
def reset_aggregations(self):
"""
Remove all aggregations added to the search object
""" |
temp_search = self.search.to_dict()
if 'aggs' in temp_search.keys():
del temp_search['aggs']
self.search.from_dict(temp_search)
self.parent_agg_counter = 0
self.child_agg_counter = 0
self.child_agg_counter_dict = defaultdict(int) |
<SYSTEM_TASK:>
Loops through the self.aggregations dict and adds them to the Search object
<END_TASK>
<USER_TASK:>
Description:
def fetch_aggregation_results(self):
"""
Loops through the self.aggregations dict and adds them to the Search object
in order in which they were created. Queries elasticsearch and returns a dict
containing the results
:returns: a dictionary containing the response from elasticsearch
""" |
self.reset_aggregations()
for key, val in self.aggregations.items():
self.search.aggs.bucket(self.parent_agg_counter, val)
self.parent_agg_counter += 1
self.search = self.search.extra(size=0)
response = self.search.execute()
self.flush_aggregations()
return response.to_dict() |
<SYSTEM_TASK:>
Get values for specific fields in the elasticsearch index, from source
<END_TASK>
<USER_TASK:>
Description:
def fetch_results_from_source(self, *fields, dataframe=False):
"""
Get values for specific fields in the elasticsearch index, from source
:param fields: a list of fields that have to be retrieved from the index
:param dataframe: if true, will return the data in the form of a pandas.DataFrame
:returns: a list of dicts(key_val pairs) containing the values for the applied fields
if dataframe=True, will return a dataframe containing the data in rows
and the fields representing column names
""" |
if not fields:
raise AttributeError("Please provide the fields to get from elasticsearch!")
self.reset_aggregations()
self.search = self.search.extra(_source=fields)
self.search = self.search.extra(size=self.size)
response = self.search.execute()
hits = response.to_dict()['hits']['hits']
data = [item["_source"] for item in hits]
if dataframe:
df = pd.DataFrame.from_records(data)
return df.fillna(0)
return data |
<SYSTEM_TASK:>
Get time series data for the specified fields and period of analysis
<END_TASK>
<USER_TASK:>
Description:
def get_timeseries(self, child_agg_count=0, dataframe=False):
"""
Get time series data for the specified fields and period of analysis
:param child_agg_count: the child aggregation count to be used
default = 0
:param dataframe: if dataframe=True, return a pandas.DataFrame object
:returns: dictionary containing "date", "value" and "unixtime" keys
with lists as values containing data from each bucket in the
aggregation
""" |
res = self.fetch_aggregation_results()
ts = {"date": [], "value": [], "unixtime": []}
if 'buckets' not in res['aggregations'][str(self.parent_agg_counter - 1)]:
raise RuntimeError("Aggregation results have no buckets in time series results.")
for bucket in res['aggregations'][str(self.parent_agg_counter - 1)]['buckets']:
ts['date'].append(parser.parse(bucket['key_as_string']).date())
if str(child_agg_count) in bucket:
# We have a subaggregation with the value
# If it is percentiles we get the median
if 'values' in bucket[str(child_agg_count)]:
val = bucket[str(child_agg_count)]['values']['50.0']
if val == 'NaN':
# ES returns NaN. Convert to None for matplotlib graph
val = None
ts['value'].append(val)
else:
ts['value'].append(bucket[str(child_agg_count)]['value'])
else:
ts['value'].append(bucket['doc_count'])
# unixtime comes in ms from ElasticSearch
ts['unixtime'].append(bucket['key'] / 1000)
if dataframe:
df = pd.DataFrame.from_records(ts, index="date")
return df.fillna(0)
return ts |
<SYSTEM_TASK:>
Compute the values for single valued aggregations
<END_TASK>
<USER_TASK:>
Description:
def get_aggs(self):
"""
Compute the values for single valued aggregations
:returns: the single aggregation value
""" |
res = self.fetch_aggregation_results()
if 'aggregations' in res and 'values' in res['aggregations'][str(self.parent_agg_counter - 1)]:
try:
agg = res['aggregations'][str(self.parent_agg_counter - 1)]['values']["50.0"]
if agg == 'NaN':
# ES returns NaN. Convert to None for matplotlib graph
agg = None
except Exception as e:
raise RuntimeError("Multivalue aggregation result not supported")
elif 'aggregations' in res and 'value' in res['aggregations'][str(self.parent_agg_counter - 1)]:
agg = res['aggregations'][str(self.parent_agg_counter - 1)]['value']
else:
agg = res['hits']['total']
return agg |
<SYSTEM_TASK:>
Compute the value for multi-valued aggregations
<END_TASK>
<USER_TASK:>
Description:
def get_list(self, dataframe=False):
"""
Compute the value for multi-valued aggregations
:returns: a dict containing 'keys' and their corresponding 'values'
""" |
res = self.fetch_aggregation_results()
keys = []
values = []
for bucket in res['aggregations'][str(self.parent_agg_counter - 1)]['buckets']:
keys.append(bucket['key'])
values.append(bucket['doc_count'])
result = {"keys": keys, "values": values}
if dataframe:
result = pd.DataFrame.from_records(result)
return result |
<SYSTEM_TASK:>
Normalize the "fields" argument to most find methods
<END_TASK>
<USER_TASK:>
Description:
def _param_fields(kwargs, fields):
"""
Normalize the "fields" argument to most find methods
""" |
if fields is None:
return
if type(fields) in [list, set, frozenset, tuple]:
fields = {x: True for x in fields}
if type(fields) == dict:
fields.setdefault("_id", False)
kwargs["projection"] = fields |
<SYSTEM_TASK:>
Adds batch_size, limit, sort parameters to a DB cursor
<END_TASK>
<USER_TASK:>
Description:
def patch_cursor(cursor, batch_size=None, limit=None, skip=None, sort=None, **kwargs):
"""
Adds batch_size, limit, skip and sort parameters to a DB cursor
""" |
if type(batch_size) == int:
cursor.batch_size(batch_size)
if limit is not None:
cursor.limit(limit)
if sort is not None:
cursor.sort(sort)
if skip is not None:
cursor.skip(skip) |
<SYSTEM_TASK:>
Returns True if the search matches at least one document
<END_TASK>
<USER_TASK:>
Description:
def exists(self, query, **args):
"""
Returns True if the search matches at least one document
""" |
return bool(self.find(query, **args).limit(1).count()) |
<SYSTEM_TASK:>
Returns a copy of the pymongo collection with various options set up
<END_TASK>
<USER_TASK:>
Description:
def _collection_with_options(self, kwargs):
""" Returns a copy of the pymongo collection with various options set up """ |
# class DocumentClassWithFields(self.document_class):
# _fetched_fields = kwargs.get("projection")
# mongokat_collection = self
read_preference = kwargs.get("read_preference") or getattr(self.collection, "read_preference", None) or ReadPreference.PRIMARY
if "read_preference" in kwargs:
del kwargs["read_preference"]
# Simplified tag usage
if "read_use" in kwargs:
if kwargs["read_use"] == "primary":
read_preference = ReadPreference.PRIMARY
elif kwargs["read_use"] == "secondary":
read_preference = ReadPreference.SECONDARY
elif kwargs["read_use"] == "nearest":
read_preference = ReadPreference.NEAREST
elif kwargs["read_use"]:
read_preference = read_preferences.Secondary(tag_sets=[{"use": kwargs["read_use"]}])
del kwargs["read_use"]
write_concern = None
if kwargs.get("w") is 0:
write_concern = WriteConcern(w=0)
elif kwargs.get("write_concern"):
write_concern = kwargs.get("write_concern")
codec_options = CodecOptions(
document_class=(
self.document_class,
{
"fetched_fields": kwargs.get("projection"),
"mongokat_collection": self
}
)
)
return self.collection.with_options(
codec_options=codec_options,
read_preference=read_preference,
write_concern=write_concern
) |
<SYSTEM_TASK:>
Pass me a list of base64-encoded ObjectId
<END_TASK>
<USER_TASK:>
Description:
def find_by_b64ids(self, _ids, **kwargs):
"""
Pass me a list of base64-encoded ObjectId
""" |
return self.find_by_ids([ObjectId(base64.b64decode(_id)) for _id in _ids], **kwargs) |
<SYSTEM_TASK:>
Return one field as an iterator.
<END_TASK>
<USER_TASK:>
Description:
def iter_column(self, query=None, field="_id", **kwargs):
"""
Return one field as an iterator.
Beware that if your query returns records where the field is not set, it will raise a KeyError.
""" |
find_kwargs = {
"projection": {"_id": False}
}
find_kwargs["projection"][field] = True
cursor = self._collection_with_options(kwargs).find(query, **find_kwargs) # We only want 1 field: bypass the ORM
patch_cursor(cursor, **kwargs)
return (dotdict(x)[field] for x in cursor) |
<SYSTEM_TASK:>
Inserts the data as a new document.
<END_TASK>
<USER_TASK:>
Description:
def insert(self, data, return_object=False):
""" Inserts the data as a new document. """ |
obj = self(data) # pylint: disable=E1102
obj.save()
if return_object:
return obj
else:
return obj["_id"] |
<SYSTEM_TASK:>
Print usage and msg and exit with given code.
<END_TASK>
<USER_TASK:>
Description:
def usage(ecode, msg=''):
"""
Print usage and msg and exit with given code.
""" |
print(__doc__, file=sys.stderr)
if msg:
print(msg, file=sys.stderr)
sys.exit(ecode) |
<SYSTEM_TASK:>
Add a non-fuzzy translation to the dictionary.
<END_TASK>
<USER_TASK:>
Description:
def add(msgid, transtr, fuzzy):
"""
Add a non-fuzzy translation to the dictionary.
""" |
global MESSAGES
if not fuzzy and transtr and not transtr.startswith('\0'):
MESSAGES[msgid] = transtr |
<SYSTEM_TASK:>
Compile all message catalogs .po files into .mo files.
<END_TASK>
<USER_TASK:>
Description:
def run(self):
"""
Compile all message catalogs .po files into .mo files.
Skips files that have not changed, based on source mtime.
""" |
# thanks to deluge guys ;)
po_dir = os.path.join(os.path.dirname(__file__), 'webant', 'translations')
print('Compiling po files from "{}"...'.format(po_dir))
for lang in os.listdir(po_dir):
sys.stdout.write("\tCompiling {}... ".format(lang))
sys.stdout.flush()
curr_lang_path = os.path.join(po_dir, lang)
for path, dirs, filenames in os.walk(curr_lang_path):
for f in filenames:
if f.endswith('.po'):
src = os.path.join(path, f)
dst = os.path.join(path, f[:-3] + ".mo")
if not os.path.exists(dst) or self.force:
msgfmt.make(src, dst)
print("ok.")
else:
src_mtime = os.stat(src)[8]
dst_mtime = os.stat(dst)[8]
if src_mtime > dst_mtime:
msgfmt.make(src, dst)
print("ok.")
else:
print("already up to date.")
print('Finished compiling translation files.') |
<SYSTEM_TASK:>
For each origin, an elements object is created in the output.
<END_TASK>
<USER_TASK:>
Description:
def __get_response_element_data(self, key1, key2):
"""
For each origin, an elements object is created in the output.
For each destination, an object is created inside elements object. For example, if there are
2 origins and 1 destination, 2 element objects with 1 object each are created. If there are
2 origins and 2 destinations, 2 element objects with 2 objects each are created.
""" |
if not self.dict_response[key1][key2]:
l = self.response
for i, orig in enumerate(self.origins):
self.dict_response[key1][key2][orig] = {}
for j, dest in enumerate(self.destinations):
if l[i]['elements'][j]['status'] == 'OK':
self.dict_response[key1][key2][orig][dest] = l[i]['elements'][j][key1][key2]
else:
self.dict_response[key1][key2][orig][dest] = l[i]['elements'][j]['status']
return self.dict_response[key1][key2] |
<SYSTEM_TASK:>
Get closest points to a given origin. Returns a list of 2 element tuples where first element is the destination and the second is the distance.
<END_TASK>
<USER_TASK:>
Description:
def get_closest_points(self, max_distance=None, origin_index=0, origin_raw=None):
"""
Get closest points to a given origin. Returns a list of 2 element tuples where first element is the destination and the second is the distance.
""" |
if not self.dict_response['distance']['value']:
self.get_distance_values()
if origin_raw:
origin = copy.deepcopy(self.dict_response['distance']['value'][origin_raw])
else:
origin = copy.deepcopy(self.dict_response['distance']['value'][self.origins[origin_index]])
tmp_origin = copy.deepcopy(origin)
if max_distance is not None:
for k, v in tmp_origin.items():
if v == 'ZERO_RESULTS' or v > max_distance:
del(origin[k])
return origin |
<SYSTEM_TASK:>
Rename AMR graph nodes to prefix + node_index to avoid nodes with the same name in two different AMRs.
<END_TASK>
<USER_TASK:>
Description:
def rename_node(self, prefix):
"""
Rename AMR graph nodes to prefix + node_index to avoid nodes with the same name in two different AMRs.
""" |
node_map_dict = {}
# map each node to its new name (e.g. "a1")
for i in range(0, len(self.nodes)):
node_map_dict[self.nodes[i]] = prefix + str(i)
# update node name
for i, v in enumerate(self.nodes):
self.nodes[i] = node_map_dict[v]
# update node name in relations
for node_relations in self.relations:
for i, l in enumerate(node_relations):
node_relations[i][1] = node_map_dict[l[1]] |
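A schematic example of the renaming, calling the function above on a stand-in object instead of a real AMR instance (the toy graph below has two nodes and a single relation and is purely illustrative):
from types import SimpleNamespace

amr = SimpleNamespace(nodes=["b", "c"], relations=[[["ARG0", "c"]], []])
rename_node(amr, "a")        # pass the stand-in explicitly as self
print(amr.nodes)             # ['a0', 'a1']
print(amr.relations)         # [[['ARG0', 'a1']], []]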
<SYSTEM_TASK:>
Generate a bar plot with three columns in each x position and save it to file_name
<END_TASK>
<USER_TASK:>
Description:
def bar3_chart(self, title, labels, data1, file_name, data2, data3, legend=["", ""]):
"""
Generate a bar plot with three columns in each x position and save it to file_name
:param title: title to be used in the chart
:param labels: list of labels for the x axis
:param data1: values for the first columns
:param file_name: name of the file in which to save the chart
:param data2: values for the second columns
:param data3: values for the third columns
:param legend: legend to be shown in the chart
:return:
""" |
colors = ["orange", "grey"]
data1 = self.__convert_none_to_zero(data1)
data2 = self.__convert_none_to_zero(data2)
data3 = self.__convert_none_to_zero(data3)
fig, ax = plt.subplots(1)
xpos = np.arange(len(data1))
width = 0.28
plt.title(title)
y_pos = np.arange(len(data1))
ppl.bar(xpos + width + width, data3, color="orange", width=0.28, annotate=True)
ppl.bar(xpos + width, data1, color='grey', width=0.28, annotate=True)
ppl.bar(xpos, data2, grid='y', width=0.28, annotate=True)
plt.xticks(xpos + width, labels)
plt.legend(legend, loc=2)
os.makedirs(os.path.dirname(file_name), exist_ok=True)
plt.savefig(file_name)
plt.close() |
<SYSTEM_TASK:>
Get the sections of the report and how to build them.
<END_TASK>
<USER_TASK:>
Description:
def sections(self):
"""
Get the sections of the report and how to build them.
:return: a dict with the method to be called to fill each section of the report
""" |
secs = OrderedDict()
secs['Overview'] = self.sec_overview
secs['Communication Channels'] = self.sec_com_channels
secs['Detailed Activity by Project'] = self.sec_projects
return secs |
<SYSTEM_TASK:>
Replaces a string in a given file with another string
<END_TASK>
<USER_TASK:>
Description:
def replace_text(filepath, to_replace, replacement):
"""
Replaces a string in a given file with another string
:param filepath: the file in which the string has to be replaced
:param to_replace: the string to be replaced in the file
:param replacement: the string which replaces 'to_replace' in the file
""" |
with open(filepath) as file:
s = file.read()
s = s.replace(to_replace, replacement)
with open(filepath, 'w') as file:
file.write(s) |
<SYSTEM_TASK:>
Replaces a string with its replacement in all the files in the directory
<END_TASK>
<USER_TASK:>
Description:
def replace_text_dir(self, directory, to_replace, replacement, file_type=None):
"""
Replaces a string with its replacement in all the files in the directory
:param directory: the directory in which the files have to be modified
:param to_replace: the string to be replaced in the files
:param replacement: the string which replaces 'to_replace' in the files
:param file_type: file pattern to match the files in which the string has to be replaced
""" |
if not file_type:
file_type = "*.tex"
for file in glob.iglob(os.path.join(directory, file_type)):
self.replace_text(file, to_replace, replacement) |
<SYSTEM_TASK:>
Callback function to handle variable number of arguments in optparse
<END_TASK>
<USER_TASK:>
Description:
def cb(option, value, parser):
"""
Callback function to handle variable number of arguments in optparse
""" |
arguments = [value]
for arg in parser.rargs:
if arg[0] != "-":
arguments.append(arg)
else:
del parser.rargs[:len(arguments)]
break
if getattr(parser.values, option.dest):
arguments.extend(getattr(parser.values, option.dest))
setattr(parser.values, option.dest, arguments) |
<SYSTEM_TASK:>
Parse arguments and check if the arguments are valid
<END_TASK>
<USER_TASK:>
Description:
def check_args(args):
"""
Parse arguments and check if the arguments are valid
""" |
if not os.path.exists(args.fd):
print("Not a valid path", args.fd, file=ERROR_LOG)
return [], [], False
if args.fl is not None:
# we already ensure the file can be opened and opened the file
file_line = args.fl.readline()
amr_ids = file_line.strip().split()
elif args.f is None:
print("No AMR ID was given", file=ERROR_LOG)
return [], [], False
else:
amr_ids = args.f
names = []
check_name = True
if args.p is None:
names = get_names(args.fd, amr_ids)
# no need to check names
check_name = False
if len(names) == 0:
print("Cannot find any user who tagged these AMR", file=ERROR_LOG)
return [], [], False
else:
names = args.p
if len(names) == 0:
print("No user was given", file=ERROR_LOG)
return [], [], False
if len(names) == 1:
print("Only one user is given. Smatch calculation requires at least two users.", file=ERROR_LOG)
return [], [], False
if "consensus" in names:
con_index = names.index("consensus")
names.pop(con_index)
names.append("consensus")
# check if all the AMR_id and user combinations are valid
if check_name:
pop_name = []
for i, name in enumerate(names):
for amr in amr_ids:
amr_path = args.fd + name + "/" + amr + ".txt"
if not os.path.exists(amr_path):
print("User", name, "fails to tag AMR", amr, file=ERROR_LOG)
pop_name.append(i)
break
if len(pop_name) != 0:
pop_num = 0
for p in pop_name:
print("Deleting user", names[p - pop_num], "from the name list", file=ERROR_LOG)
names.pop(p - pop_num)
pop_num += 1
if len(names) < 2:
print("Not enough users to evaluate. Smatch requires >2 users who tag all the AMRs", file=ERROR_LOG)
return "", "", False
return amr_ids, names, True |
<SYSTEM_TASK:>
Create a CSV file with the given data and store it in the
<END_TASK>
<USER_TASK:>
Description:
def create_csv(filename, csv_data, mode="w"):
"""
Create a CSV file with the given data and store it in the
file with the given name.
:param filename: name of the file to store the data in
:param csv_data: the data to be stored in the file
:param mode: the mode in which we have to open the file. It can
be 'w', 'a', etc. Default is 'w'
""" |
with open(filename, mode) as f:
csv_data.replace("_", r"\_")
f.write(csv_data) |
<SYSTEM_TASK:>
This function will return the elasticsearch index for a corresponding
<END_TASK>
<USER_TASK:>
Description:
def get_metric_index(self, data_source):
"""
This function will return the elasticsearch index for a corresponding
data source. It chooses between the default and the user-provided
ES indices and returns the user-provided one if it is available.
:param data_source: the data source for which the index has to be returned
:returns: an elasticsearch index name
""" |
if data_source in self.index_dict:
index = self.index_dict[data_source]
else:
index = self.class2index[self.ds2class[data_source]]
return Index(index_name=index) |
<SYSTEM_TASK:>
Generate the "overview" section of the report.
<END_TASK>
<USER_TASK:>
Description:
def get_sec_overview(self):
"""
Generate the "overview" section of the report.
""" |
logger.debug("Calculating Overview metrics.")
data_path = os.path.join(self.data_dir, "overview")
if not os.path.exists(data_path):
os.makedirs(data_path)
overview_config = {
"activity_metrics": [],
"author_metrics": [],
"bmi_metrics": [],
"time_to_close_metrics": [],
"projects_metrics": []
}
for ds in self.data_sources:
metric_file = self.ds2class[ds]
metric_index = self.get_metric_index(ds)
overview = metric_file.overview(metric_index, self.start_date, self.end_date)
for section in overview_config:
overview_config[section] += overview[section]
overview_config['activity_file_csv'] = "data_source_evolution.csv"
overview_config['efficiency_file_csv'] = "efficiency.csv"
# ACTIVITY METRICS
metrics = overview_config['activity_metrics']
file_name = overview_config['activity_file_csv']
file_name = os.path.join(data_path, file_name)
csv = "metricsnames, netvalues, relativevalues, datasource\n"
for metric in metrics:
(last, percentage) = get_trend(metric.timeseries())
csv += "{}, {}, {}, {}\n".format(metric.name, last,
percentage, metric.DS_NAME)
csv = csv.replace("_", "\_")
create_csv(file_name, csv)
# AUTHOR METRICS
"""
Git Authors:
-----------
Description: average number of developers per month by quarters
(so we have the average number of developers per month during
those three months). If the approach is to work at the level of month,
then just the number of developers per month.
"""
author = overview_config['author_metrics']
if author:
authors_by_period = author[0]
title_label = file_label = authors_by_period.name + ' per ' + self.interval
file_path = os.path.join(data_path, file_label)
csv_data = authors_by_period.timeseries(dataframe=True)
# generate the CSV and the image file displaying the data
self.create_csv_fig_from_df([csv_data], file_path, [authors_by_period.name],
fig_type="bar", title=title_label, xlabel="time_period",
ylabel=authors_by_period.id)
# BMI METRICS
bmi = []
bmi_metrics = overview_config['bmi_metrics']
csv = ""
for metric in bmi_metrics:
bmi.append(metric.aggregations())
csv += metric.id + ", "
# Time to close METRICS
ttc = []
ttc_metrics = overview_config['time_to_close_metrics']
for metric in ttc_metrics:
ttc.append(metric.aggregations())
csv += metric.id + ", "
# generate efficiency file
csv = csv[:-2] + "\n"
csv = csv.replace("_", "")
bmi.extend(ttc)
for val in bmi:
csv += "%s, " % str_val(val)
if csv[-2:] == ", ":
csv = csv[:-2]
file_name = os.path.join(data_path, 'efficiency.csv')
create_csv(file_name, csv)
logger.debug("Overview metrics generation complete!") |
<SYSTEM_TASK:>
Generate the "project activity" section of the report.
<END_TASK>
<USER_TASK:>
Description:
def get_sec_project_activity(self):
"""
Generate the "project activity" section of the report.
""" |
logger.debug("Calculating Project Activity metrics.")
data_path = os.path.join(self.data_dir, "activity")
if not os.path.exists(data_path):
os.makedirs(data_path)
for ds in self.data_sources:
metric_file = self.ds2class[ds]
metric_index = self.get_metric_index(ds)
project_activity = metric_file.project_activity(metric_index, self.start_date,
self.end_date)
headers = []
data_frames = []
title_names = []
file_name = ""
for metric in project_activity['metrics']:
file_name += metric.DS_NAME + "_" + metric.id + "_"
title_names.append(metric.name)
headers.append(metric.id)
data_frames.append(metric.timeseries(dataframe=True))
file_name = file_name[:-1] # remove trailing underscore
file_path = os.path.join(data_path, file_name)
title_name = " & ".join(title_names) + ' per ' + self.interval
self.create_csv_fig_from_df(data_frames, file_path, headers,
fig_type="bar", title=title_name) |
<SYSTEM_TASK:>
Generate the "project community" section of the report.
<END_TASK>
<USER_TASK:>
Description:
def get_sec_project_community(self):
"""
Generate the "project community" section of the report.
""" |
logger.debug("Calculating Project Community metrics.")
data_path = os.path.join(self.data_dir, "community")
if not os.path.exists(data_path):
os.makedirs(data_path)
project_community_config = {
"author_metrics": [],
"people_top_metrics": [],
"orgs_top_metrics": []
}
for ds in self.data_sources:
metric_file = self.ds2class[ds]
metric_index = self.get_metric_index(ds)
project_community = metric_file.project_community(metric_index, self.start_date,
self.end_date)
for section in project_community_config:
project_community_config[section] += project_community[section]
# Get git authors:
author = project_community_config['author_metrics'][0]
author_ts = author.timeseries(dataframe=True)
csv_labels = [author.id]
file_label = author.DS_NAME + "_" + author.id
file_path = os.path.join(data_path, file_label)
title_label = author.name + " per " + self.interval
self.create_csv_fig_from_df([author_ts], file_path, csv_labels, fig_type="bar",
title=title_label)
"""Main developers"""
authors = project_community_config['people_top_metrics'][0]
authors_df = authors.aggregations()
authors_df = authors_df.head(self.TOP_MAX)
authors_df.columns = [authors.id, "commits"]
file_label = authors.DS_NAME + "_top_" + authors.id + ".csv"
file_path = os.path.join(data_path, file_label)
authors_df.to_csv(file_path, index=False)
"""Main organizations"""
orgs = project_community_config['orgs_top_metrics'][0]
orgs_df = orgs.aggregations()
orgs_df = orgs_df.head(self.TOP_MAX)
orgs_df.columns = [orgs.id, "commits"]
file_label = orgs.DS_NAME + "_top_" + orgs.id + ".csv"
file_path = os.path.join(data_path, file_label)
orgs_df.to_csv(file_path, index=False) |
<SYSTEM_TASK:>
Joins all the dataframes horizontally and creates a CSV and an image file from
<END_TASK>
<USER_TASK:>
Description:
def create_csv_fig_from_df(self, data_frames=[], filename=None, headers=[], index_label=None,
fig_type=None, title=None, xlabel=None, ylabel=None, xfont=10,
yfont=10, titlefont=15, fig_size=(8, 10), image_type="eps"):
"""
Joins all the dataframes horizontally and creates a CSV and an image file from
those dataframes.
:param data_frames: a list of dataframes containing timeseries data from various metrics
:param filename: the name of the csv and image file
:param headers: a list of headers to be applied to columns of the dataframes
:param index_label: name of the index column
:param fig_type: figure type. Currently we support 'bar' graphs
default: normal graph
:param title: display title of the figure
:param xlabel: label for x axis
:param ylabel: label for y axis
:param xfont: font size of x axis label
:param yfont: font size of y axis label
:param titlefont: font size of title of the figure
:param fig_size: tuple describing size of the figure (in centimeters) (W x H)
:param image_type: the image type to save the image as: jpg, png, etc
default: eps
:returns: creates a csv having name as "filename".csv and an image file
having the name as "filename"."image_type"
""" |
if not data_frames:
logger.error("No dataframes provided to create CSV")
sys.exit(1)
assert(len(data_frames) == len(headers))
dataframes = []
for index, df in enumerate(data_frames):
df = df.rename(columns={"value": headers[index].replace("_", "")})
dataframes.append(df)
res_df = pd.concat(dataframes, axis=1)
if "unixtime" in res_df:
del res_df['unixtime']
if not index_label:
index_label = "Date"
# Create the CSV file:
csv_name = filename + ".csv"
res_df.to_csv(csv_name, index_label=index_label)
logger.debug("file: {} was created.".format(csv_name))
# Create the Image:
image_name = filename + "." + image_type
title = title.replace("_", "")
figure(figsize=fig_size)
plt.subplot(111)
if fig_type == "bar":
ax = res_df.plot.bar(figsize=fig_size)
ticklabels = res_df.index
ax.xaxis.set_major_formatter(matplotlib.ticker.FixedFormatter(ticklabels))
else:
plt.plot(res_df)
if not ylabel:
ylabel = "num " + " & ".join(headers)
if not xlabel:
xlabel = index_label
plt.title(title, fontsize=titlefont)
plt.ylabel(ylabel, fontsize=yfont)
plt.xlabel(xlabel, fontsize=xfont)
plt.grid(True)
plt.savefig(image_name)
logger.debug("Figure {} was generated.".format(image_name)) |
<SYSTEM_TASK:>
Normalize non-ascii characters to their closest ascii counterparts
<END_TASK>
<USER_TASK:>
Description:
def normalize(s, replace_spaces=True):
"""Normalize non-ascii characters to their closest ascii counterparts
""" |
whitelist = (' -' + string.ascii_letters + string.digits)
if type(s) == six.binary_type:
s = six.text_type(s, 'utf-8', 'ignore')
table = {}
for ch in [ch for ch in s if ch not in whitelist]:
if ch not in table:
try:
replacement = unicodedata.normalize('NFKD', ch)[0]
if replacement in whitelist:
table[ord(ch)] = replacement
else:
table[ord(ch)] = u'_'
except:
table[ord(ch)] = u'_'
if replace_spaces:
return s.translate(table).replace(u'_', u'').replace(' ', '_')
else:
return s.translate(table).replace(u'_', u'') |
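A quick illustration of the expected behaviour, assuming the normalize function above is in scope together with its string, six and unicodedata imports:

print(normalize("Café del Mar"))                        # 'Cafe_del_Mar'
print(normalize("Café del Mar", replace_spaces=False))  # 'Cafe del Mar'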
<SYSTEM_TASK:>
Return list of Robot Framework -compatible cli-variables parsed
<END_TASK>
<USER_TASK:>
Description:
def get_robot_variables():
"""Return list of Robot Framework -compatible cli-variables parsed
from ROBOT_-prefixed environment variable
""" |
prefix = 'ROBOT_'
variables = []
def safe_str(s):
if isinstance(s, six.text_type):
return s
else:
return six.text_type(s, 'utf-8', 'ignore')
for key in os.environ:
if key.startswith(prefix) and len(key) > len(prefix):
variables.append(safe_str(
'%s:%s' % (key[len(prefix):], os.environ[key]),
))
return variables |
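A small usage sketch, assuming the function above is importable; the ROBOT_-prefixed variables are only examples:

import os

os.environ["ROBOT_BROWSER"] = "firefox"
os.environ["ROBOT_SELENIUM_SPEED"] = "0.5"

print(get_robot_variables())
# e.g. ['BROWSER:firefox', 'SELENIUM_SPEED:0.5'] (order follows os.environ)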
<SYSTEM_TASK:>
Initiate one-shot conversion.
<END_TASK>
<USER_TASK:>
Description:
def convert(self):
"""Initiate one-shot conversion.
The current settings are used, with the exception of continuous mode.""" |
c = self.config
c &= (~MCP342x._continuous_mode_mask & 0x7f) # Force one-shot
c |= MCP342x._not_ready_mask # Convert
logger.debug('Convert ' + hex(self.address) + ' config: ' + bin(c))
self.bus.write_byte(self.address, c) |
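A pure-Python sketch of the bit manipulation involved. The mask values below follow the MCP342x datasheet layout (bit 7 = RDY, bit 4 = O/C continuous mode) and are assumptions, since the class constants are not shown here.

_not_ready_mask = 0x80        # assumed: RDY bit (bit 7)
_continuous_mode_mask = 0x10  # assumed: O/C bit (bit 4)

config = 0b10011000                    # some current configuration value
c = config
c &= (~_continuous_mode_mask & 0x7f)   # clear continuous mode (and RDY)
c |= _not_ready_mask                   # set RDY to trigger a one-shot conversion
print(bin(c))                          # 0b10001000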
<SYSTEM_TASK:>
Create a filter dict with date_field from start to end dates.
<END_TASK>
<USER_TASK:>
Description:
def __get_query_range(cls, date_field, start=None, end=None):
"""
Create a filter dict with date_field from start to end dates.
:param date_field: field with the date value
:param start: date with the from value. Should be a datetime.datetime object
of the form: datetime.datetime(2018, 5, 25, 15, 17, 39)
:param end: date with the to value. Should be a datetime.datetime object
of the form: datetime.datetime(2018, 5, 25, 15, 17, 39)
:return: a dict containing a range filter which can be used later in an
es_dsl Search object using the `filter` method, or an empty string if
neither start nor end is given
""" |
if not start and not end:
return ''
start_end = {}
if start:
start_end["gte"] = "%s" % start.isoformat()
if end:
start_end["lte"] = "%s" % end.isoformat()
query_range = {date_field: start_end}
return query_range |
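For illustration, the shape of the returned filter for a sample date range; the date field name used here is an assumption:

import datetime

start = datetime.datetime(2018, 1, 1)
end = datetime.datetime(2018, 12, 31)

# Equivalent of __get_query_range("grimoire_creation_date", start, end)
query_range = {
    "grimoire_creation_date": {
        "gte": start.isoformat(),   # '2018-01-01T00:00:00'
        "lte": end.isoformat(),     # '2018-12-31T00:00:00'
    }
}
print(query_range)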
<SYSTEM_TASK:>
Create a es_dsl query object with the date range and filters.
<END_TASK>
<USER_TASK:>
Description:
def __get_query_basic(cls, date_field=None, start=None, end=None,
filters={}):
"""
Create a es_dsl query object with the date range and filters.
:param date_field: field with the date value
:param start: date with the from value, should be a datetime.datetime object
:param end: date with the to value, should be a datetime.datetime object
:param filters: dict with the filters to be applied
:return: a DSL query containing the required parameters
Ex: {'query': {'bool': {'filter': [{'range': {'DATE_FIELD':
{'gte': '2015-05-19T00:00:00',
'lte': '2018-05-18T00:00:00'}}}],
'must': [{'match_phrase': {'first_name': 'John'}},
{'match_phrase': {'last_name': 'Doe'}},
{'match_phrase': {'Phone': 2222222}}
]}}}
""" |
query_basic = Search()
query_filters = cls.__get_query_filters(filters)
for f in query_filters:
query_basic = query_basic.query(f)
query_filters_inverse = cls.__get_query_filters(filters, inverse=True)
# Here, don't forget the '~'. That is what makes this an inverse filter.
for f in query_filters_inverse:
query_basic = query_basic.query(~f)
if not date_field:
query_range = {}
else:
query_range = cls.__get_query_range(date_field, start, end)
# Applying the range filter
query_basic = query_basic.filter('range', **query_range)
return query_basic |
<SYSTEM_TASK:>
Create a es_dsl aggregation object based on a term.
<END_TASK>
<USER_TASK:>
Description:
def __get_query_agg_terms(cls, field, agg_id=None):
"""
Create a es_dsl aggregation object based on a term.
:param field: field to be used to aggregate
:return: a tuple with the aggregation id and es_dsl aggregation object. Ex:
{
"terms": {
"field": <field>,
"size": <size>,
"order": {
"_count": "desc"
}
}
}
Which will then be used as Search.aggs.bucket(agg_id, query_agg) method
to add aggregations to the es_dsl Search object
""" |
if not agg_id:
agg_id = cls.AGGREGATION_ID
query_agg = A("terms", field=field, size=cls.AGG_SIZE, order={"_count": "desc"})
return (agg_id, query_agg) |
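A short sketch of the aggregation this builds, using elasticsearch_dsl directly; the field name and size are illustrative (AGG_SIZE is a class constant not shown here):

from elasticsearch_dsl import A

agg = A("terms", field="author_name", size=100, order={"_count": "desc"})
print(agg.to_dict())
# {'terms': {'field': 'author_name', 'size': 100, 'order': {'_count': 'desc'}}}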
<SYSTEM_TASK:>
Create an es_dsl aggregation object for getting the max value of a field.
<END_TASK>
<USER_TASK:>
Description:
def __get_query_agg_max(cls, field, agg_id=None):
"""
Create an es_dsl aggregation object for getting the max value of a field.
:param field: field from which to get the max value
:return: a tuple with the aggregation id and es_dsl aggregation object. Ex:
{
"max": {
"field": <field>
}
}
""" |
if not agg_id:
agg_id = cls.AGGREGATION_ID
query_agg = A("max", field=field)
return (agg_id, query_agg) |
<SYSTEM_TASK:>
Create an es_dsl aggregation object for getting the average value of a field.
<END_TASK>
<USER_TASK:>
Description:
def __get_query_agg_avg(cls, field, agg_id=None):
"""
Create an es_dsl aggregation object for getting the average value of a field.
:param field: field from which to get the average value
:return: a tuple with the aggregation id and es_dsl aggregation object. Ex:
{
"avg": {
"field": <field>
}
}
""" |
if not agg_id:
agg_id = cls.AGGREGATION_ID
query_agg = A("avg", field=field)
return (agg_id, query_agg) |
<SYSTEM_TASK:>
Create an es_dsl aggregation object for getting the approximate count of distinct values of a field.
<END_TASK>
<USER_TASK:>
Description:
def __get_query_agg_cardinality(cls, field, agg_id=None):
"""
Create an es_dsl aggregation object for getting the approximate count of distinct values of a field.
:param field: field from which to get the count of distinct values
:return: a tuple with the aggregation id and es_dsl aggregation object. Ex:
{
"cardinality": {
"field": <field>,
"precision_threshold": 3000
}
}
""" |
if not agg_id:
agg_id = cls.AGGREGATION_ID
query_agg = A("cardinality", field=field, precision_threshold=cls.ES_PRECISION)
return (agg_id, query_agg) |
<SYSTEM_TASK:>
Return a dict with the bounds for a date_histogram agg.
<END_TASK>
<USER_TASK:>
Description:
def __get_bounds(cls, start=None, end=None):
"""
Return a dict with the bounds for a date_histogram agg.
:param start: date from for the date_histogram agg, should be a datetime.datetime object
:param end: date to for the date_histogram agg, should be a datetime.datetime object
:return: a dict with the DSL bounds for a date_histogram aggregation
""" |
bounds = {}
if start or end:
# Extend bounds so we have data until start and end
start_ts = None
end_ts = None
if start:
# elasticsearch is unable to convert date with microseconds into long
# format for processing, hence we convert microseconds to zero
start = start.replace(microsecond=0)
start_ts = start.replace(tzinfo=timezone.utc).timestamp()
start_ts_ms = start_ts * 1000 # ES uses ms
if end:
end = end.replace(microsecond=0)
end_ts = end.replace(tzinfo=timezone.utc).timestamp()
end_ts_ms = end_ts * 1000 # ES uses ms
bounds_data = {}
if start:
bounds_data["min"] = start_ts_ms
if end:
bounds_data["max"] = end_ts_ms
bounds["extended_bounds"] = bounds_data
return bounds |
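A standalone sketch of the timestamp arithmetic, showing the extended_bounds dict produced for a sample date range:

from datetime import datetime, timezone

start = datetime(2018, 1, 1, microsecond=123456)
end = datetime(2018, 12, 31)

start = start.replace(microsecond=0)                              # drop microseconds, as above
start_ts_ms = start.replace(tzinfo=timezone.utc).timestamp() * 1000
end_ts_ms = end.replace(tzinfo=timezone.utc).timestamp() * 1000

bounds = {"extended_bounds": {"min": start_ts_ms, "max": end_ts_ms}}
print(bounds)
# {'extended_bounds': {'min': 1514764800000.0, 'max': 1546214400000.0}}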
<SYSTEM_TASK:>
Create an es_dsl aggregation object for getting the time series values for a field.
<END_TASK>
<USER_TASK:>
Description:
def __get_query_agg_ts(cls, field, time_field, interval=None,
time_zone=None, start=None, end=None,
agg_type='count', offset=None):
"""
Create an es_dsl aggregation object for getting the time series values for a field.
:param field: field to get the time series values
:param time_field: field with the date
:param interval: interval to be used to generate the time series values, such as: (year(y),
quarter(q), month(M), week(w), day(d), hour(h), minute(m), second(s))
:param time_zone: time zone for the time_field
:param start: date from for the time series, should be a datetime.datetime object
:param end: date to for the time series, should be a datetime.datetime object
:param agg_type: kind of aggregation for the field (cardinality, avg, percentiles)
:param offset: offset to be added to the time_field in days
:return: an aggregation object to calculate timeseries values of a field
""" |
""" Time series for an aggregation metric """
if not interval:
interval = '1M'
if not time_zone:
time_zone = 'UTC'
if not field:
field_agg = ''
else:
if agg_type == "cardinality":
agg_id, field_agg = cls.__get_query_agg_cardinality(field, agg_id=cls.AGGREGATION_ID + 1)
elif agg_type == "avg":
agg_id, field_agg = cls.__get_query_agg_avg(field, agg_id=cls.AGGREGATION_ID + 1)
elif agg_type == "percentiles":
agg_id, field_agg = cls.__get_query_agg_percentiles(field, agg_id=cls.AGGREGATION_ID + 1)
else:
raise RuntimeError("Aggregation of %s in ts not supported" % agg_type)
bounds = {}
if start or end:
if not offset:
# With offset and quarter interval bogus buckets are added
# to the start and to the end if extended_bounds is used
# https://github.com/elastic/elasticsearch/issues/23776
bounds = cls.__get_bounds(start, end)
else:
bounds = {'offset': offset}
query_agg = A("date_histogram", field=time_field, interval=interval,
time_zone=time_zone, min_doc_count=0, **bounds)
agg_dict = field_agg.to_dict()[field_agg.name]
query_agg.bucket(agg_id, field_agg.name, **agg_dict)
return (cls.AGGREGATION_ID, query_agg) |
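A sketch of the combined aggregation built with elasticsearch_dsl; the field names, interval and bucket id are illustrative:

from elasticsearch_dsl import A

ts_agg = A("date_histogram", field="grimoire_creation_date", interval="1M",
           time_zone="UTC", min_doc_count=0)
# nested metric bucket, e.g. a cardinality agg on the author field
ts_agg.bucket("2", "cardinality", field="author_uuid", precision_threshold=3000)
print(ts_agg.to_dict())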
<SYSTEM_TASK:>
Build the DSL query for counting the number of items.
<END_TASK>
<USER_TASK:>
Description:
def get_count(cls, date_field=None, start=None, end=None, filters={}):
"""
Build the DSL query for counting the number of items.
:param date_field: field with the date
:param start: date from which to start counting, should be a datetime.datetime object
:param end: date until which to count items, should be a datetime.datetime object
:param filters: dict with the filters to be applied
:return: a DSL query with size parameter
""" |
""" Total number of items """
query_basic = cls.__get_query_basic(date_field=date_field,
start=start, end=end,
filters=filters)
# size=0 gives only the count and not the hits
query = query_basic.extra(size=0)
return query |
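An illustrative equivalent built directly with elasticsearch_dsl, assuming a hypothetical date field; size=0 keeps only the hit count in the response:

from elasticsearch_dsl import Search

query = Search()
query = query.filter("range", grimoire_creation_date={"gte": "2018-01-01T00:00:00",
                                                      "lte": "2018-12-31T00:00:00"})
query = query.extra(size=0)   # only the count, no hits
print(query.to_dict())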
<SYSTEM_TASK:>
Makes sure we fetched the fields, and populate them if not.
<END_TASK>
<USER_TASK:>
Description:
def ensure_fields(self, fields, force_refetch=False):
""" Makes sure we fetched the fields, and populate them if not. """ |
# We fetched with fields=None, we should have fetched them all
if self._fetched_fields is None or self._initialized_with_doc:
return
if force_refetch:
missing_fields = fields
else:
missing_fields = [f for f in fields if f not in self._fetched_fields]
if len(missing_fields) == 0:
return
if "_id" not in self:
raise Exception("Can't ensure_fields because _id is missing")
self.refetch_fields(missing_fields) |
<SYSTEM_TASK:>
Refetches a list of fields from the DB
<END_TASK>
<USER_TASK:>
Description:
def refetch_fields(self, missing_fields):
""" Refetches a list of fields from the DB """ |
db_fields = self.mongokat_collection.find_one({"_id": self["_id"]}, fields={k: 1 for k in missing_fields})
self._fetched_fields += tuple(missing_fields)
if not db_fields:
return
for k, v in db_fields.items():
self[k] = v |
<SYSTEM_TASK:>
Removes this list of fields from both the local object and the DB.
<END_TASK>
<USER_TASK:>
Description:
def unset_fields(self, fields):
""" Removes this list of fields from both the local object and the DB. """ |
self.mongokat_collection.update_one({"_id": self["_id"]}, {"$unset": {
f: 1 for f in fields
}})
for f in fields:
if f in self:
del self[f] |
<SYSTEM_TASK:>
Saves just the currently set fields in the database.
<END_TASK>
<USER_TASK:>
Description:
def save_partial(self, data=None, allow_protected_fields=False, **kwargs):
""" Saves just the currently set fields in the database. """ |
# Backwards compat, deprecated argument
if "dotnotation" in kwargs:
del kwargs["dotnotation"]
if data is None:
data = dotdict(self)
if "_id" not in data:
raise KeyError("_id must be set in order to do a save_partial()")
del data["_id"]
if len(data) == 0:
return
if not allow_protected_fields:
self.mongokat_collection._check_protected_fields(data)
apply_on = dotdict(self)
self._initialized_with_doc = False
self.mongokat_collection.update_one({"_id": self["_id"]}, {"$set": data}, **kwargs)
for k, v in data.items():
apply_on[k] = v
self.update(dict(apply_on)) |
<SYSTEM_TASK:>
Read default argument values for a given tool
<END_TASK>
<USER_TASK:>
Description:
def read_default_args(tool_name):
"""
Read default argument values for a given tool
:param tool_name: Name of the script to read the default arguments for
:return: Dictionary of default arguments (shared + tool-specific)
""" |
global opinel_arg_dir
profile_name = 'default'
# h4ck to have an early read of the profile name
for i, arg in enumerate(sys.argv):
if arg == '--profile' and len(sys.argv) > i + 1:
profile_name = sys.argv[i + 1]
if not os.path.isdir(opinel_arg_dir):
try:
os.makedirs(opinel_arg_dir)
except:
# Within AWS Lambda, home directories are not writable. This attempts to detect that...
# ...and uses the /tmp folder, which *is* writable in AWS Lambda
opinel_arg_dir = os.path.join(tempfile.gettempdir(), '.aws/opinel')
if not os.path.isdir(opinel_arg_dir):
os.makedirs(opinel_arg_dir)
opinel_arg_file = os.path.join(opinel_arg_dir, '%s.json' % profile_name)
default_args = {}
if os.path.isfile(opinel_arg_file):
with open(opinel_arg_file, 'rt') as f:
all_args = json.load(f)
for target in all_args:
if tool_name.endswith(target):
default_args.update(all_args[target])
for k in all_args['shared']:
if k not in default_args:
default_args[k] = all_args['shared'][k]
return default_args |
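For reference, a sketch of the JSON layout this function expects in the per-profile file (e.g. ~/.aws/opinel/default.json), expressed as a Python dict; the tool name and argument values are hypothetical:

# Contents of the profile file, loaded with json.load() above
all_args = {
    "shared": {
        "debug": False,
    },
    "awsrecipes_create_user.py": {    # hypothetical tool name
        "force_common_group": True,
    },
}
# read_default_args("awsrecipes_create_user.py") merges the matching tool
# section with the "shared" section, tool-specific values taking precedence.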
<SYSTEM_TASK:>
Prompt function that works for Python2 and Python3
<END_TASK>
<USER_TASK:>
Description:
def prompt(test_input = None):
"""
Prompt function that works for Python2 and Python3
:param test_input: Value to be returned when testing
:return: Value typed by user (or passed in argument when testing)
""" |
if test_input is not None:
if type(test_input) == list and len(test_input):
choice = test_input.pop(0)
elif type(test_input) == list:
choice = ''
else:
choice = test_input
else:
# Coverage: 4 missed statements
try:
choice = raw_input()
except:
choice = input()
return choice |
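The test_input parameter drives unit testing; a brief sketch, assuming the function above is in scope:

print(prompt("yes"))       # 'yes'  (plain value returned as-is)
answers = ["first", "second"]
print(prompt(answers))     # 'first'  (list entries are consumed in order)
print(prompt(answers))     # 'second'
print(prompt([]))          # ''  (an exhausted or empty list yields an empty string)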
<SYSTEM_TASK:>
Prompt for an MFA code
<END_TASK>
<USER_TASK:>
Description:
def prompt_4_mfa_code(activate = False, input = None):
"""
Prompt for an MFA code
:param activate: Set to true when prompting for the 2nd code when activating a new MFA device
:param input: Used for unit testing
:return: The MFA code
""" |
while True:
if activate:
prompt_string = 'Enter the next value: '
else:
prompt_string = 'Enter your MFA code (or \'q\' to abort): '
mfa_code = prompt_4_value(prompt_string, no_confirm = True, input = input)
try:
if mfa_code == 'q':
return mfa_code
int(mfa_code)  # must consist of digits only
mfa_code[5]  # and be at least 6 characters long (raises IndexError otherwise)
break
except:
printError('Error: your MFA code must only consist of digits and be at least 6 characters long.')
return mfa_code |
<SYSTEM_TASK:>
Prompt for an MFA serial number
<END_TASK>
<USER_TASK:>
Description:
def prompt_4_mfa_serial(input = None):
"""
Prompt for an MFA serial number
:param input: Used for unit testing
:return: The MFA serial number
""" |
return prompt_4_value('Enter your MFA serial:', required = False, regex = re_mfa_serial_format, regex_format = mfa_serial_format, input = input) |
<SYSTEM_TASK:>
Prompt whether the file should be overwritten
<END_TASK>
<USER_TASK:>
Description:
def prompt_4_overwrite(filename, force_write, input = None):
"""
Prompt whether the file should be overwritten
:param filename: Name of the file about to be written
:param force_write: Skip confirmation prompt if this flag is set
:param input: Used for unit testing
:return: Boolean whether file write operation is allowed
""" |
if not os.path.exists(filename) or force_write:
return True
return prompt_4_yes_no('File \'{}\' already exists. Do you want to overwrite it'.format(filename), input = input) |
<SYSTEM_TASK:>
From engine name, load the engine path and return the renderer class
<END_TASK>
<USER_TASK:>
Description:
def get_feed_renderer(engines, name):
"""
From engine name, load the engine path and return the renderer class
Raises 'FeedparserError' on any loading error
""" |
if name not in engines:
raise FeedparserError("Given feed name '{}' does not exist in 'settings.FEED_RENDER_ENGINES'".format(name))
renderer = safe_import_module(engines[name])
return renderer |
<SYSTEM_TASK:>
Build the list of target region names
<END_TASK>
<USER_TASK:>
Description:
def build_region_list(service, chosen_regions = [], partition_name = 'aws'):
"""
Build the list of target region names
:param service: name of the AWS service (e.g. 'ec2')
:param chosen_regions: optional list of region names to intersect with the available ones
:param partition_name: AWS partition name (default: 'aws')
:return: list of target region names
""" |
service = 'ec2containerservice' if service == 'ecs' else service # Of course things aren't that easy...
# Get list of regions from botocore
regions = Session().get_available_regions(service, partition_name = partition_name)
if len(chosen_regions):
return list((Counter(regions) & Counter(chosen_regions)).elements())
else:
return regions |
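A usage sketch, assuming botocore is installed and the function above is in scope; the chosen regions are illustrative:

# All regions where EC2 is available in the standard 'aws' partition
print(build_region_list('ec2'))

# Intersection with a user-supplied list; unknown names are silently dropped
print(build_region_list('ec2', chosen_regions=['us-east-1', 'eu-west-1', 'not-a-region']))
# e.g. ['eu-west-1', 'us-east-1'] (order follows botocore's region list)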
<SYSTEM_TASK:>
Instantiates an AWS API client
<END_TASK>
<USER_TASK:>
Description:
def connect_service(service, credentials, region_name = None, config = None, silent = False):
"""
Instantiates an AWS API client
:param service: name of the AWS service to connect to (e.g. 'ec2')
:param credentials: dict with 'AccessKeyId', 'SecretAccessKey' and 'SessionToken'
:param region_name: AWS region name to target (optional)
:param config: botocore Config object passed to the client (optional)
:param silent: if True, skip the informational "Connecting to..." message
:return: the boto3 API client, or None if the connection failed
""" |
api_client = None
try:
client_params = {}
client_params['service_name'] = service.lower()
session_params = {}
session_params['aws_access_key_id'] = credentials['AccessKeyId']
session_params['aws_secret_access_key'] = credentials['SecretAccessKey']
session_params['aws_session_token'] = credentials['SessionToken']
if region_name:
client_params['region_name'] = region_name
session_params['region_name'] = region_name
if config:
client_params['config'] = config
aws_session = boto3.session.Session(**session_params)
if not silent:
infoMessage = 'Connecting to AWS %s' % service
if region_name:
infoMessage = infoMessage + ' in %s' % region_name
printInfo('%s...' % infoMessage)
api_client = aws_session.client(**client_params)
except Exception as e:
printException(e)
return api_client |
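A usage sketch with placeholder credentials, assuming the function above and its helpers are importable; creating the client does not call AWS, but any subsequent API call would require valid credentials:

credentials = {
    'AccessKeyId': 'AKIA...',   # placeholder
    'SecretAccessKey': '...',   # placeholder
    'SessionToken': None,
}
ec2_client = connect_service('ec2', credentials, region_name='us-east-1')
if ec2_client:
    print(ec2_client.meta.region_name)   # 'us-east-1'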