<SYSTEM_TASK:>
Eliminates None or empty items from lists, tuples or sets passed in.
<END_TASK>
<USER_TASK:>
Description:
def filter_empty(values, default=None):
"""
Eliminates None or empty items from lists, tuples or sets passed in.
If values is None or empty after filtering, the default is returned.
"""
|
if values is None:
return default
elif hasattr(values, '__len__') and len(values) == 0:
return default
# `_filtered_types` is assumed to hold the string types that should pass through
# untouched; `_filter_types` the container types (list, tuple, set) whose
# concrete type is preserved in the output.
elif hasattr(values, '__iter__') and not isinstance(values, _filtered_types):
filtered = type(values) if isinstance(values, _filter_types) else list
values = filtered(
v for v in values if not (v is None or (hasattr(v, '__len__') and len(v) == 0))
)
return default if len(values) == 0 else values
return values
|
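A short usage sketch (hypothetical calls, assuming the module-level constants described above):
filter_empty([1, None, '', [], 'a'])        # -> [1, 'a']
filter_empty((None, ''), default='n/a')     # -> 'n/a' (every item was removed)
filter_empty('text')                        # -> 'text' (strings pass through)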
<SYSTEM_TASK:>
Sets the unit of this Dimensions.
<END_TASK>
<USER_TASK:>
Description:
def unit(self, unit):
"""Sets the unit of this Dimensions.
:param unit: The unit of this Dimensions.
:type: str
"""
|
allowed_values = ["cm", "inch", "foot"] # noqa: E501
if unit is not None and unit not in allowed_values:
raise ValueError(
"Invalid value for `unit` ({0}), must be one of {1}" # noqa: E501
.format(unit, allowed_values)
)
self._unit = unit
|
<SYSTEM_TASK:>
This resource returns a dictionary of the localized map names for
<END_TASK>
<USER_TASK:>
Description:
def map_names(lang="en"):
"""This resource returns an dictionary of the localized map names for
the specified language. Only maps with events are listed - if you need a
list of all maps, use ``maps.json`` instead.
:param lang: The language to query the names for.
:return: the response is a dictionary where the key is the map id and the
value is the name of the map in the specified language.
"""
|
cache_name = "map_names.%s.json" % lang
data = get_cached("map_names.json", cache_name, params=dict(lang=lang))
return {item["id"]: item["name"] for item in data}
|
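For illustration, a hypothetical call (``get_cached`` is assumed to fetch and cache the JSON endpoint):
names = map_names(lang="en")
# names -> {"15": "Queensdale", "19": "Plains of Ashford", ...}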
<SYSTEM_TASK:>
This resource returns details about maps in the game, including details
<END_TASK>
<USER_TASK:>
Description:
def maps(map_id=None, lang="en"):
"""This resource returns details about maps in the game, including details
about floor and translation data on how to translate between world
coordinates and map coordinates.
:param map_id: Only list this map.
:param lang: Show localized texts in the specified language.
The response is a dictionary where the key is the map id and the value is
a dictionary containing the following properties:
map_name (string)
The map name.
min_level (number)
The minimal level of this map.
max_level (number)
The maximum level of this map.
default_floor (number)
The default floor of this map.
floors (list)
A list of available floors for this map.
region_id (number)
The id of the region this map belongs to.
region_name (string)
The name of the region this map belongs to.
continent_id (number)
The id of the continent this map belongs to.
continent_name (string)
The name of the continent this map belongs to.
map_rect (rect)
The dimensions of the map.
continent_rect (rect)
The dimensions of the map within the continent coordinate system.
If a map_id is given, only the values for that map are returned.
"""
|
if map_id:
cache_name = "maps.%s.%s.json" % (map_id, lang)
params = {"map_id": map_id, "lang": lang}
else:
cache_name = "maps.%s.json" % lang
params = {"lang": lang}
data = get_cached("maps.json", cache_name, params=params).get("maps")
return data.get(str(map_id)) if map_id else data
|
<SYSTEM_TASK:>
This resource returns details about a map floor, used to populate a
<END_TASK>
<USER_TASK:>
Description:
def map_floor(continent_id, floor, lang="en"):
"""This resource returns details about a map floor, used to populate a
world map. All coordinates are map coordinates.
The returned data only contains static content. Dynamic content, such as
vendors, is not currently available.
:param continent_id: The continent.
:param floor: The map floor.
:param lang: Show localized texts in the specified language.
The response is an object with the following properties:
texture_dims (dimension)
The dimensions of the texture.
clamped_view (rect)
If present, it represents a rectangle of downloadable textures. Every
tile coordinate outside this rectangle is not available on the tile
server.
regions (object)
A mapping from region id to an object.
Each region object contains the following properties:
name (string)
The region name.
label_coord (coordinate)
The coordinates of the region label.
maps (object)
A mapping from the map id to an object.
Each map object contains the following properties:
name (string)
The map name.
min_level (number)
The minimum level of the map.
max_level (number)
The maximum level of the map.
default_floor (number)
The default floor of the map.
map_rect (rect)
The dimensions of the map.
continent_rect (rect)
The dimensions of the map within the continent coordinate system.
points_of_interest (list)
A list of points of interest (landmarks, waypoints and vistas)
Each point of interest object contains the following properties:
poi_id (number)
The point of interest id.
name (string)
The name of the point of interest.
type (string)
The type. This can be either "landmark" for actual points of
interest, "waypoint" for waypoints, or "vista" for vistas.
floor (number)
The floor of this object.
coord (coordinate)
The coordinates of this object.
tasks (list)
A list of renown hearts.
Each task object contains the following properties:
task_id (number)
The renown heart id.
objective (string)
The objective or name of the heart.
level (number)
The level of the heart.
coord (coordinate)
The coordinates where it takes place.
skill_challenges (list)
A list of skill challenges.
Each skill challenge object contains the following properties:
coord (coordinate)
The coordinates of this skill challenge.
sectors (list)
A list of areas within the map.
Each sector object contains the following properties:
sector_id (number)
The area id.
name (string)
The name of the area.
level (number)
The level of the area.
coord (coordinate)
The coordinates of this area (this is usually the center
position).
Special types:
Dimension properties are two-element lists of width and height.
Coordinate properties are two-element lists of the x and y position.
Rect properties are two-element lists containing the upper-left and
lower-right coordinates.
"""
|
cache_name = "map_floor.%s-%s.%s.json" % (continent_id, floor, lang)
params = {"continent_id": continent_id, "floor": floor, "lang": lang}
return get_cached("map_floor.json", cache_name, params=params)
|
<SYSTEM_TASK:>
This is applicable to fields with max_value and min_value as
<END_TASK>
<USER_TASK:>
Description:
def get_value_based_inclusive_interval(cls, field, max_value=None):
"""
This is applicable to fields with max_value and min_value as
validators.
Note:
1. This is different from fields with max_length as a validator
2. This means that the two methods based on value and length
are almost the same method but for the max_* attribute
that is being checked. Probably need to DRY this out at
some point.
"""
|
if field.max_value is None:
field.max_value = max_value or MAX_LENGTH
if field.min_value is None:
field.min_value = 0
Interval = namedtuple('interval', ['start', 'stop'])
return Interval(start=field.min_value, stop=field.max_value)
|
<SYSTEM_TASK:>
No raw bytes should escape from this, all byte encoding and
<END_TASK>
<USER_TASK:>
Description:
def _async_recv(self):
"""No raw bytes should escape from this, all byte encoding and
decoding should be handling inside this function"""
|
logging.info("Receive loop started")
recbuffer = b""
while not self._stop_event.is_set():
time.sleep(0.01)
try:
recbuffer = recbuffer + self._socket.recv(1024)
data = recbuffer.split(b'\r\n')
recbuffer = data.pop()
if data:
for line in data:
self._process_data(line.decode(encoding='UTF-8', errors='ignore'))
except BlockingIOError:
    pass  # no data available on the socket yet; keep polling
logging.info("Receive loop stopped")
|
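The buffering idiom above (append the chunk, split on the delimiter, keep the trailing partial line) can be seen in isolation in this minimal sketch:
recbuffer = b""
for chunk in (b"PING :serv", b"er1\r\nNOTICE x\r\n"):
    recbuffer += chunk
    lines = recbuffer.split(b'\r\n')
    recbuffer = lines.pop()            # trailing element is an incomplete line
    for line in lines:
        print(line.decode('UTF-8'))    # "PING :server1", then "NOTICE x"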
<SYSTEM_TASK:>
The main function used when ``directory_script_runner.py`` is run as a single script from the command-line, or when installed as a command-line tool
<END_TASK>
<USER_TASK:>
Description:
def main(arguments=None):
"""
The main function used when ``directory_script_runner.py`` is run as a single script from the command-line, or when installed as a command-line tool
"""
|
# setup the command-line util settings
su = tools(
arguments=arguments,
docString=__doc__,
logLevel="WARNING",
options_first=False,
projectName="fundmentals"
)
arguments, settings, log, dbConn = su.setup()
# UNPACK REMAINING CL ARGUMENTS USING `EXEC` TO SETUP THE VARIABLE NAMES
# AUTOMATICALLY
for arg, val in arguments.iteritems():
if arg[0] == "-":
varname = arg.replace("-", "") + "Flag"
else:
varname = arg.replace("<", "").replace(">", "")
if isinstance(val, str) or isinstance(val, unicode):
exec(varname + " = '%s'" % (val,))
else:
exec(varname + " = %s" % (val,))
if arg == "--dbConn":
dbConn = val
log.debug('%s = %s' % (varname, val,))
if successFlag and successFlag.lower() == "none":
successFlag = None
if failureFlag and failureFlag.lower() == "none":
failureFlag = None
directory_script_runner(
log=log,
pathToScriptDirectory=pathToDirectory,
databaseName=databaseName,
loginPath=loginPath,
successRule=successFlag,
failureRule=failureFlag
)
return
|
<SYSTEM_TASK:>
Sets the weight_unit of this MeasurementSettings.
<END_TASK>
<USER_TASK:>
Description:
def weight_unit(self, weight_unit):
"""Sets the weight_unit of this MeasurementSettings.
:param weight_unit: The weight_unit of this MeasurementSettings.
:type: str
"""
|
allowed_values = ["pound", "kilogram"] # noqa: E501
if weight_unit is not None and weight_unit not in allowed_values:
raise ValueError(
"Invalid value for `weight_unit` ({0}), must be one of {1}" # noqa: E501
.format(weight_unit, allowed_values)
)
self._weight_unit = weight_unit
|
<SYSTEM_TASK:>
Sets the dimensions_unit of this MeasurementSettings.
<END_TASK>
<USER_TASK:>
Description:
def dimensions_unit(self, dimensions_unit):
"""Sets the dimensions_unit of this MeasurementSettings.
:param dimensions_unit: The dimensions_unit of this MeasurementSettings.
:type: str
"""
|
allowed_values = ["inch", "cm", "foot", "meter"] # noqa: E501
if dimensions_unit is not None and dimensions_unit not in allowed_values:
raise ValueError(
"Invalid value for `dimensions_unit` ({0}), must be one of {1}" # noqa: E501
.format(dimensions_unit, allowed_values)
)
self._dimensions_unit = dimensions_unit
|
<SYSTEM_TASK:>
Determine whether pattern has already been loaded into the cache.
<END_TASK>
<USER_TASK:>
Description:
def already_resolved(self, pattern: QueryTriple) -> bool:
""" Determine whether pattern has already been loaded into the cache.
The "wild card" - `(None, None, None)` - always counts as resolved.
:param pattern: pattern to check
:return: True if the pattern is a subset of the elements already loaded
"""
|
if self.sparql_locked or pattern == (None, None, None):
return True
for resolved_node in self.resolved_nodes:
if resolved_node != (None, None, None) and \
(pattern[0] == resolved_node[0] or resolved_node[0] is None) and \
(pattern[1] == resolved_node[1] or resolved_node[1] is None) and\
(pattern[2] == resolved_node[2] or resolved_node[2] is None):
return True
return False
|
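A worked illustration of the subsumption rule (hypothetical cache state):
# resolved_nodes = [('s1', None, None)]
# already_resolved(('s1', 'p1', 'o1'))  -> True   (subject matches, rest wildcarded)
# already_resolved(('s2', 'p1', 'o1'))  -> False  (subject differs)
# already_resolved((None, None, None))  -> True   (the wild card is always resolved)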
<SYSTEM_TASK:>
Activate the base compatibility.
<END_TASK>
<USER_TASK:>
Description:
def fix_base(fix_environ):
"""Activate the base compatibility."""
|
def _is_android():
import os
vm_path = os.sep+"system"+os.sep+"bin"+os.sep+"dalvikvm"
if os.path.exists(vm_path) or os.path.exists(os.sep+"system"+vm_path):
return True
try:
import android
del android # Unused import (imported only for Android detection)
return True
except ImportError:
pass
return False
def _fix_android_environ():
import os
if "LD_LIBRARY_PATH" not in os.environ:
os.environ["LD_LIBRARY_PATH"] = ""
lib_path = os.pathsep+"/system/lib"+os.pathsep+"/vendor/lib"
if sys.python_bits == 64:
lib_path = os.pathsep+"/system/lib64"+os.pathsep+"/vendor/lib64" + lib_path
os.environ["LD_LIBRARY_PATH"] += lib_path
if sys.platform.startswith("linux") and sys.platform != "linux-android":
if _is_android():
sys.platform = "linux-android"
elif "-" not in sys.platform:
sys.platform = "linux"
sys.platform_codename = sys.platform
if sys.platform_codename == "win32":
sys.platform_codename = "win"
elif sys.platform_codename == "linux-android":
sys.platform_codename = "android"
if 'maxsize' in sys.__dict__:
if sys.maxsize > 2**32:
sys.python_bits = 64
else:
sys.python_bits = 32
else:
import struct
sys.python_bits = 8 * struct.calcsize("P")
if sys.python_bits == 32:
sys.maxsize = 2147483647
else:
sys.maxsize = int("9223372036854775807")
if fix_environ and sys.platform == "linux-android":
_fix_android_environ()
|
<SYSTEM_TASK:>
Activate the subprocess compatibility.
<END_TASK>
<USER_TASK:>
Description:
def fix_subprocess(override_debug=False, override_exception=False):
"""Activate the subprocess compatibility."""
|
import subprocess
# Exceptions
if subprocess.__dict__.get("SubprocessError") is None:
subprocess.SubprocessError = _Internal.SubprocessError
if _InternalReferences.UsedCalledProcessError is None:
if "CalledProcessError" in subprocess.__dict__:
_subprocess_called_process_error(True, subprocess)
else:
_subprocess_called_process_error(False, subprocess)
subprocess.CalledProcessError = _InternalReferences.UsedCalledProcessError
def _check_output(*args, **kwargs):
if "stdout" in kwargs:
raise ValueError("stdout argument not allowed, "
"it will be overridden.")
process = subprocess.Popen(stdout=subprocess.PIPE, *args, **kwargs)
stdout_data, __ = process.communicate()
ret_code = process.poll()
if ret_code is None:
raise RuntimeWarning("The process is not yet terminated.")
if ret_code:
cmd = kwargs.get("args")
if cmd is None:
cmd = args[0]
raise _InternalReferences.UsedCalledProcessError(returncode=ret_code, cmd=cmd, output=stdout_data)
return stdout_data
try:
subprocess.check_output
except AttributeError:
subprocess.check_output = _check_output
|
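Once patched, the backport behaves like the standard-library function (POSIX example):
out = subprocess.check_output(["echo", "hello"])
# out == b"hello\n"; a non-zero exit status raises CalledProcessError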
<SYSTEM_TASK:>
Activate the full compatibility.
<END_TASK>
<USER_TASK:>
Description:
def fix_all(override_debug=False, override_all=False):
"""Activate the full compatibility."""
|
fix_base(True)
fix_builtins(override_debug)
fix_subprocess(override_debug, override_all)
return True
|
<SYSTEM_TASK:>
Scrubs from the front and back of an 'object' column in a DataFrame
<END_TASK>
<USER_TASK:>
Description:
def smart_scrub(df,col_name,error_rate = 0):
""" Scrubs from the front and back of an 'object' column in a DataFrame
until the scrub would semantically alter the contents of the column. If only a
subset of the elements in the column are scrubbed, then a boolean array indicating which
elements have been scrubbed is appended to the dataframe. Returns a tuple of the strings removed
from the front and back of the elements
df - DataFrame
DataFrame to scrub
col_name - string
Name of column to scrub
error_rate - number, default 0
The maximum amount of values this function can ignore while scrubbing, expressed as a
fraction of the total amount of rows in the dataframe.
"""
|
scrubf = smart_scrubf(df,col_name,error_rate)
scrubb = smart_scrubb(df,col_name,error_rate)
return (scrubf, scrubb)
|
<SYSTEM_TASK:>
Scrubs from the front of an 'object' column in a DataFrame
<END_TASK>
<USER_TASK:>
Description:
def smart_scrubf(df,col_name,error_rate = 0):
""" Scrubs from the front of an 'object' column in a DataFrame
until the scrub would semantically alter the contents of the column. If only a
subset of the elements in the column are scrubbed, then a boolean array indicating which
elements have been scrubbed is appended to the dataframe. Returns the string that was scrubbed
df - DataFrame
DataFrame to scrub
col_name - string
Name of column to scrub
error_rate - number, default 0
The maximum amount of values this function can ignore while scrubbing, expressed as a
fraction of the total amount of rows in the dataframe.
"""
|
scrubbed = ""
while True:
valcounts = df[col_name].str[:len(scrubbed)+1].value_counts()
if not len(valcounts):
break
if not valcounts.iloc[0] >= (1-error_rate) * _utils.rows(df):
break
scrubbed=valcounts.index[0]
if scrubbed == '':
return None
which = df[col_name].str.startswith(scrubbed)
_basics.col_scrubf(df,col_name,which,len(scrubbed),True)
if not which.all():
new_col_name = _basics.colname_gen(df,"{}_sf-{}".format(col_name,scrubbed))
df[new_col_name] = which
return scrubbed
|
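A hypothetical example of front-scrubbing, assuming the module's _utils and _basics helpers are importable:
import pandas as pd
df = pd.DataFrame({"code": ["ID-001", "ID-002", "ID-003"]})
smart_scrubf(df, "code")   # -> "ID-00"; df["code"] is now ["1", "2", "3"]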
<SYSTEM_TASK:>
Scrubs from the back of an 'object' column in a DataFrame
<END_TASK>
<USER_TASK:>
Description:
def smart_scrubb(df,col_name,error_rate = 0):
""" Scrubs from the back of an 'object' column in a DataFrame
until the scrub would semantically alter the contents of the column. If only a
subset of the elements in the column are scrubbed, then a boolean array indicating which
elements have been scrubbed is appended to the dataframe. Returns the string that was scrubbed.
df - DataFrame
DataFrame to scrub
col_name - string
Name of column to scrub
error_rate - number, default 0
The maximum amount of values this function can ignore while scrubbing, expressed as a
fraction of the total amount of rows in the dataframe.
"""
|
scrubbed = ""
while True:
valcounts = df[col_name].str[-len(scrubbed)-1:].value_counts()
if not len(valcounts):
break
if not valcounts.iloc[0] >= (1-error_rate) * _utils.rows(df):
break
scrubbed=valcounts.index[0]
if scrubbed == '':
return None
which = df[col_name].str.endswith(scrubbed)
_basics.col_scrubb(df,col_name,which,len(scrubbed),True)
if not which.all():
new_col_name = _basics.colname_gen(df,"{}_sb-{}".format(col_name,scrubbed))
df[new_col_name] = which
return scrubbed
|
<SYSTEM_TASK:>
Find shipping methods for order.
<END_TASK>
<USER_TASK:>
Description:
def find_all_for_order(cls, order_id, **kwargs):
"""Find shipping methods for order.
Find all shipping methods suitable for an order.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.find_all_for_order(order_id, async=True)
>>> result = thread.get()
:param async bool
:param str order_id: Order ID to get shipping methods for. (required)
:return: page[ShippingMethod]
If the method is called asynchronously,
returns the request thread.
"""
|
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._find_all_for_order_with_http_info(order_id, **kwargs)
else:
(data) = cls._find_all_for_order_with_http_info(order_id, **kwargs)
return data
|
<SYSTEM_TASK:>
Extract span context from the given object
<END_TASK>
<USER_TASK:>
Description:
def extract(cls, obj):
""" Extract span context from the given object
:param Any obj: Object to use as context
:return: a SpanContext instance extracted from the inner span object or None if no
such span context could be found.
"""
|
span = cls.extract_span(obj)
if span:
return span.context
|
<SYSTEM_TASK:>
Flatten nested dict values in the given record data.
<END_TASK>
<USER_TASK:>
Description:
def fix_additional_fields(data):
"""description of fix_additional_fields"""
|
result = dict()
for key, value in data.items():
if isinstance(value, dict):
result.update(KserSpan.to_flat_dict(key, value))
else:
result[key] = value
return result
|
<SYSTEM_TASK:>
Filter GELF record keys using exclude_patterns
<END_TASK>
<USER_TASK:>
Description:
def filter_keys(cls, data):
"""Filter GELF record keys using exclude_patterns
:param dict data: Log record has dict
:return: the filtered log record
:rtype: dict
"""
|
keys = list(data.keys())
for pattern in cls.EXCLUDE_PATTERNS:
    # removing items from `keys` while iterating over it skips elements,
    # so rebuild the list instead
    keys = [key for key in keys if not re.match(pattern, key)]
return {key: value for key, value in data.items() if key in keys}
|
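A usage sketch with a hypothetical pattern list:
# With EXCLUDE_PATTERNS = [r"^_", r"^password"]:
# filter_keys({"msg": "hi", "_internal": 1, "password_hash": "x"})
#   -> {"msg": "hi"}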
<SYSTEM_TASK:>
Write the configuration to a local file.
<END_TASK>
<USER_TASK:>
Description:
def write_config(self):
"""Write the configuration to a local file.
:return: Boolean if successful
"""
|
with open(CONFIG_FILE, 'w') as config_file:
    json.dump(
        self.config,
        config_file,
        indent=4,
        separators=(',', ': ')
    )
return True
|
<SYSTEM_TASK:>
Load the configuration for the user or seed it with defaults.
<END_TASK>
<USER_TASK:>
Description:
def load_config(self, **kwargs):
"""Load the configuration for the user or seed it with defaults.
:return: Boolean if successful
"""
|
virgin_config = False
if not os.path.exists(CONFIG_PATH):
virgin_config = True
os.makedirs(CONFIG_PATH)
if not os.path.exists(CONFIG_FILE):
virgin_config = True
if not virgin_config:
with open(CONFIG_FILE) as config_file:
    self.config = json.load(config_file)
else:
self.logger.info('[!] Processing whitelists, this may take a few minutes...')
process_whitelists()
if kwargs:
self.config.update(kwargs)
if virgin_config or kwargs:
self.write_config()
if 'api_key' not in self.config:
sys.stderr.write('configuration missing API key\n')
if 'email' not in self.config:
sys.stderr.write('configuration missing email\n')
if not ('api_key' in self.config and 'email' in self.config):
sys.stderr.write('Errors have been reported. Run blockade-cfg '
'to fix these warnings.\n')
try:
last_update = datetime.strptime(self.config['whitelist_date'],
"%Y-%m-%d")
current = datetime.now()
delta = (current - last_update).days
if delta > 14:
self.logger.info('[!] Refreshing whitelists, this may take a few minutes...')
process_whitelists()
self.config['whitelist_date'] = datetime.now().strftime("%Y-%m-%d")
self.write_config()
except Exception as e:
self.logger.error(str(e))
self.logger.info('[!] Processing whitelists, this may take a few minutes...')
process_whitelists()
self.config['whitelist_date'] = datetime.now().strftime("%Y-%m-%d")
self.write_config()
return True
|
<SYSTEM_TASK:>
r"""Function distribution of combination of chi-squared distributions.
<END_TASK>
<USER_TASK:>
Description:
def chi2comb_cdf(q, chi2s, gcoef, lim=1000, atol=1e-4):
r"""Function distribution of combination of chi-squared distributions.
Parameters
----------
q : float
Value point at which distribution function is to be evaluated.
chi2s : ChiSquared
Chi-squared distributions.
gcoef : float
Coefficient of the standard Normal distribution.
lim : int
Maximum number of integration terms.
atol : float
Absolute error tolerance.
Returns
-------
result : float
Estimated c.d.f. evaluated at ``q``.
error : int
0: completed successfully
1: required accuracy not achieved
2: round-off error possibly significant
3: invalid parameters
4: unable to locate integration parameters
5: out of memory
info : Info
Algorithm information.
"""
|
int_type = "i"
if array(int_type, [0]).itemsize != ffi.sizeof("int"):
int_type = "l"
if array(int_type, [0]).itemsize != ffi.sizeof("int"):
raise RuntimeError("Could not infer a proper integer representation.")
if array("d", [0.0]).itemsize != ffi.sizeof("double"):
raise RuntimeError("Could not infer a proper double representation.")
q = float(q)
c_chi2s = ffi.new("struct chi2comb_chisquareds *")
c_info = ffi.new("struct chi2comb_info *")
ncents = array("d", [float(i.ncent) for i in chi2s])
coefs = array("d", [float(i.coef) for i in chi2s])
dofs = array(int_type, [int(i.dof) for i in chi2s])
c_chi2s.ncents = ffi.cast("double *", ncents.buffer_info()[0])
c_chi2s.coefs = ffi.cast("double *", coefs.buffer_info()[0])
c_chi2s.dofs = ffi.cast("int *", dofs.buffer_info()[0])
c_chi2s.n = len(chi2s)
result = ffi.new("double *")
errno = c_chi2comb_cdf(q, c_chi2s, gcoef, lim, atol, c_info, result)
info = Info()
methods = ["emag", "niterms", "nints", "intv", "truc", "sd", "ncycles"]
for i in methods:
setattr(info, i, getattr(c_info, i))
return (result[0], errno, info)
|
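For reference, a hypothetical call through the public wrapper (the ChiSquared fields are assumed to be coef, ncent and dof, matching the struct fields above):
# from chi2comb import chi2comb_cdf, ChiSquared
# chi2s = [ChiSquared(coef=1.0, ncent=0.0, dof=1)]
# result, errno, info = chi2comb_cdf(1.0, chi2s, gcoef=0.0)
# result is approximately 0.6827, i.e. P(X <= 1) for a central chi-squared, 1 dof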
<SYSTEM_TASK:>
Return True or False for the given feature.
<END_TASK>
<USER_TASK:>
Description:
def show_feature(self, user, feature):
"""Return True or False for the given feature.
"""
|
user_filter = {
self.model.USER_FEATURE_FIELD: user,
}
return self.get_feature(feature).filter(
models.Q(**user_filter) | models.Q(everyone=True)).exists()
|
<SYSTEM_TASK:>
Calculates the quasiparticle weight of degenerate system of N-bands
<END_TASK>
<USER_TASK:>
Description:
def calc_z(bands, filling, interaction, hund_cu, name):
"""Calculates the quasiparticle weight of degenerate system of N-bands
at a given filling within an interaction range and saves the file"""
|
while True:
try:
data = np.load(name+'.npz')
break
except IOError:
dopout = []
for dop in filling:
slsp = Spinon(slaves=2*bands, orbitals=bands, \
hopping=[0.5]*2*bands, populations=[dop]*2*bands)
dopout.append(solve_loop(slsp, interaction, hund_cu)[0][0])
np.savez(name, zeta=dopout, u_int=interaction, doping=filling, hund=hund_cu)
return data
|
<SYSTEM_TASK:>
Generates a simple plot of the quasiparticle weight decay curves given
<END_TASK>
<USER_TASK:>
Description:
def plot_curves_z(data, name, title=None):
"""Generates a simple plot of the quasiparticle weight decay curves given
data object with doping setup"""
|
plt.figure()
for zet, c in zip(data['zeta'], data['doping']):
plt.plot(data['u_int'], zet[:, 0], label='$n={}$'.format(str(c)))
if title is not None:
plt.title(title)
label_saves(name+'.png')
|
<SYSTEM_TASK:>
Generate a 2D array of the quasiparticle weight by only selecting the
<END_TASK>
<USER_TASK:>
Description:
def pick_flat_z(data):
"""Generate a 2D array of the quasiparticle weight by only selecting the
first particle data"""
|
zmes = []
for i in data['zeta']:
zmes.append(i[:, 0])
return np.asarray(zmes)
|
<SYSTEM_TASK:>
2D color plot of the quasiparticle weight as a function of interaction
<END_TASK>
<USER_TASK:>
Description:
def imshow_z(data, name):
"""2D color plot of the quasiparticle weight as a function of interaction
and doping"""
|
zmes = pick_flat_z(data)
plt.figure()
plt.imshow(zmes.T, origin='lower', \
extent=[data['doping'].min(), data['doping'].max(), \
0, data['u_int'].max()], aspect=.16)
plt.colorbar()
plt.xlabel('$n$', fontsize=20)
plt.ylabel('$U/D$', fontsize=20)
plt.savefig(name+'_imshow.png', dpi=300, format='png',
transparent=False, bbox_inches='tight', pad_inches=0.05)
|
<SYSTEM_TASK:>
Generates the plot of the convergence of the mean field in single
<END_TASK>
<USER_TASK:>
Description:
def plot_mean_field_conv(N=1, n=0.5, Uspan=np.arange(0, 3.6, 0.5)):
"""Generates the plot on the convergenge of the mean field in single
site spin hamiltonian under with N degenerate half-filled orbitals """
|
sl = Spinon(slaves=2*N, orbitals=N, avg_particles=2*n,
hopping=[0.5]*2*N, orbital_e=[0]*2*N)
hlog = solve_loop(sl, Uspan, [0.])[1]
f, (ax1, ax2) = plt.subplots(2, sharex=True)
for field in hlog:
field = np.asarray(field)
ax1.semilogy(abs(field[1:]-field[:-1]))
ax2.plot(field)#, label = 'h, U = {}'.format(Uint))
plt.title('Convergence of self-consistent mean field')
ax1.set_ylabel('$\\Delta h$')
ax2.set_ylabel('mean field $h$')
plt.xlabel('iterations')
return hlog
|
<SYSTEM_TASK:>
Returns the model properties as a dict
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
"""Returns the model properties as a dict"""
|
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Payment, dict):
for key, value in self.items():
result[key] = value
return result
|
<SYSTEM_TASK:>
Method to return back a loaded instance.
<END_TASK>
<USER_TASK:>
Description:
def from_config(cls):
"""Method to return back a loaded instance."""
|
config = Config()
client = cls(
email=config.get('email'),
api_key=config.get('api_key'),
server=config.get('api_server'),
http_proxy=config.get('http_proxy'),
https_proxy=config.get('https_proxy'),
)
return client
|
<SYSTEM_TASK:>
Return the URL for the action.
<END_TASK>
<USER_TASK:>
Description:
def _endpoint(self, endpoint, action, *url_args):
"""Return the URL for the action.
:param str endpoint: The controller
:param str action: The action provided by the controller
:param url_args: Additional endpoints(for endpoints that take part of
the url as option)
:return: Full URL for the requested action
"""
|
args = (self.api_base, endpoint, action)
if action == '':
args = (self.api_base, endpoint)
api_url = "/".join(args)
if url_args:
    # join with an explicit "/" separator; the original code dropped the
    # separator before the first extra segment when several were supplied
    api_url += "/" + "/".join(url_args)
return api_url
|
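A worked example with hypothetical values:
# self.api_base = "https://api.example.com/v1"
# self._endpoint("actions", "search", "2019", "07")
#   -> "https://api.example.com/v1/actions/search/2019/07"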
<SYSTEM_TASK:>
JSON response from server.
<END_TASK>
<USER_TASK:>
Description:
def _json(self, response):
"""JSON response from server.
:param response: Response from the server
:throws ValueError: from requests' response.json() error
:return: response deserialized from JSON
"""
|
if response.status_code == 204:
return None
try:
return response.json()
except ValueError as e:
raise ValueError(
'Exception: %s\n'
'request: %s, response code: %s, response: %s' % (
str(e), response.request.url, response.status_code,
response.content,
)
)
|
<SYSTEM_TASK:>
Request API Endpoint - for GET methods.
<END_TASK>
<USER_TASK:>
Description:
def _get(self, endpoint, action, *url_args, **url_params):
"""Request API Endpoint - for GET methods.
:param str endpoint: Endpoint
:param str action: Endpoint Action
:param url_args: Additional endpoints(for endpoints that take part of
the url as option)
:param url_params: Parameters to pass to url, typically query string
:return: response deserialized from JSON
"""
|
api_url = self._endpoint(endpoint, action, *url_args)
kwargs = {'headers': self.headers, 'params': url_params,
'timeout': Client.TIMEOUT, 'verify': self.verify}
if self.proxies:
kwargs['proxies'] = self.proxies
self.logger.debug("Requesting: %s, %s" % (api_url, str(kwargs)))
response = requests.get(api_url, **kwargs)
return self._json(response)
|
<SYSTEM_TASK:>
Submit to API Endpoint - for DELETE, PUT, POST methods.
<END_TASK>
<USER_TASK:>
Description:
def _send_data(self, method, endpoint, action,
data, *url_args, **url_params):
"""Submit to API Endpoint - for DELETE, PUT, POST methods.
:param str method: Method to use for the request
:param str endpoint: Endpoint
:param str action: Endpoint Action
:param url_args: Additional endpoints(for endpoints that take part of
the url as option)
:param url_params: Parameters to pass to url, typically query string
:return: response deserialized from JSON
"""
|
api_url = self._endpoint(endpoint, action, *url_args)
data.update({'email': self.email, 'api_key': self.api_key})
data = json.dumps(data)
kwargs = {'headers': self.headers, 'params': url_params,
'verify': self.verify, 'data': data}
if self.proxies:
kwargs['proxies'] = self.proxies
self.logger.debug("Requesting: %s %s, %s" % (method, api_url,
str(kwargs)))
response = requests.request(method, api_url, **kwargs)
self.logger.debug("Response: %d, %s" % (response.status_code,
response.content))
return self._json(response)
|
<SYSTEM_TASK:>
Process all the events for logging and S3.
<END_TASK>
<USER_TASK:>
Description:
def process_events(events, source_ip):
"""Process all the events for logging and S3."""
|
s3 = boto3.resource('s3')
table = boto3.resource("dynamodb").Table(os.environ['database'])
with table.batch_writer() as batch:
for idx, event in enumerate(events):
event = convert_keys_to_string(event)
event['sourceIp'] = source_ip
# hashlib requires bytes under Python 3
event['event'] = hashlib.sha256(str(event).encode('utf-8')).hexdigest()
metadata = event['metadata']
timestamp = str(event['metadata']['timeStamp'])
event['metadata']['timeStamp'] = timestamp
kwargs = {'match': event['indicatorMatch'],
'type': metadata['type'],
'method': metadata['method'].lower(),
'time': event['analysisTime'], 'ip': source_ip}
file_struct = '{match}_{type}_{method}_{ip}_{time}.json'
file_name = file_struct.format(**kwargs)
key_path = '/tmp/%s' % file_name
output = json.dumps(event, indent=4, sort_keys=True)
with open(key_path, "w") as handle:
    handle.write(output)
with open(key_path, 'rb') as data:
    s3.Bucket(os.environ['s3_bucket']).put_object(Key=file_name,
                                                  Body=data)
logger.info("EVENT: %s" % str(event))
batch.put_item(Item=event)
return True
|
<SYSTEM_TASK:>
Sets the cost_type of this TableRateShipping.
<END_TASK>
<USER_TASK:>
Description:
def cost_type(self, cost_type):
"""Sets the cost_type of this TableRateShipping.
:param cost_type: The cost_type of this TableRateShipping.
:type: str
"""
|
allowed_values = ["orderSubtotal", "weight"]
if cost_type is not None and cost_type not in allowed_values:
raise ValueError(
"Invalid value for `cost_type` ({0}), must be one of {1}"
.format(cost_type, allowed_values)
)
self._cost_type = cost_type
|
<SYSTEM_TASK:>
Authenticate request using the HTTP Basic authentication protocol.
<END_TASK>
<USER_TASK:>
Description:
def authenticate(self, request):
""" Authenticate request using HTTP Basic authentication protocl.
If the user is successfully identified, the corresponding user
object is stored in `request.user`. If the request has already
been authenticated (i.e. `request.user` has authenticated user
object), this function does nothing.
Raises Forbidden or Unauthorized if the user authentication
fails. If no exception is thrown, the `request.user` will
contain authenticated user object.
"""
|
# todo: can we trust that request.user variable is even defined?
if request.user and request.user.is_authenticated():
return request.user
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
if auth[0].lower() == "basic":
uname, passwd = base64.b64decode(auth[1]).split(':')
user = authenticate(username=uname, password=passwd)
if user is not None:
if user.is_active:
request.user = user
return user
else:
raise Forbidden()
# either no auth header or using some other auth protocol,
# we'll return a challenge for the user anyway
raise Unauthorized()
|
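For reference, a client builds the header this method parses like so (an illustrative sketch, not part of the class):
import base64
credentials = base64.b64encode(b"alice:secret").decode("ascii")
headers = {"Authorization": "Basic %s" % credentials}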
<SYSTEM_TASK:>
Clean up previous garbage
<END_TASK>
<USER_TASK:>
Description:
def clean():
"""Clean up previous garbage"""
|
os.chdir(os.path.join(project_root, 'docs'))
sh("make clean")
os.chdir(project_root)
sh("rm -rf pyoauth2.egg-info")
|
<SYSTEM_TASK:>
generator that holds a rolling average
<END_TASK>
<USER_TASK:>
Description:
def average():
""" generator that holds a rolling average """
|
count = 0
# binding the result to the same name (`total = total()`) makes `total`
# local and raises UnboundLocalError, so use a fresh name and prime it
running_total = total()
next(running_total)
i = 0
while 1:
    i = yield ((running_total.send(i) * 1.0) / count if count else 0)
    count += 1
|
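The companion total() coroutine is not shown in the source; a minimal version compatible with the loop above might look like this (an assumption, not the original code):
def total():
    """Running-total coroutine: receives a value, yields the sum so far."""
    t = 0
    while True:
        x = yield t
        t += x
# Usage:
# avg = average()
# next(avg)      # prime; yields 0
# avg.send(10)   # -> 10.0
# avg.send(20)   # -> 15.0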
<SYSTEM_TASK:>
Parsed command-line arguments.
<END_TASK>
<USER_TASK:>
Description:
def args(self):
"""Parsed command-line arguments."""
|
if self._args is None:
parser = self._build_parser()
self._args = parser.parse_args()
return self._args
|
<SYSTEM_TASK:>
Build the string representing the parsed package basename.
<END_TASK>
<USER_TASK:>
Description:
def build_pypackage_basename(self, pytree, base):
"""Build the string representing the parsed package basename.
:param str pytree: The pytree absolute path.
:param str base: The absolute path of the pytree sub-package for which to determine
the parsed name.
:rtype: str
"""
|
dirname = os.path.dirname(pytree)
parsed_package_name = base.replace(dirname, '').strip('/')
return parsed_package_name
|
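A worked example with hypothetical paths:
# build_pypackage_basename(pytree="/repo/mypkg", base="/repo/mypkg/sub")
#   -> "mypkg/sub"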
<SYSTEM_TASK:>
Build the needed command-line parser.
<END_TASK>
<USER_TASK:>
Description:
def _build_parser(self):
"""Build the needed command-line parser."""
|
parser = argparse.ArgumentParser()
parser.add_argument('--pytree',
required=True,
type=self._valid_directory,
help='This is the path, absolute or relative, of the Python package '
'that is to be parsed.')
parser.add_argument('--doctree',
required=True,
type=self._valid_directory,
help='This is the path, absolute or relative, of the documentation '
'package that is to be parsed.')
parser.add_argument('--no-fail',
action='store_true',
help='Using this option will cause this program to return an exit '
'code of 0 even when the given trees do not match.')
parser.add_argument('--doc-ignores',
action=AddDocIgnores,
help='A comma separated list of additional doc files to ignore')
return parser
|
<SYSTEM_TASK:>
Build the expected Python file name based on the given documentation file name.
<END_TASK>
<USER_TASK:>
Description:
def build_pyfile_path_from_docname(self, docfile):
"""Build the expected Python file name based on the given documentation file name.
:param str docfile: The documentation file name from which to build the Python file name.
:rtype: str
"""
|
name, ext = os.path.splitext(docfile)
expected_py_name = name.replace('.', '/') + '.py'
return expected_py_name
|
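A worked example:
# build_pyfile_path_from_docname("mypkg.module.rst") -> "mypkg/module.py"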
<SYSTEM_TASK:>
Calculate the differences between the given trees.
<END_TASK>
<USER_TASK:>
Description:
def calculate_tree_differences(self, pytree, doctree):
"""Calculate the differences between the given trees.
:param dict pytree: The dictionary of the parsed Python tree.
:param dict doctree: The dictionary of the parsed documentation tree.
:rtype: tuple
:returns: A two-tuple of sets, where the first is the missing Python files, and the second
is the missing documentation files.
"""
|
pykeys = set(pytree.keys())
dockeys = set(doctree.keys())
# Calculate the missing documentation files, if any.
missing_doc_keys = pykeys - dockeys
missing_docs = {pytree[pyfile] for pyfile in missing_doc_keys}
# Calculate the missing Python files, if any.
missing_py_keys = dockeys - pykeys
missing_pys = set(missing_py_keys)
return missing_pys, missing_docs
|
<SYSTEM_TASK:>
Compare the given parsed trees.
<END_TASK>
<USER_TASK:>
Description:
def compare_trees(self, parsed_pytree, parsed_doctree):
"""Compare the given parsed trees.
:param dict parsed_pytree: A dictionary representing the parsed Python tree where each
key is a parsed Python file and its key is its expected rst file name.
"""
|
if parsed_pytree == parsed_doctree:
return 0
missing_pys, missing_docs = self.calculate_tree_differences(pytree=parsed_pytree,
doctree=parsed_doctree)
self.pprint_tree_differences(missing_pys=missing_pys, missing_docs=missing_docs)
return 0 if self.args.no_fail else 1
|
<SYSTEM_TASK:>
Parse the given documentation tree.
<END_TASK>
<USER_TASK:>
Description:
def parse_doc_tree(self, doctree, pypackages):
"""Parse the given documentation tree.
:param str doctree: The absolute path to the documentation tree which is to be parsed.
:param set pypackages: A set of all Python packages found in the pytree.
:rtype: dict
:returns: A dict where each key is the path of an expected Python module and its value is
the parsed rst module name (relative to the documentation tree).
"""
|
parsed_doctree = {}
for filename in os.listdir(doctree):
if self._ignore_docfile(filename):
continue
expected_pyfile = self.build_pyfile_path_from_docname(filename)
parsed_doctree[expected_pyfile] = filename
pypackages = {name + '.py' for name in pypackages}
return {elem: parsed_doctree[elem] for elem in parsed_doctree if elem not in pypackages}
|
<SYSTEM_TASK:>
Parse the given Python package tree.
<END_TASK>
<USER_TASK:>
Description:
def parse_py_tree(self, pytree):
"""Parse the given Python package tree.
:param str pytree: The absolute path to the Python tree which is to be parsed.
:rtype: dict
:returns: A two-tuple. The first element is a dict where each key is the path of a parsed
Python module (relative to the Python tree) and its value is the expected rst module
name. The second element is a set where each element is a Python package or
sub-package.
:rtype: tuple
"""
|
parsed_pytree = {}
pypackages = set()
for base, dirs, files in os.walk(pytree):
if self._ignore_pydir(os.path.basename(base)):
continue
# TODO(Anthony): If this is being run against a Python 3 package, this needs to be
# adapted to account for namespace packages.
elif '__init__.py' not in files:
continue
package_basename = self.build_pypackage_basename(pytree=pytree, base=base)
pypackages.add(package_basename)
for filename in files:
if self._ignore_pyfile(filename):
continue
parsed_path = os.path.join(package_basename, filename)
parsed_pytree[parsed_path] = self.build_rst_name_from_pypath(parsed_path)
return parsed_pytree, pypackages
|
<SYSTEM_TASK:>
Pprint the missing files of each given set.
<END_TASK>
<USER_TASK:>
Description:
def pprint_tree_differences(self, missing_pys, missing_docs):
"""Pprint the missing files of each given set.
:param set missing_pys: The set of missing Python files.
:param set missing_docs: The set of missing documentation files.
:rtype: None
"""
|
if missing_pys:
print('The following Python files appear to be missing:')
for pyfile in missing_pys:
print(pyfile)
print('\n')
if missing_docs:
print('The following documentation files appear to be missing:')
for docfile in missing_docs:
    print(docfile)
print('\n')
|
<SYSTEM_TASK:>
Ensure that the given path is valid.
<END_TASK>
<USER_TASK:>
Description:
def _valid_directory(self, path):
"""Ensure that the given path is valid.
:param str path: A valid directory path.
:raises: :py:class:`argparse.ArgumentTypeError`
:returns: An absolute directory path.
"""
|
abspath = os.path.abspath(path)
if not os.path.isdir(abspath):
raise argparse.ArgumentTypeError('Not a valid directory: {}'.format(abspath))
return abspath
|
<SYSTEM_TASK:>
Parse package trees and report on any discrepancies.
<END_TASK>
<USER_TASK:>
Description:
def main(self):
"""Parse package trees and report on any discrepancies."""
|
args = self.args
parsed_pytree, pypackages = self.parse_py_tree(pytree=args.pytree)
parsed_doctree = self.parse_doc_tree(doctree=args.doctree, pypackages=pypackages)
return self.compare_trees(parsed_pytree=parsed_pytree, parsed_doctree=parsed_doctree)
|
<SYSTEM_TASK:>
delete by ip and object id
<END_TASK>
<USER_TASK:>
Description:
def delete_where_unique(cls, ip, object_id, location):
""" delete by ip and object id """
|
result = cls.where_unique(ip, object_id, location)
if result is None:
return None
result.delete()
return True
|
<SYSTEM_TASK:>
Used internally to send a request to the API, left public
<END_TASK>
<USER_TASK:>
Description:
def do_req(self, method, url, body=None, headers=None, status=None):
"""Used internally to send a request to the API, left public
so it can be used to talk to the API more directly.
"""
|
if body is None:
body = ''
else:
body = json.dumps(body)
res = self.backend.dispatch_request(method=method,
url=url,
body=body,
headers=self.get_headers(headers),
auth=self.auth)
if not isinstance(res, MapiResponse):
res = MapiResponse(*res)
if status is None:
if res.status // 100 != 2:
raise MapiError(*res)
elif res.status != status:
raise MapiError(*res)
return res
|
<SYSTEM_TASK:>
GETs the url provided and traverses the 'next' url that's
<END_TASK>
<USER_TASK:>
Description:
def _depaginate_all(self, url):
"""GETs the url provided and traverses the 'next' url that's
returned while storing the data in a list. Returns a single list of all
items.
"""
|
items = []
for x in self._depagination_generator(url):
items += x
return items
|
<SYSTEM_TASK:>
u"""Create user for the Merchant given in the X-Mcash-Merchant header.
<END_TASK>
<USER_TASK:>
Description:
def create_user(self, user_id,
roles=None, netmask=None,
secret=None, pubkey=None):
u"""Create user for the Merchant given in the X-Mcash-Merchant header.
Arguments:
user_id:
Identifier for the user
roles:
Role
netmask:
Limit user connections by netmask, for example 192.168.1.0/24
secret:
Secret used when authenticating with mCASH
pubkey:
RSA key used for authenticating by signing
"""
|
arguments = {'id': user_id,
'roles': roles,
'netmask': netmask,
'secret': secret,
'pubkey': pubkey}
return self.do_req('POST', self.merchant_api_base_url + '/user/', arguments).json()
|
<SYSTEM_TASK:>
Update user. Returns the raw response object.
<END_TASK>
<USER_TASK:>
Description:
def update_user(self, user_id,
roles=None, netmask=None,
secret=None, pubkey=None):
"""Update user. Returns the raw response object.
Arguments:
user_id:
User id of user to update
roles:
Role
netmask:
Limit user connections by netmask, for example 192.168.1.0/24
secret:
Secret used when authenticating with mCASH
pubkey:
RSA key used for authenticating by signing
"""
|
arguments = {'roles': roles,
'netmask': netmask,
'secret': secret,
'pubkey': pubkey}
return self.do_req('PUT',
self.merchant_api_base_url + '/user/' +
user_id + '/', arguments)
|
<SYSTEM_TASK:>
Create POS resource
<END_TASK>
<USER_TASK:>
Description:
def create_pos(self, name, pos_type,
pos_id, location=None):
"""Create POS resource
Arguments:
name:
Human-readable name of the POS, used for displaying payment
request origin to end user
pos_type:
POS type
location:
Merchant location
pos_id:
The ID of the POS that is to be created. Has to be unique for
the merchant
"""
|
arguments = {'name': name,
'type': pos_type,
'id': pos_id,
'location': location}
return self.do_req('POST', self.merchant_api_base_url + '/pos/', arguments).json()
|
<SYSTEM_TASK:>
Update POS resource. Returns the raw response object.
<END_TASK>
<USER_TASK:>
Description:
def update_pos(self, pos_id, name, pos_type, location=None):
"""Update POS resource. Returns the raw response object.
Arguments:
pos_id:
POS id as chosen on registration
name:
Human-readable name of the POS, used for displaying payment
request origin to end user
pos_type:
POS type
location:
Merchant location
"""
|
arguments = {'name': name,
'type': pos_type,
'location': location}
return self.do_req('PUT',
self.merchant_api_base_url + '/pos/' +
pos_id + '/', arguments)
|
<SYSTEM_TASK:>
Post payment request. The call is idempotent; that is, if one posts
<END_TASK>
<USER_TASK:>
Description:
def create_payment_request(self, customer, currency, amount, allow_credit,
pos_id, pos_tid, action, ledger=None,
display_message_uri=None, callback_uri=None,
additional_amount=None, additional_edit=None,
text=None, expires_in=None, required_scope=None,
required_scope_text=None, links=None, line_items=None):
"""Post payment request. The call is idempotent; that is, if one posts
the same pos_id and pos_tid twice, only one payment request is created.
Arguments:
ledger:
Log entries will be added to the open report on the specified
ledger
display_message_uri:
Messages that can be used to inform the POS operator about the
progress of the payment request will be POSTed to this URI if
provided
callback_uri:
If provided, mCASH will POST to this URI when the status of the
payment request changes, using the message mechanism described
in the introduction. The data in the "object" part of the
message is the same as what can be retrieved by calling GET on
the "/payment_request/<tid>/outcome/" resource URI.
customer:
Customer identifiers include msisdn, scan token or access token
currency:
3 chars https://en.wikipedia.org/wiki/ISO_4217
amount:
The base amount of the payment
additional_amount:
Typically cash withdrawal or gratuity
additional_edit:
Whether the user is allowed to add an additional amount for
gratuity or similar
allow_credit:
Whether to allow credit payment for this payment request.
Credit incurs interchange
pos_id:
The POS this payment request originates from, used for
informing user about origin
pos_tid:
Local transaction id for POS. This must be unique for the POS
text:
Text that is shown to user when asked to pay. This can contain
linebreaks and the text has to fit on smartphones screens.
action:
Action to perform, the main difference is what it looks like in
App UI.
expires_in:
Expiration in seconds from when server received request
required_scope:
Scopes required to fulfill payment
required_scope_text:
Text that is shown to user when asked for permission.
links:
A list of links to be shown in app in various states
[{"uri": "http://example.com/uri1",
"caption": "This is always shown",
"show_on": ["pending", "fail", "ok"]}]
line_items:
A list of product lines in the payment request. Each item should
contain product_id, vat, description (optional), vat_rate, total,
item_cost, quantity and optionally tags, which is a list of tag
dicts containing tag_id and label. The sum of all
line item totals must be equal to the amount argument.
[{"product_id": "product-1", vat: "0.50",
description: "An optional description", vat_rate: "0.25",
total: "5.00", item_cost: "2.50", quantity: "2", "tags": [
{"tag_id": "product-info-5", "label": "Some product info"}
]}]
"""
|
arguments = {'customer': customer,
'currency': currency,
'amount': amount,
'allow_credit': allow_credit,
'pos_id': pos_id,
'pos_tid': pos_tid,
'action': action,
'ledger': ledger,
'display_message_uri': display_message_uri,
'callback_uri': callback_uri,
'additional_amount': additional_amount,
'additional_edit': additional_edit,
'text': text,
'expires_in': expires_in}
if required_scope:
arguments['required_scope'] = required_scope
arguments['required_scope_text'] = required_scope_text
if links:
arguments['links'] = links
if line_items:
arguments['line_items'] = line_items
return self.do_req('POST', self.merchant_api_base_url + '/payment_request/',
arguments).json()
|
<SYSTEM_TASK:>
Update payment request, reauthorize, capture, release or abort
<END_TASK>
<USER_TASK:>
Description:
def update_payment_request(self, tid, currency=None, amount=None,
action=None, ledger=None, callback_uri=None,
display_message_uri=None, capture_id=None,
additional_amount=None, text=None, refund_id=None,
required_scope=None, required_scope_text=None, line_items=None):
"""Update payment request, reauthorize, capture, release or abort
It is possible to update ledger and the callback URIs for a payment
request. Changes are always appended to the open report of a ledger,
and notifications are sent to the callback registered at the time of
notification.
Capturing an authorized payment or reauthorizing is done with the
action field.
The call is idempotent; that is, if one posts the same amount,
additional_amount and capture_id twice with action CAPTURE, only one
capture is performed. Similarly, if one posts twice with action CAPTURE
without any amount stated, to capture the full amount, only one full
capture is performed.
Arguments:
ledger:
Log entries will be added to the open report on the specified
ledger
display_message_uri:
Messages that can be used to inform the POS operator about the
progress of the payment request will be POSTed to this URI if
provided
callback_uri:
If provided, mCASH will POST to this URI when the status of the
payment request changes, using the message mechanism described
in the introduction. The data in the "object" part of the
message is the same as what can be retrieved by calling GET on
the "/payment_request/<tid>/outcome/" resource URI.
currency:
3 chars https://en.wikipedia.org/wiki/ISO_4217
amount:
The base amount of the payment
additional_amount:
Typically cash withdrawal or gratuity
capture_id:
Local id for capture. Must be set if amount is set, otherwise
capture_id must be unset.
tid:
Transaction id assigned by mCASH
refund_id:
Refund id needed when doing partial refund
text:
For example reason for refund.
action:
Action to perform.
required_scope:
Scopes required to fulfill payment
line_items:
An updated line_items. Will fail if line_items
already set in the payment request or if the sum of the totals
is different from the original amount.
required_scope_text:
Text that is shown to user when asked for permission.
"""
|
arguments = {'ledger': ledger,
'display_message_uri': display_message_uri,
'callback_uri': callback_uri,
'currency': currency,
'amount': amount,
'additional_amount': additional_amount,
'capture_id': capture_id,
'action': action,
'text': text,
'refund_id': refund_id}
if required_scope:
arguments['required_scope'] = required_scope
arguments['required_scope_text'] = required_scope_text
if line_items:
arguments['line_items'] = line_items
arguments = {k: v for k, v in arguments.items() if v is not None}
return self.do_req('PUT',
self.merchant_api_base_url + '/payment_request/' +
tid + '/', arguments)
|
<SYSTEM_TASK:>
post a chat message
<END_TASK>
<USER_TASK:>
Description:
def post_chat_message(self, merchant_id, channel_id, message):
"""post a chat message
Arguments:
channel_id:
Scan token
"""
|
return self.do_req('POST',
self.base_url + '/chat/v1/merchant/%s/channel/%s/message/' % (merchant_id, channel_id),
message)
|
<SYSTEM_TASK:>
Register new shortlink
<END_TASK>
<USER_TASK:>
Description:
def create_shortlink(self, callback_uri=None,
description=None, serial_number=None):
"""Register new shortlink
Arguments:
callback_uri:
URI called by mCASH when user scans shortlink
description:
Shortlink description displayed in confirmation dialogs
serial_number:
Serial number on printed QR codes. This field is only used when
registering printed stickers issued by mCASH
"""
|
arguments = {'callback_uri': callback_uri,
'description': description,
'serial_number': serial_number}
return self.do_req('POST', self.merchant_api_base_url + '/shortlink/',
arguments).json()
|
<SYSTEM_TASK:>
Update existing shortlink registration
<END_TASK>
<USER_TASK:>
Description:
def update_shortlink(self, shortlink_id, callback_uri=None,
description=None):
"""Update existing shortlink registration
Arguments:
shortlink_id:
Shortlink id assigned by mCASH
"""
|
arguments = {'callback_uri': callback_uri,
'description': description}
return self.do_req('PUT',
self.merchant_api_base_url + '/shortlink/' +
shortlink_id + '/', arguments)
|
<SYSTEM_TASK:>
Retrieve registered shortlink info
<END_TASK>
<USER_TASK:>
Description:
def get_shortlink(self, shortlink_id_or_url):
"""Retrieve registered shortlink info
Arguments:
shortlink_id_or_url:
Shortlink id or url, assigned by mCASH
"""
|
if "://" not in shortlink_id_or_url:
shortlink_id_or_url = self.merchant_api_base_url + '/shortlink/' + shortlink_id_or_url + '/'
return self.do_req('GET', shortlink_id_or_url).json()
|
<SYSTEM_TASK:>
u"""Close Report
<END_TASK>
<USER_TASK:>
Description:
def close_report(self, ledger_id, report_id, callback_uri=None):
u"""Close Report
When you PUT to a report, it will start the process of closing it. When
the closing process is complete (i.e. when report.status == 'closed')
mCASH does a POST call to callback_uri, if provided. This call will
contain JSON data similar to when GETing the Report.
Closing a report automatically opens a new one.
The contents of a GET
/merchant/v1/ledger/<ledger_id>/report/<report_id>/ are included in the
callback if the callback is a secure URI, otherwise the link itself is sent
in the callback.
Arguments:
ledger_id:
Id for ledger for report
report_id:
Report id assigned by mCASH
callback_uri:
Callback URI to be called when Report has finished closing.
"""
|
arguments = {'callback_uri': callback_uri}
return self.do_req('PUT',
self.merchant_api_base_url + '/ledger/' +
ledger_id + '/report/' +
report_id + '/', arguments)
|
<SYSTEM_TASK:>
Create permission request
<END_TASK>
<USER_TASK:>
Description:
def create_permission_request(self, customer, pos_id, pos_tid, scope,
ledger=None, text=None, callback_uri=None,
expires_in=None):
"""Create permission request
The call is idempotent; that is, if one posts the same pos_id and
pos_tid twice, only one Permission request is created.
"""
|
arguments = {'customer': customer,
'pos_id': pos_id,
'pos_tid': pos_tid,
'scope': scope,
'ledger': ledger,
'text': text,
'callback_uri': callback_uri,
'expires_in': expires_in}
return self.do_req('POST',
self.merchant_api_base_url + '/permission_request/',
arguments).json()
|
<SYSTEM_TASK:>
Upload a receipt to the given URL
<END_TASK>
<USER_TASK:>
Description:
def upload_receipt(self, url, data):
"""Upload a receipt to the give url
:param url:
:param data:
:return:
"""
|
return self.upload_attachment(url=url, data=data, mime_type='application/vnd.mcash.receipt.v1+json')
|
<SYSTEM_TASK:>
This is a safe function handler for any twitter request.
<END_TASK>
<USER_TASK:>
Description:
def safe_twitter_request_handler(twitter_api_func,
call_rate_limit,
call_counter,
time_window_start,
max_retries,
wait_period,
*args, **kw):
"""
This is a safe function handler for any twitter request.
Inputs: - twitter_api_func: The twython function object to be safely called.
- call_rate_limit: The call rate limit for this specific Twitter API function.
- call_counter: A counter that keeps track of the number of function calls in the current 15-minute window.
- time_window_start: The timestamp of the current 15-minute window.
- max_retries: Number of call retries allowed before abandoning the effort.
- wait_period: For certain Twitter errors (e.g. server overload), we wait and call again.
- *args, **kw: The parameters of the twython function to be called.
Outputs: - twitter_api_function_result: The results of the Twitter function.
- call_counter: A counter that keeps track of the number of function calls in the current 15-minute window.
- time_window_start: The timestamp of the current 15-minute window.
Raises: - twython.TwythonError
- urllib.error.URLError
- http.client.BadStatusLine
"""
|
error_count = 0
while True:
try:
# If we have reached the call rate limit for this function:
if call_counter >= call_rate_limit:
# Reset counter.
call_counter = 0
# Sleep for the appropriate time.
elapsed_time = time.perf_counter() - time_window_start
sleep_time = 15*60 - elapsed_time
if sleep_time < 0.1:
sleep_time = 0.1
time.sleep(sleep_time)
# Initialize new 15-minute time window.
time_window_start = time.perf_counter()
else:
call_counter += 1
twitter_api_function_result = twitter_api_func(*args, **kw)
return twitter_api_function_result, call_counter, time_window_start
except twython.TwythonError as e:
# If it is a Twitter error, handle it.
error_count, call_counter, time_window_start, wait_period = handle_twitter_http_error(e,
error_count,
call_counter,
time_window_start,
wait_period)
if error_count > max_retries:
print("Max error count reached. Abandoning effort.")
raise e
except URLError as e:
error_count += 1
if error_count > max_retries:
print("Max error count reached. Abandoning effort.")
raise e
except BadStatusLine as e:
error_count += 1
if error_count > max_retries:
print("Max error count reached. Abandoning effort.")
raise e
|
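A hedged usage sketch. `twitter` is assumed to be an authenticated twython.Twython instance; the limit of 15 calls per 15-minute window is illustrative and should be checked against the documented limit for the specific endpoint.

import time
import twython

# Placeholder credentials; supply your own app/OAuth tokens.
twitter = twython.Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
call_counter = 0
time_window_start = time.perf_counter()

followers, call_counter, time_window_start = safe_twitter_request_handler(
    twitter.get_followers_ids,  # the twython function object to call safely
    15,                         # call_rate_limit for this endpoint (assumed)
    call_counter,
    time_window_start,
    5,                          # max_retries
    2,                          # initial wait_period in seconds
    screen_name="twitter")      # forwarded to get_followers_ids via **kw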
<SYSTEM_TASK:>
This function handles the twitter request in case of an HTTP error.
<END_TASK>
<USER_TASK:>
Description:
def handle_twitter_http_error(e, error_count, call_counter, time_window_start, wait_period):
"""
This function handles the twitter request in case of an HTTP error.
Inputs: - e: A twython.TwythonError instance to be handled.
- error_count: Number of failed retries of the call until now.
- call_counter: A counter that keeps track of the number of function calls in the current 15-minute window.
- time_window_start: The timestamp of the current 15-minute window.
- wait_period: For certain Twitter errors (e.g. server overload), we wait and call again.
Outputs: - call_counter: A counter that keeps track of the number of function calls in the current 15-minute window.
- time_window_start: The timestamp of the current 15-minute window.
- wait_period: For certain Twitter errors (e.g. server overload), we wait and call again.
Raises: - twython.TwythonError
"""
|
if e.error_code == 401:
# Encountered 401 Error (Not Authorized)
raise e
elif e.error_code == 404:
# Encountered 404 Error (Not Found)
raise e
elif e.error_code == 429:
# Encountered 429 Error (Rate Limit Exceeded)
# Sleep for 15 minutes
error_count += 0.5
call_counter = 0
wait_period = 2
time.sleep(60*15 + 5)
time_window_start = time.perf_counter()
return error_count, call_counter, time_window_start, wait_period
elif e.error_code in (500, 502, 503, 504):
error_count += 1
time.sleep(wait_period)
wait_period *= 1.5
return error_count, call_counter, time_window_start, wait_period
else:
raise e
|
<SYSTEM_TASK:>
Does all of the processing required to create a bundle and write it to disk, returning its hash version
<END_TASK>
<USER_TASK:>
Description:
def make_bundle(bundle, fixed_version=None):
"""
Does all of the processing required to create a bundle and write it to disk, returning its hash version
"""
|
tmp_output_file_name = '%s.%s.%s' % (os.path.join(bundle.bundle_file_root, bundle.bundle_filename), 'temp', bundle.bundle_type)
iter_input = iter_bundle_files(bundle)
output_pipeline = processor_pipeline(bundle.processors, iter_input)
m = md5()
with open(tmp_output_file_name, 'wb') as output_file:
for chunk in output_pipeline:
m.update(chunk)
output_file.write(chunk)
hash_version = fixed_version or m.hexdigest()
output_file_name = bundle.get_path(hash_version)
os.rename(tmp_output_file_name, output_file_name)
return hash_version
|
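A note on the design: writing to a temporary file and publishing it with os.rename means a half-written bundle is never visible under its final name. The rename is atomic on POSIX only when source and destination live on the same filesystem, which holds here as long as bundle.get_path resolves under bundle.bundle_file_root.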
<SYSTEM_TASK:>
returns a plain text string when given an HTML string
<END_TASK>
<USER_TASK:>
Description:
def html_to_text(html_string):
"""
returns a plain text string when given an HTML string;
handles a, p, h1 to h6 and br tags, inserting newline chars to
create space in the string
@todo handle images
"""
|
# create a valid html document from string
# beware that it inserts <html>, <body> and <p> tags
# where needed
html_tree = html.document_fromstring(html_string)
# handle header tags
for h in html_tree.cssselect("h1, h2, h3, h4, h5, h6"):
# add two newlines after a header tag
h.text = (h.text or '') + '\n\n'  # guard: text may be None if the header starts with a child element
# handle links
# find all a tags starting from the root of the document //
# and replace the link with (link)
for a in html_tree.xpath("//a"):
href = a.get('href', '')  # guard against anchors without an href attribute
a.text = (a.text or '') + " (" + href + ")"
# handle paragraphs
for p in html_tree.xpath("//p"):
# keep the tail if there is one
# or add two newlines after the text if there is no tail
p.tail = p.tail if p.tail else "\n\n"
# handle breaks
for br in html_tree.xpath("//br"):
# add a newline and then the tail (remaining text after the <br/> tag)
# or add a newline only if there is no tail
# http://stackoverflow.com/questions/18660382/how-can-i-preserve-br-as-newlines-with-lxml-html-text-content-or-equivalent?rq=1
br.tail = "\n" + br.tail if br.tail else "\n"
return html_tree.text_content()
|
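A quick usage sketch (requires lxml; the printed output is approximate, since exact whitespace depends on the input structure):

sample = ('<h1>Title</h1>'
          '<p>See the <a href="https://example.com">docs</a>.</p>'
          'first<br/>second')
print(html_to_text(sample))
# Title
#
# See the docs (https://example.com).
#
# first
# second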
<SYSTEM_TASK:>
By default generates a random string of 10 chars composed
<END_TASK>
<USER_TASK:>
Description:
def random_string(**kwargs):
"""
By default generates a random string of 10 chars composed
of digits and ascii lowercase letters. String length and pool can
be overridden via kwargs. The pool may be a string or any sequence
of characters.
"""
|
n = kwargs.get('length', 10)
pool = kwargs.get('pool') or string.digits + string.ascii_lowercase
return ''.join(random.SystemRandom().choice(pool) for _ in range(n))
|
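For example, a 16-character uppercase-only token; because the helper draws from random.SystemRandom (the OS entropy source), it is also reasonable for security-sensitive tokens:

import string

token = random_string(length=16, pool=string.ascii_uppercase)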
<SYSTEM_TASK:>
Wrapper that runs _run_parallel_process under the profiler.
<END_TASK>
<USER_TASK:>
Description:
def _run_parallel_process_with_profiling(self, start_path, stop_path, queue, filename):
"""
Wrapper that runs _run_parallel_process under the profiler, writing stats to filename.
"""
|
runctx('Engine._run_parallel_process(self, start_path, stop_path, queue)', globals(), locals(), filename)
|
<SYSTEM_TASK:>
The function calls _run_process and puts results produced by
<END_TASK>
<USER_TASK:>
Description:
def _run_parallel_process(self, start_path, stop_path, queue):
"""
The function calls _run_process and puts the results produced by
the consumer (via consumer.put()) into the queue
"""
|
process_num = int(current_process().name.split('-', 2)[1])
self._run_process(start_path, stop_path, process_num)
queue.put(self.consumer.put())
|
<SYSTEM_TASK:>
The function calls _run_path for the given range of paths
<END_TASK>
<USER_TASK:>
Description:
def _run_process(self, start_path, stop_path, process_num=0):
"""
The function calls _run_path for the given range of paths
"""
|
# pre processing
self.producer.initialize_worker(process_num)
self.consumer.initialize_worker(process_num)
# processing
for path in range(start_path, stop_path):
self._run_path(path)
# post processing
self.consumer.finalize_worker(process_num)
|
<SYSTEM_TASK:>
standalone function implementing a single Monte Carlo path
<END_TASK>
<USER_TASK:>
Description:
def _run_path(self, path_num):
"""
standalone function implementing a single Monte Carlo path;
the consumer records the producer's state at each grid date
:param int path_num: path number
"""
|
# pre processing
self.producer.initialize_path(path_num)
self.consumer.initialize_path(path_num)
# processing
for new_date in self.grid:
state = self.producer.evolve(new_date)
self.consumer.consume(state)
# post processing
self.consumer.finalize_path(path_num)
|
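The engine only touches the producer through duck-typed methods (initialize_worker, initialize_path, evolve). A minimal illustrative producer under that assumption — this is a sketch, not an actual base class from the library:

import random

class RandomWalkProducer(object):
    """Illustrative producer: a Gaussian random walk."""

    def __init__(self, seed=None):
        self.seed = seed or 0
        self.rng = random.Random(self.seed)
        self.state = 0.0

    def initialize_worker(self, process_num=0):
        # reseed per worker so parallel processes do not repeat paths
        self.rng = random.Random(self.seed + process_num)

    def initialize_path(self, path_num=None):
        self.state = 0.0

    def evolve(self, new_date):
        # one Gaussian increment per grid date; new_date is unused here
        self.state += self.rng.gauss(0.0, 1.0)
        return self.state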
<SYSTEM_TASK:>
reinitialize consumer for a process in multiprocessing
<END_TASK>
<USER_TASK:>
Description:
def initialize_worker(self, process_num=None):
"""
reinitialize consumer for a process in multiprocessing
"""
|
self.initialize(self.grid, self.num_of_paths, self.seed)
|
<SYSTEM_TASK:>
initialize consumer for next path
<END_TASK>
<USER_TASK:>
Description:
def initialize_path(self, path_num=None):
"""
initialize consumer for next path
"""
|
self.state = copy(self.initial_state)
return self.state
|
<SYSTEM_TASK:>
consume new producer state
<END_TASK>
<USER_TASK:>
Description:
def consume(self, state):
"""
consume new producer state
"""
|
self.state.append(self.func(state))
return self.state
|
<SYSTEM_TASK:>
get states from a multiprocessing queue
<END_TASK>
<USER_TASK:>
Description:
def get(self, queue_get):
"""
get states from a multiprocessing queue and extend the result list
"""
|
if isinstance(queue_get, (tuple, list)):
self.result.extend(queue_get)
|
<SYSTEM_TASK:>
Return files found by the file-finder 'ff'.
<END_TASK>
<USER_TASK:>
Description:
def walk_revctrl(dirname='', ff=''):
"""Return files found by the file-finder 'ff'.
"""
|
file_finder = None
items = []
if not ff:
distutils.log.error('No file-finder passed to walk_revctrl')
sys.exit(1)
for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
if ff == ep.name:
distutils.log.info('using %s file-finder', ep.name)
file_finder = ep.load()
finder_items = []
with pythonpath_off():
for item in file_finder(dirname):
if not basename(item).startswith(('.svn', '.hg', '.git')):
finder_items.append(item)
distutils.log.info('%d files found', len(finder_items))
items.extend(finder_items)
if file_finder is None:
distutils.log.error('Failed to load %s file-finder; setuptools-%s extension missing?',
ff, 'subversion' if ff == 'svn' else ff)
sys.exit(1)
# Returning a non-empty list prevents egg_info from reading the
# existing SOURCES.txt
return items or ['']
|
<SYSTEM_TASK:>
Remove .pyc files we leave around because of import.
<END_TASK>
<USER_TASK:>
Description:
def cleanup_pycache():
"""Remove .pyc files we leave around because of import.
"""
|
try:
for file in glob.glob('setup.py[co]'):
os.remove(file)
if isdir('__pycache__'):
for file in glob.glob(join('__pycache__', 'setup.*.py[co]')):
os.remove(file)
if not glob.glob(join('__pycache__', '*')):
os.rmdir('__pycache__')
except (IOError, OSError):
pass
|
<SYSTEM_TASK:>
Get the iterator over the sorted items.
<END_TASK>
<USER_TASK:>
Description:
def _get_sorted_iterator(self, iterator):
"""
Get the iterator over the sorted items.
This function decides whether the items can be sorted in memory or on disk.
:return: An iterator over the sorted items, in memory or disk-backed.
"""
|
lines = list(next(iterator))
if len(lines) < self.max_lines:
return iter(sorted(lines, key=self.key))
import tempfile
tmp_dir = tempfile.mkdtemp()
fnames = self._split(chain([lines], iterator), tmp_dir)
return SortedIteratorMerger([unpickle_iter(open(fname, 'rb')) for fname in fnames], self.key)
|
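unpickle_iter is used above but not defined in this excerpt. A plausible sketch, consistent with _write below (which pickle-dumps one item per record); the name and signature are assumptions:

import pickle

def unpickle_iter(fhndl):
    """Yield successively pickled objects from an open binary file until EOF."""
    try:
        while True:
            yield pickle.load(fhndl)
    except EOFError:
        fhndl.close()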
<SYSTEM_TASK:>
Splits the file into several chunks.
<END_TASK>
<USER_TASK:>
Description:
def _split(self, iterator, tmp_dir):
"""
Splits the file into several chunks.
If the original file is too big to fit in the allocated space, the sorting will be split into several chunks,
then merged.
:param tmp_dir: Where to put the intermediate sorted results.
:param iterator: An iterator over chunks of lines; the first chunk holds the lines already read.
:return: The names of the intermediate files.
"""
|
fnames = []
for i, lines in enumerate(iterator):
lines = list(lines)
out_fname = os.path.join(tmp_dir, self.TMP_FNAME.format(i + 1))
self._write(lines, out_fname)
fnames.append(out_fname)
if len(lines) < self.max_lines:
break
return fnames
|
<SYSTEM_TASK:>
Writes an intermediate temporary sorted file
<END_TASK>
<USER_TASK:>
Description:
def _write(self, lines, fname):
"""
Writes an intermediate temporary sorted file
:param lines: The lines to write.
:param fname: The name of the temporary file.
:return:
"""
|
with open(fname, 'wb') as out_fhndl:
for line in sorted(lines, key=self.key):
pickle.dump(line, out_fhndl)
|
<SYSTEM_TASK:>
r"""Combign date and time into an iso datetime.
<END_TASK>
<USER_TASK:>
Description:
def get_iso_time(date_part, time_part):
r"""Combign date and time into an iso datetime."""
|
str_date = datetime.datetime.strptime(
date_part, '%m/%d/%Y').strftime('%Y-%m-%d')
str_time = datetime.datetime.strptime(
time_part, '%I:%M %p').strftime('%H:%M:%S')
# NOTE: the UTC offset is hard-coded; "-07:00" is the valid ISO-8601 form.
return str_date + "T" + str_time + "-07:00"
|
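For example, with the corrected -07:00 offset (note the offset is fixed rather than derived from the inputs):

>>> get_iso_time('07/04/2021', '1:30 PM')
'2021-07-04T13:30:00-07:00'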
<SYSTEM_TASK:>
Pulls the list of users in a client.
<END_TASK>
<USER_TASK:>
Description:
def get_user_list(host_name, client_name, client_pass):
"""
Pulls the list of users in a client.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
Output: - user_id_list: A python list of user ids.
"""
|
# Construct request.
request = construct_request(model_type="pers",
client_name=client_name,
client_pass=client_pass,
command="getusrs",
values="whr=*")
# Make request.
request_result = send_request(host_name, request)
# Extract a python list from xml object.
user_id_list = list()
append_user_id = user_id_list.append
if request_result is not None:
user_list_xml = request_result.text
tree = etree.parse(StringIO(user_list_xml))
root = tree.getroot()
xml_rows = root.findall("./result/row/usr")
for xml_row in xml_rows:
append_user_id(xml_row.text)
return user_id_list
|
<SYSTEM_TASK:>
Add a number of numerical features to the client.
<END_TASK>
<USER_TASK:>
Description:
def add_features(host_name, client_name, client_pass, feature_names):
"""
Add a number of numerical features to the client.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
- feature_names: A python list of feature names.
"""
|
init_feats = ("&".join(["%s=0"]*len(feature_names))) % tuple(feature_names)
features_req = construct_request("pers",
client_name,
client_pass,
"addftr",
init_feats)
send_request(host_name,
features_req)
|
<SYSTEM_TASK:>
Remove a number of numerical features from the client. If a list is not provided, remove all features.
<END_TASK>
<USER_TASK:>
Description:
def delete_features(host_name, client_name, client_pass, feature_names=None):
"""
Remove a number of numerical features from the client. If a list is not provided, remove all features.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
- feature_names: A python list of feature names.
"""
|
# Get all features.
if feature_names is None:
feature_names = get_feature_names(host_name,
client_name,
client_pass)
# Remove all features.
feature_to_be_removed = ("&".join(["ftr=%s"]*len(feature_names))) % tuple(feature_names)
features_req = construct_request("pers",
client_name,
client_pass,
'remftr',
feature_to_be_removed)
send_request(host_name,
features_req)
|
<SYSTEM_TASK:>
Get the names of all features in a PServer client.
<END_TASK>
<USER_TASK:>
Description:
def get_feature_names(host_name, client_name, client_pass):
"""
Get the names of all features in a PServer client.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
Output: - feature_names: A python list of feature names.
"""
|
# Construct request.
request = construct_request(model_type="pers",
client_name=client_name,
client_pass=client_pass,
command="getftrdef",
values="ftr=*")
# Send request.
request_result = send_request(host_name,
request)
# Extract a python list from xml object.
feature_names = list()
append_feature_name = feature_names.append
if request_result is not None:
feature_names_xml = request_result.text
tree = etree.parse(StringIO(feature_names_xml))
root = tree.getroot()
xml_rows = root.findall("row/ftr")
for xml_row in xml_rows:
append_feature_name(xml_row.text)
return feature_names
|
<SYSTEM_TASK:>
Construct the request url.
<END_TASK>
<USER_TASK:>
Description:
def construct_request(model_type, client_name, client_pass, command, values):
"""
Construct the request url.
Inputs: - model_type: PServer usage mode type.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
- command: A PServer command.
- values: PServer command arguments.
Output: - base_request: The base request string.
"""
|
base_request = ("{model_type}?"
"clnt={client_name}|{client_pass}&"
"com={command}&{values}".format(model_type=model_type,
client_name=client_name,
client_pass=client_pass,
command=command,
values=values))
return base_request
|
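For instance, the request that get_user_list builds resolves to the string below; the client name and password are placeholders:

>>> construct_request("pers", "myclient", "s3cret", "getusrs", "whr=*")
'pers?clnt=myclient|s3cret&com=getusrs&whr=*'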
<SYSTEM_TASK:>
Sends a PServer url request.
<END_TASK>
<USER_TASK:>
Description:
def send_request(host_name, request):
"""
Sends a PServer url request.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- request: The url request.
"""
|
request = "%s%s" % (host_name, request)
# print(request)
try:
result = requests.get(request)
if result.status_code == 200:
return result
else:
    raise Exception("PServer request failed with status code %s"
                    % result.status_code)
except Exception as e:
# print(e)
raise e
|
<SYSTEM_TASK:>
Updates a single topic score for a single user.
<END_TASK>
<USER_TASK:>
Description:
def update_feature_value(host_name, client_name, client_pass, user_twitter_id, feature_name, feature_score):
"""
Updates a single topic score for a single user.
Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
- client_name: The PServer client name.
- client_pass: The PServer client's password.
- user_twitter_id: A Twitter user identifier.
- feature_name: A specific PServer feature name.
- feature_score: The corresponding score.
"""
|
username = str(user_twitter_id)
feature_value = "{0:.2f}".format(feature_score)
joined_ftr_value = "ftr_" + feature_name + "=" + str(feature_value)
values = "usr=%s&%s" % (username, joined_ftr_value)
# Construct request.
request = construct_request(model_type="pers",
client_name=client_name,
client_pass=client_pass,
command="setusr",
values=values)
# Send request.
send_request(host_name,
request)
|
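A hedged end-to-end sketch tying the PServer helpers together; the host, client name, password, and feature names are placeholders:

host = "http://pserver.example.com/"
add_features(host, "myclient", "s3cret", ["politics", "sports"])
update_feature_value(host, "myclient", "s3cret",
                     user_twitter_id=12345678,
                     feature_name="politics",
                     feature_score=0.87)
print(get_feature_names(host, "myclient", "s3cret"))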
<SYSTEM_TASK:>
Initialize the application once the configuration has been loaded
<END_TASK>
<USER_TASK:>
Description:
def init_app(self, app):
"""
Initialize the application once the configuration has been loaded
there.
"""
|
self.app = app
self.log = app.logger.getChild('compass')
self.log.debug("Initializing compass integration")
self.compass_path = self.app.config.get('COMPASS_PATH', 'compass')
self.config_files = self.app.config.get('COMPASS_CONFIGS', None)
self.requestcheck_debug_only = self.app.config.get(
'COMPASS_REQUESTCHECK_DEBUG_ONLY', True)
self.skip_mtime_check = self.app.config.get(
'COMPASS_SKIP_MTIME_CHECK', False)
self.debug_only = self.app.config.get(
'COMPASS_DEBUG_ONLY', False)
self.disabled = self.app.config.get('COMPASS_DISABLED', False)
if not self.debug_only:
self.compile()
if (not self.debug_only) \
and (not self.requestcheck_debug_only or self.app.debug):
self.app.after_request(self.after_request)
|
<SYSTEM_TASK:>
Main entry point that compiles all the specified or found compass
<END_TASK>
<USER_TASK:>
Description:
def compile(self):
"""
Main entry point that compiles all the specified or found compass
projects.
"""
|
if self.disabled:
return
self._check_configs()
for _, cfg in self.configs.items():  # items() works on both Python 2 and 3
cfg.parse()
if cfg.changes_found() or self.skip_mtime_check:
self.log.debug("Changes found for " + cfg.path \
+ " or checks disabled. Compiling...")
cfg.compile(self)
|
<SYSTEM_TASK:>
after_request handler for compiling the compass projects with
<END_TASK>
<USER_TASK:>
Description:
def after_request(self, response):
"""
after_request handler for compiling the compass projects with
each request.
"""
|
if response is not None and request is not None:
# When used as response processor, only run if we are requesting
# anything but a static resource.
if request.endpoint in [None, "static"]:
return response
self.compile()
return response
|
<SYSTEM_TASK:>
Reloads the configuration files.
<END_TASK>
<USER_TASK:>
Description:
def _check_configs(self):
"""
Reloads the configuration files.
"""
|
configs = set(self._find_configs())
known_configs = set(self.configs.keys())
new_configs = configs - known_configs
for cfg in (known_configs - configs):
self.log.debug("Compass configuration has been removed: " + cfg)
del self.configs[cfg]
for cfg in new_configs:
self.log.debug("Found new compass configuration: " + cfg)
self.configs[cfg] = CompassConfig(cfg)
|
<SYSTEM_TASK:>
Scans the project directory for config files or returns
<END_TASK>
<USER_TASK:>
Description:
def _find_configs(self):
"""
Scans the project directory for config files or returns
the explicitly specified list of files.
"""
|
if self.config_files is not None:
return self.config_files
# Walk the whole project tree and look for "config.rb" files
result = []
for path, _, files in os.walk(self.app.root_path):
if "config.rb" in files:
result.append(os.path.join(path, "config.rb"))
return result
|
<SYSTEM_TASK:>
Parse the given compass config file
<END_TASK>
<USER_TASK:>
Description:
def parse(self, replace=False):
"""
Parse the given compass config file
"""
|
if self.last_parsed is not None \
and self.last_parsed > os.path.getmtime(self.path) \
and not replace:
return
self.last_parsed = time.time()
with open(self.path, 'r') as file_:
for line in file_:
match = CONFIG_LINE_RE.match(line.rstrip())
if match:
if match.group(1) == 'sass_dir':
self.src = os.path.join(
self.base_dir, match.group(2)[1:-1])
elif match.group(1) == 'css_dir':
self.dest = os.path.join(
self.base_dir, match.group(2)[1:-1])
|
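CONFIG_LINE_RE is referenced but not defined in this excerpt. A minimal pattern consistent with the parsing above (quoted sass_dir/css_dir assignments in a compass config.rb); this is an assumption, not the module's actual regex:

import re

# group(1) is the setting name; group(2) is the quoted value, quotes
# included -- which is why parse() strips them with [1:-1].
CONFIG_LINE_RE = re.compile(r'^\s*(\w+)\s*=\s*("[^"]*"|\'[^\']*\')\s*$')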