text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Load translations from an existing path.
<END_TASK>
<USER_TASK:>
Description:
def add_path(self, path):
    """Load translations from an existing path.

    :param path: Directory containing compiled translation catalogs.
    :raises RuntimeError: If the path does not exist on disk.
    """
    if not os.path.exists(path):
        # Fixed grammar in the error message ("does not exists" -> "does not exist").
        raise RuntimeError('Path does not exist: %s.' % path)
    self.paths.append(path)
<SYSTEM_TASK:>
Get translation for a specific locale.
<END_TASK>
<USER_TASK:>
Description:
def _get_translation_for_locale(self, locale):
    """Get translation for a specific locale.

    Loads a compiled catalog from every registered path and merges them
    into a single ``Translations`` object.

    :param locale: Locale identifier to load catalogs for.
    :return: Merged ``Translations`` instance, or ``NullTranslations``
        when no compiled catalog was found in any path.
    """
    translations = None
    for dirname in self.paths:
        # Load a single catalog.
        catalog = Translations.load(dirname, [locale], domain=self.domain)
        if translations is None:
            # The first real catalog becomes the merge base; a
            # NullTranslations result is skipped here so a later path can
            # still provide the base.
            if isinstance(catalog, Translations):
                translations = catalog
            continue
        try:
            # Merge catalog into global catalog
            translations.merge(catalog)
        except AttributeError:
            # Translations is probably NullTranslations
            if isinstance(catalog, NullTranslations):
                current_app.logger.debug(
                    'Compiled translations seems to be missing'
                    ' in {0}.'.format(dirname))
                continue
            # Unexpected catalog type: surface the original error.
            raise
    return translations or NullTranslations()
<SYSTEM_TASK:>
Return the correct gettext translations for a request.
<END_TASK>
<USER_TASK:>
Description:
def get_translations(self):
    """Return the correct gettext translations for a request.

    This will never fail and return a dummy translation object if used
    outside of the request or if a translation cannot be found.
    """
    ctx = _request_ctx_stack.top
    if ctx is None:
        return NullTranslations()
    current_locale = get_locale()
    cache = self.get_translations_cache(ctx)
    cache_key = str(current_locale)
    result = cache.get(cache_key)
    if result is None:
        # Cache miss: build and memoize the merged catalog.
        result = self._get_translation_for_locale(current_locale)
        cache[cache_key] = result
    return result
<SYSTEM_TASK:>
Return a JSONEncoder for handling lazy strings from Babel.
<END_TASK>
<USER_TASK:>
Description:
def get_lazystring_encoder(app):
    """Return a JSONEncoder for handling lazy strings from Babel.

    Installed on Flask application by default by :class:`InvenioI18N`.
    """
    from speaklater import _LazyString

    class JSONEncoder(app.json_encoder):
        def default(self, o):
            # Evaluate lazy strings to plain text before encoding;
            # delegate everything else to the application's encoder.
            if not isinstance(o, _LazyString):
                return super(JSONEncoder, self).default(o)
            return text_type(o)

    return JSONEncoder
<SYSTEM_TASK:>
Iterate over list of languages.
<END_TASK>
<USER_TASK:>
Description:
def iter_languages(self):
    """Iterate over list of languages.

    Yields ``(language_code, display_title)`` pairs: the default locale
    first, then every entry of the ``I18N_LANGUAGES`` configuration.
    """
    default_code = self.babel.default_locale.language
    yield (
        default_code,
        self.babel.default_locale.get_display_name(default_code),
    )
    for code, title in current_app.config.get('I18N_LANGUAGES', []):
        yield code, title
<SYSTEM_TASK:>
Get a list of supported locales.
<END_TASK>
<USER_TASK:>
Description:
def get_locales(self):
    """Get a list of supported locales.

    Computes the list using ``I18N_LANGUAGES`` configuration variable.
    """
    if self._locales_cache is None:
        configured = current_app.config.get('I18N_LANGUAGES', [])
        # Default locale always comes first, followed by configured ones.
        self._locales_cache = [self.babel.default_locale] + [
            self.babel.load_locale(code) for code, _title in configured
        ]
    return self._locales_cache
<SYSTEM_TASK:>
generate doit tasks for each file
<END_TASK>
<USER_TASK:>
Description:
def _gen_tasks(self):
"""generate doit tasks for each file""" |
for filename in self.args:
path = os.path.abspath(filename)
yield {
'name': path,
# 'file_dep': [path],
'actions': [(self.fun, (filename,))],
} |
<SYSTEM_TASK:>
Helper method that empties an iterable as it is iterated over.
<END_TASK>
<USER_TASK:>
Description:
def drain(iterable):
    """
    Helper method that empties an iterable as it is iterated over.

    Works for:

    * ``dict``
    * ``collections.deque``
    * ``list``
    * ``set``

    :param iterable: Collection exposing ``popleft``, ``popitem`` or ``pop``.
    :yields: Items removed from *iterable*, one at a time.
    """
    # Pick the removal method the collection supports.
    if getattr(iterable, "popleft", False):
        def next_item(coll):
            return coll.popleft()
    elif getattr(iterable, "popitem", False):
        def next_item(coll):
            return coll.popitem()
    else:
        def next_item(coll):
            return coll.pop()
    while True:
        try:
            yield next_item(iterable)
        except (IndexError, KeyError):
            # BUG FIX: `raise StopIteration` inside a generator becomes a
            # RuntimeError on Python 3.7+ (PEP 479); a plain `return` ends
            # the iteration cleanly.
            return
<SYSTEM_TASK:>
Distribute master instances in different nodes
<END_TASK>
<USER_TASK:>
Description:
def spread(nodes, n):
    """Distribute master instances in different nodes

    {
        "192.168.0.1": [node1, node2],
        "192.168.0.2": [node3, node4],
        "192.168.0.3": [node5, node6]
    } => [node1, node3, node5]
    """
    chosen = []
    # Round-robin over the hosts, taking one node per host per pass,
    # until n nodes are collected or every group is exhausted.
    while nodes and len(chosen) < n:
        for address in list(nodes):
            group = nodes[address]
            if not group:
                del nodes[address]
                continue
            chosen.append(group.pop(0))
            if len(chosen) >= n:
                break
    return chosen
<SYSTEM_TASK:>
Determine if URL is safe to redirect to.
<END_TASK>
<USER_TASK:>
Description:
def is_local_url(target):
    """Determine if URL is safe to redirect to."""
    current = urlparse(request.host_url)
    candidate = urlparse(urljoin(request.host_url, target))
    # Same host and a plain http(s) scheme => safe local redirect.
    same_host = current.netloc == candidate.netloc
    return same_host and candidate.scheme in ('http', 'https')
<SYSTEM_TASK:>
Compile shader file into Spir-V binary.
<END_TASK>
<USER_TASK:>
Description:
def compile_file_into_spirv(filepath, stage, optimization='size',
                            warnings_as_errors=False):
    """Compile shader file into Spir-V binary.

    This function uses shaderc to compile your glsl file code into Spir-V
    code.

    Args:
        filepath (strs): Absolute path to your shader file
        stage (str): Pipeline stage in ['vert', 'tesc', 'tese', 'geom',
                     'frag', 'comp']
        optimization (str): 'zero' (no optimization) or 'size' (reduce size)
        warnings_as_errors (bool): Turn warnings into errors

    Returns:
        bytes: Compiled Spir-V binary.

    Raises:
        CompilationError: If compilation fails.
    """
    # Read raw bytes; compile_into_spirv expects bytes, not str.
    with open(filepath, 'rb') as shader_file:
        source = shader_file.read()
    return compile_into_spirv(
        source, stage, filepath,
        optimization=optimization,
        warnings_as_errors=warnings_as_errors)
<SYSTEM_TASK:>
Compile shader code into Spir-V binary.
<END_TASK>
<USER_TASK:>
Description:
def compile_into_spirv(raw, stage, filepath, language="glsl",
                       optimization='size', suppress_warnings=False,
                       warnings_as_errors=False):
    """Compile shader code into Spir-V binary.

    This function uses shaderc to compile your glsl or hlsl code into Spir-V
    code. You can refer to the shaderc documentation.

    Args:
        raw (bytes): glsl or hlsl code (bytes format, not str)
        stage (str): Pipeline stage in ['vert', 'tesc', 'tese', 'geom',
                     'frag', 'comp']
        filepath (str): Absolute path of the file (needed for #include)
        language (str): 'glsl' or 'hlsl'
        optimization (str): 'zero' (no optimization) or 'size' (reduce size)
        suppress_warnings (bool): True to suppress warnings
        warnings_as_errors (bool): Turn warnings into errors

    Returns:
        bytes: Compiled Spir-V binary.

    Raises:
        CompilationError: If compilation fails.
    """
    # extract parameters: map human-readable names to shaderc enum values
    stage = stages_mapping[stage]
    lang = languages_mapping[language]
    opt = opt_mapping[optimization]
    # initialize options
    options = lib.shaderc_compile_options_initialize()
    lib.shaderc_compile_options_set_source_language(options, lang)
    lib.shaderc_compile_options_set_optimization_level(options, opt)
    lib.shaderc_compile_options_set_target_env(
        options, lib.shaderc_target_env_vulkan, 0)
    lib.shaderc_compile_options_set_auto_bind_uniforms(options, False)
    # install the #include resolution callbacks
    lib.shaderc_compile_options_set_include_callbacks(
        options, lib.resolve_callback, lib.release_callback, ffi.NULL)
    if suppress_warnings:
        lib.shaderc_compile_options_set_suppress_warnings(options)
    if warnings_as_errors:
        lib.shaderc_compile_options_set_warnings_as_errors(options)
    # initialize compiler
    compiler = lib.shaderc_compiler_initialize()
    # compile
    result = lib.shaderc_compile_into_spv(compiler, raw, len(raw), stage,
                                          str.encode(filepath), b"main",
                                          options)
    # extract result
    status = lib.shaderc_result_get_compilation_status(result)
    if status != lib.shaderc_compilation_status_success:
        # On failure: grab the log first, then free every native resource
        # before raising so nothing leaks.
        msg = _get_log(result)
        lib.shaderc_compile_options_release(options)
        lib.shaderc_result_release(result)
        lib.shaderc_compiler_release(compiler)
        raise CompilationError(msg)
    # Copy the compiled bytes out of the native buffer; this must happen
    # before the result object is released below.
    length = lib.shaderc_result_get_length(result)
    output_pointer = lib.shaderc_result_get_bytes(result)
    tmp = bytearray(length)
    ffi.memmove(tmp, output_pointer, length)
    spirv = bytes(tmp)
    # release resources
    lib.shaderc_compile_options_release(options)
    lib.shaderc_result_release(result)
    lib.shaderc_compiler_release(compiler)
    return spirv
<SYSTEM_TASK:>
Return a clone of file list files where all directories are recursively replaced with their contents.
<END_TASK>
<USER_TASK:>
Description:
def expand_folder(files):
    """Return a clone of file list files where all directories are
    recursively replaced with their contents."""
    expanded = []
    for entry in files:
        if not os.path.isdir(entry):
            expanded.append(entry)
            continue
        # Directories are replaced by every file found beneath them.
        for dirpath, _dirnames, filenames in os.walk(entry):
            expanded.extend(os.path.join(dirpath, fn) for fn in filenames)
    # Warn (but keep) entries that do not exist.
    for candidate in expanded:
        if not os.path.exists(candidate):
            sys.stderr.write('%s: No such file or directory\n' % candidate)
    return expanded
<SYSTEM_TASK:>
Return a list of strings corresponding to file names supplied by drag and drop or standard input.
<END_TASK>
<USER_TASK:>
Description:
def get_file_list():
    """Return a list of strings corresponding to file names supplied by
    drag and drop or standard input.

    :return: List of absolute paths with shell variables and ``~`` expanded.
    """
    if len(sys.argv) > 1:
        file_list = list(sys.argv[1:])  # make copy
    else:
        files_str = input('Select the files you want to process and drag and drop them onto this window, '
                          'or type their names separated by spaces. Paths containing spaces should be '
                          'surrounded by quotation marks.\nPress ENTER when you\'re done: ')
        # BUG FIX: the previous check `"win" in sys.platform` also matched
        # "darwin" (macOS); startswith targets Windows ("win32"/"cygwin"... ) only.
        if sys.platform.startswith('win'):
            # the POSIX shlex.split uses backslashes for escape sequences, so Windows paths need to set posix=False
            file_list = shlex.split(files_str, posix=False)
            # the non-POSIX shlex.split does not automatically clean quotation marks from the final product
            file_list = [f.replace('"', '').replace("'", "") for f in file_list]
        else:
            file_list = shlex.split(files_str, posix=True)
    # substitute in shell variables and get absolute paths
    for i in range(len(file_list)):
        file_list[i] = os.path.abspath(os.path.expanduser(os.path.expandvars(file_list[i])))
    return file_list
<SYSTEM_TASK:>
Delete nodes from the cluster
<END_TASK>
<USER_TASK:>
Description:
def delete(args):
    """Delete nodes from the cluster."""
    targets = [ClusterNode.from_uri(uri) for uri in args.nodes]
    cluster = Cluster.from_node(ClusterNode.from_uri(args.cluster))
    echo("Deleting...")
    for target in targets:
        cluster.delete_node(target)
    # Block until the cluster has converged.
    cluster.wait()
<SYSTEM_TASK:>
Balance slots in the cluster.
<END_TASK>
<USER_TASK:>
Description:
def reshard(args):
    """Balance slots in the cluster.

    This command will try its best to distribute slots equally.
    """
    entry_node = ClusterNode.from_uri(args.cluster)
    Cluster.from_node(entry_node).reshard()
<SYSTEM_TASK:>
Make node to be the slave of a master.
<END_TASK>
<USER_TASK:>
Description:
def replicate(ctx, args):
    """Make node to be the slave of a master."""
    slave = ClusterNode.from_uri(args.node)
    master = ClusterNode.from_uri(args.master)
    # Only a master node can accept replicas.
    if not master.is_master():
        ctx.abort("Node {!r} is not a master.".format(args.master))
    try:
        slave.replicate(master.name)
    except redis.ResponseError as exc:
        ctx.abort(str(exc))
    Cluster.from_node(master).wait()
<SYSTEM_TASK:>
Execute flushall in all cluster nodes.
<END_TASK>
<USER_TASK:>
Description:
def flushall(args):
    """Execute flushall in all cluster nodes."""
    cluster = Cluster.from_node(ClusterNode.from_uri(args.cluster))
    # Only masters need the command; replicas mirror them.
    for master_node in cluster.masters:
        master_node.flushall()
<SYSTEM_TASK:>
Begins the countdown
<END_TASK>
<USER_TASK:>
Description:
def start(label, at=None):
    """Begins the countdown"""
    moment = time.time() if at is None else at
    # Persist the started marker under the label.
    labels[label] = Marker().start(moment).dumps()
<SYSTEM_TASK:>
Stops the countdown
<END_TASK>
<USER_TASK:>
Description:
def stop(label, at=None, remove_from_labels=False, stop_once=True):
    """Stops the countdown"""
    if label not in labels:
        return None
    moment = time.time() if at is None else at
    timer = Marker().loads(labels[label])
    # A running timer always stops; a stopped one re-stops only when
    # stop_once is disabled.
    may_stop = timer.is_running() or (timer.is_stopped() and not stop_once)
    if may_stop:
        timer.stop(moment)
    if remove_from_labels:
        del labels[label]
    else:
        labels[label] = timer.dumps()
    return timer.duration()
<SYSTEM_TASK:>
Returns duration in seconds for label
<END_TASK>
<USER_TASK:>
Description:
def duration(label, stop_it=True, stop_at=None):
    """Returns duration in seconds for label"""
    if label not in labels:
        return None
    entry = labels[label]
    if "duration" in entry:
        return Duration(entry["duration"])
    # Not yet stopped: optionally stop now to obtain a duration.
    return stop(label, at=stop_at) if stop_it else None
<SYSTEM_TASK:>
Handle the response from QuadrigaCX.
<END_TASK>
<USER_TASK:>
Description:
def _handle_response(self, resp):
"""Handle the response from QuadrigaCX.
:param resp: Response from QuadrigaCX.
:type resp: requests.models.Response
:return: Response body.
:rtype: dict
:raise quadriga.exceptions.RequestError: If HTTP OK was not returned.
""" |
http_code = resp.status_code
if http_code not in self.http_success_status_codes:
raise RequestError(
response=resp,
message='[HTTP {}] {}'.format(http_code, resp.reason)
)
try:
body = resp.json()
except ValueError:
raise RequestError(
response=resp,
message='[HTTP {}] response body: {}'.format(
http_code,
resp.text
)
)
else:
if 'error' in body:
error_code = body['error'].get('code', '?')
raise RequestError(
response=resp,
message='[HTTP {}][ERR {}] {}'.format(
resp.status_code,
error_code,
body['error'].get('message', 'no error message')
),
error_code=error_code
)
return body |
<SYSTEM_TASK:>
Send an HTTP GET request to QuadrigaCX.
<END_TASK>
<USER_TASK:>
Description:
def get(self, endpoint, params=None):
    """Send an HTTP GET request to QuadrigaCX.

    :param endpoint: API endpoint.
    :type endpoint: str | unicode
    :param params: URL parameters.
    :type params: dict
    :return: Response body from QuadrigaCX.
    :rtype: dict
    :raise quadriga.exceptions.RequestError: If HTTP OK was not returned.
    """
    resp = self._session.get(
        url=self._url + endpoint,
        params=params,
        timeout=self._timeout,
    )
    # Centralized status/JSON/error handling.
    return self._handle_response(resp)
<SYSTEM_TASK:>
Displays this help menu.
<END_TASK>
<USER_TASK:>
Description:
def command_help(self, *args, **kwargs):
    """Displays this help menu."""
    print("Commands available:\n")
    prefix = "command_"
    # Every command_* attribute is documented by its own docstring.
    for attr in dir(self):
        if attr.startswith(prefix):
            doc = getattr(self, attr).__doc__.strip()
            print("%s:\n - %s\n" % (attr[len(prefix):], doc))
<SYSTEM_TASK:>
Detects whether we have everything needed to mount sshfs filesystems.
<END_TASK>
<USER_TASK:>
Description:
def command_preflight_check(self):
    """Detects whether we have everything needed to mount sshfs filesystems."""
    ok, failures = self.environment.perform_preflight_check()
    if not ok:
        # Report each problem and exit with a failure status.
        sys.stderr.write('Problems encountered:\n')
        for failure in failures:
            sys.stderr.write(' - %s\n' % failure)
        sys.exit(1)
    print('All checks pass.')
<SYSTEM_TASK:>
Log a debug message prefixed with order book name.
<END_TASK>
<USER_TASK:>
Description:
def _log(self, message):
"""Log a debug message prefixed with order book name.
:param message: Debug message.
:type message: str | unicode
""" |
self._logger.debug("{}: {}".format(self.name, message)) |
<SYSTEM_TASK:>
Return public orders that are currently open.
<END_TASK>
<USER_TASK:>
Description:
def get_public_orders(self, group=False):
    """Return public orders that are currently open.

    :param group: If set to True (default: False), orders with the same
        price are grouped.
    :type group: bool
    :return: Public orders currently open.
    :rtype: dict
    """
    self._log('get public orders')
    # The API expects 0/1 rather than a boolean.
    query = {'book': self.name, 'group': int(group)}
    return self._rest_client.get(endpoint='/order_book', params=query)
<SYSTEM_TASK:>
Return public trades that were completed recently.
<END_TASK>
<USER_TASK:>
Description:
def get_public_trades(self, time_frame='hour'):
    """Return public trades that were completed recently.

    :param time_frame: Time frame. Allowed values are "minute" for trades
        in the last minute, or "hour" for trades in the last hour (default:
        "hour").
    :type time_frame: str | unicode
    :return: Public trades completed recently.
    :rtype: [dict]
    """
    self._log('get public trades')
    query = {'book': self.name, 'time': time_frame}
    return self._rest_client.get(endpoint='/transactions', params=query)
<SYSTEM_TASK:>
Return user's orders that are currently open.
<END_TASK>
<USER_TASK:>
Description:
def get_user_orders(self):
    """Return user's orders that are currently open.

    :return: User's orders currently open.
    :rtype: [dict]
    """
    self._log('get user orders')
    payload = {'book': self.name}
    # Authenticated endpoints use POST with a signed payload.
    return self._rest_client.post(endpoint='/open_orders', payload=payload)
<SYSTEM_TASK:>
Return user's trade history.
<END_TASK>
<USER_TASK:>
Description:
def get_user_trades(self, limit=0, offset=0, sort='desc'):
    """Return user's trade history.

    :param limit: Maximum number of trades to return. If set to 0 or lower,
        all trades are returned (default: 0).
    :type limit: int
    :param offset: Number of trades to skip.
    :type offset: int
    :param sort: Method used to sort the results by date and time. Allowed
        values are "desc" for descending order, and "asc" for ascending
        order (default: "desc").
    :type sort: str | unicode
    :return: User's trade history.
    :rtype: [dict]
    """
    self._log('get user trades')
    payload = {
        'book': self.name,
        'limit': limit,
        'offset': offset,
        'sort': sort,
    }
    res = self._rest_client.post(endpoint='/user_transactions', payload=payload)
    # TODO Workaround for the broken limit param in QuadrigaCX API
    if 0 < limit < len(res):
        return res[:limit]
    return res
<SYSTEM_TASK:>
Place a buy order at market price.
<END_TASK>
<USER_TASK:>
Description:
def buy_market_order(self, amount):
    """Place a buy order at market price.

    :param amount: Amount of major currency to buy at market price.
    :type amount: int | float | str | unicode | decimal.Decimal
    :return: Order details.
    :rtype: dict
    """
    # Send the amount as a string to avoid float representation issues.
    amount = str(amount)
    self._log("buy {} {} at market price".format(amount, self.major))
    payload = {'book': self.name, 'amount': amount}
    return self._rest_client.post(endpoint='/buy', payload=payload)
<SYSTEM_TASK:>
Place a buy order at the given limit price.
<END_TASK>
<USER_TASK:>
Description:
def buy_limit_order(self, amount, price):
    """Place a buy order at the given limit price.

    :param amount: Amount of major currency to buy at limit price.
    :type amount: int | float | str | unicode | decimal.Decimal
    :param price: Limit price.
    :type price: int | float | str | unicode | decimal.Decimal
    :return: Order details.
    :rtype: dict
    """
    # Stringify numbers to avoid float representation issues on the wire.
    amount = str(amount)
    price = str(price)
    self._log("buy {} {} at limit price of {} {}".format(
        amount, self.major, price, self.minor
    ))
    payload = {'book': self.name, 'amount': amount, 'price': price}
    return self._rest_client.post(endpoint='/buy', payload=payload)
<SYSTEM_TASK:>
Check whether or not the user is logged in.
<END_TASK>
<USER_TASK:>
Description:
def _is_logged_in(self):
""" Check whether or not the user is logged in. """ |
# if the user has not logged in in 24 hours, relogin
if not self._http._has_session() or datetime.utcnow() >= self._lastlogin + timedelta(hours=24):
return self._login()
else:
return {} |
<SYSTEM_TASK:>
Get the state of a thing
<END_TASK>
<USER_TASK:>
Description:
def getState(self, **kwargs):
    """Get the state of a thing

    :param thing: a string with the name of the thing, which is then checked using getThings.
    :param thingUri: Uri (string) of the thing you are getting the state from, not checked against getThings.
    :return: a dict with the state of the Thing.
    :raises: ValueError if the requested thing does not exists. NameError if not logged in. SyntaxError when
        not exactly one of the params is given.
    """
    login_return = self._is_logged_in()
    if "thingUri" in kwargs:
        thingUri = kwargs['thingUri']
    elif "thing" in kwargs:
        thing = kwargs['thing']
        # Resolve the friendly name to its URI via the known things list.
        things = self._getThings()
        if not things:
            raise ValueError('No things available.')
        for t in things:
            if 'NAME' in t and t['NAME'] == thing and 'thingUri' in t:
                thingUri = t['thingUri']
                break
        else:
            raise ValueError('Unknown thing: ' + thing)
    else:
        # BUG FIX: the exception object was previously *returned* rather
        # than raised, so callers never saw the documented SyntaxError.
        raise SyntaxError("Please provide either the 'thing' name or the 'thingUri' not both and at least one")
    data = {
        "path": "/thing" + thingUri,
        "host": "https://thing.brunt.co:8080"
    }
    resp = self._http.request(data, RequestTypes.GET)
    resp.update(login_return)
    return resp
<SYSTEM_TASK:>
Change a variable of the thing. Mostly included for future additions.
<END_TASK>
<USER_TASK:>
Description:
def changeKey(self, **kwargs):
    """Change a variable of the thing. Mostly included for future additions.

    :param key: The value you want to change
    :param value: The new value
    :param thing: a string with the name of the thing, which is then checked using getThings.
    :param thingUri: Uri (string) of the thing you are getting the state from, not checked against getThings.
    :return: a dict with the state of the Thing.
    :raises: ValueError if the requested thing does not exists or the position is not between 0 and 100.
        NameError if not logged in. SyntaxError when not exactly one of the params is given.
    """
    login_return = self._is_logged_in()
    # check the thing being changed
    if "thingUri" in kwargs:
        thingUri = kwargs['thingUri']
    elif "thing" in kwargs:
        thing = kwargs['thing']
        things = self._getThings()
        if not things:
            raise ValueError('No things available.')
        for t in things:
            if 'NAME' in t and t['NAME'] == thing and 'thingUri' in t:
                thingUri = t['thingUri']
                break
        else:
            raise ValueError('Unknown thing: ' + thing)
    else:
        # BUG FIX: previously the exception object was returned, not raised.
        raise SyntaxError("Please provide either the 'thing' name or the 'thingUri' not both and at least one")
    if 'key' in kwargs:
        key = kwargs['key']
    else:
        raise SyntaxError("Please provide a key to change")
    if 'value' in kwargs:
        value = kwargs['value']
    else:
        raise SyntaxError("Please provide a value to change to")
    # BUG FIX: `str.find()` returns 0 (falsy) for a match at index 0 and -1
    # (truthy) for no match, so the old `if key.lower().find('position')`
    # test was wrong; use substring membership instead.
    if 'position' in key.lower():
        if int(value) < 0 or int(value) > 100:
            # BUG FIX: raise the exception instead of returning it.
            raise ValueError("Please set the position between 0 and 100.")
    # prepare data payload
    data = {
        "data": {
            str(key): str(value)
        },
        "path": "/thing" + thingUri,
        "host": "https://thing.brunt.co:8080"
    }
    # call the request method and return the response.
    resp = self._http.request(data, RequestTypes.PUT)
    resp.update(login_return)
    return resp
<SYSTEM_TASK:>
Change the position of the thing.
<END_TASK>
<USER_TASK:>
Description:
def changeRequestPosition(self, request_position, **kwargs):
    """Change the position of the thing.

    :param request_position: The new position for the slide (0-100)
    :param thing: a string with the name of the thing, which is then checked using getThings.
    :param thingUri: Uri (string) of the thing you are getting the state from, not checked against getThings.
    :return: a dict with the state of the Thing.
    :raises: ValueError if the requested thing does not exists or the position is not between 0 and 100.
        NameError if not logged in. SyntaxError when not exactly one of the params is given.
    """
    # Delegate to the generic key changer with the position key filled in.
    kwargs.update(key="requestPosition", value=request_position)
    return self.changeKey(**kwargs)
<SYSTEM_TASK:>
Create a Holoview NdLayout from a dictionnary of chart objects
<END_TASK>
<USER_TASK:>
Description:
def ndlayout_(self, dataset, kdims, cols=3):
    """
    Create a Holoview NdLayout from a dictionnary of chart objects

    :param dataset: Mapping of chart objects to lay out.
    :param kdims: Key dimensions for the NdLayout.
    :param cols: Number of columns in the layout (default: 3).
    :return: ``hv.NdLayout`` instance, or ``None`` on error.
    """
    try:
        return hv.NdLayout(dataset, kdims=kdims).cols(cols)
    except Exception as e:
        # BUG FIX: the error context previously pointed at ``self.layout_``
        # instead of this method.
        self.err(e, self.ndlayout_, "Can not create layout")
<SYSTEM_TASK:>
Returns a Holoview Layout from chart objects
<END_TASK>
<USER_TASK:>
Description:
def layout_(self, chart_objs, cols=3):
    """
    Returns a Holoview Layout from chart objects
    """
    try:
        combined = hv.Layout(chart_objs)
        return combined.cols(cols)
    except Exception as e:
        self.err(e, self.layout_, "Can not build layout")
<SYSTEM_TASK:>
Returns an horizontal line from a column mean value
<END_TASK>
<USER_TASK:>
Description:
def _hline_bokeh_(self, col):
    """
    Returns an horizontal line from a column mean value
    """
    mean_value = self.df[col].mean()
    return hv.HLine(mean_value)
<SYSTEM_TASK:>
Returns a chart with a smooth line from a serie
<END_TASK>
<USER_TASK:>
Description:
def _sline_bokeh(self, window_size, y_label):
    """
    Returns a chart with a smooth line from a serie
    """
    try:
        smoothed = self.clone_()
        # Moving-average kernel of the requested window size.
        kernel = np.ones(int(window_size)) / float(window_size)
        smoothed.df[y_label] = np.convolve(self.df[self.y], kernel, 'same')
        smoothed.chart(self.x, y_label)
        return smoothed.line_()
    except Exception as e:
        self.err(e, self._sline_bokeh, "Can not draw smooth line chart")
<SYSTEM_TASK:>
Get a Bokeh chart object
<END_TASK>
<USER_TASK:>
Description:
def _get_bokeh_chart(self, x_field, y_field, chart_type,
                     label, opts, style, options={}, **kwargs):
    """
    Get a Bokeh chart object

    :param x_field: Column name (or list of names) used as key dimensions.
    :param y_field: Column name (or list of names) used as value dimensions.
    :param chart_type: One of "line", "hline", "point", "area", "bar",
        "hist", "errorBar", "heatmap", "lreg" or "sline".
    :param label: Chart label; falls back to ``self.label`` when ``None``.
    :param opts: Holoviews plot options.
    :param style: Holoviews style options.
    :param options: Extra options; "sline" reads ``window_size`` and
        ``y_label`` from it.
    :return: A Holoviews chart object, or ``None`` on error.
    """
    # NOTE(review): mutable default ``options={}`` is shared between calls;
    # it is only read here, but confirm no caller mutates it.
    # Normalize dimensions to lists as Holoviews expects.
    if isinstance(x_field, list):
        kdims = x_field
    else:
        kdims = [x_field]
    if isinstance(y_field, list):
        vdims = y_field
    else:
        vdims = [y_field]
    args = kwargs
    args["data"] = self.df
    args["kdims"] = kdims
    args["vdims"] = vdims
    if label is not None:
        args["label"] = label
    else:
        if self.label is not None:
            args["label"] = self.label
    chart = None
    try:
        # Dispatch on the requested chart type.
        if chart_type == "line":
            chart = hv.Curve(**args)
        if chart_type == "hline":
            chart = self._hline_bokeh_(y_field)
        elif chart_type == "point":
            chart = hv.Scatter(**args)
        elif chart_type == "area":
            chart = hv.Area(**args)
        elif chart_type == "bar":
            chart = hv.Bars(**args)
        elif chart_type == "hist":
            chart = hv.Histogram(**args)
        elif chart_type == "errorBar":
            chart = hv.ErrorBars(**args)
        elif chart_type == "heatmap":
            chart = hv.HeatMap(**args)
        elif chart_type == "lreg":
            chart = self._lreg_bokeh(**args)
        elif chart_type == "sline":
            window_size, y_label = options["window_size"], options["y_label"]
            chart = self._sline_bokeh(window_size, y_label)
        # Unknown chart type: report and bail out with None.
        if chart is None:
            self.err("Chart type " + chart_type +
                     " unknown", self._get_bokeh_chart)
            return
        # Apply plot and style options to the chart.
        endchart = chart(plot=opts, style=style)
        return endchart
    except DataError as e:
        msg = "Column not found in " + x_field + " and " + y_field
        self.err(e, self._get_bokeh_chart, msg)
    except Exception as e:
        self.err(e)
<SYSTEM_TASK:>
Create a Dataframe with the contents of the GCT file
<END_TASK>
<USER_TASK:>
Description:
def GCT(gct_obj):
    """
    Create a Dataframe with the contents of the GCT file
    """
    # Normalize path/URL/handle/raw text into a single IO object.
    gct_io = _obtain_io(gct_obj)
    # GCT data starts after two metadata lines; first two columns index rows.
    frame = pd.read_csv(
        gct_io, sep='\t', header=2, index_col=[0, 1], skip_blank_lines=True
    )
    # Apply backwards compatible methods
    _apply_backwards_compatibility(frame)
    return frame
<SYSTEM_TASK:>
Create a Dataframe with the contents of the ODF file
<END_TASK>
<USER_TASK:>
Description:
def ODF(odf_obj):
    """
    Create a Dataframe with the contents of the ODF file

    For more information on the ODF format see:
    http://software.broadinstitute.org/cancer/software/genepattern/file-formats-guide

    :odf_obj: The ODF file. Accepts a file-like object, a file path, a URL to the file
        or a string containing the raw data.
    :return: pandas DataFrame with ODF headers and model attached as attributes.
    :raises TypeError: If the contents cannot be parsed as ODF.
    """
    # Handle all the various initialization types and get an IO object
    odf_io = _obtain_io(odf_obj)
    # Read the file as an array of lines
    raw_lines = odf_io.readlines()
    # Convert byte strings to unicode strings
    raw_lines = _bytes_to_str(raw_lines)
    try:
        # Read the header count
        header_count = _extract_header_number(raw_lines)
        # Read the header dict
        headers = _parse_header(raw_lines)
        # Read the model
        model = _extract_model(headers)
        # Read the column names, if available
        column_names = _extract_column_names(headers)
        # Assemble the data
        data_lines = _join_data_lines(raw_lines, header_count)
        # Put together new IO
        odf_string_io = io.StringIO(data_lines)
        # Load the ODF file into a DataFrame
        df = pd.read_csv(odf_string_io, sep='\t', header=None, names=column_names, skip_blank_lines=True)
        # Apply backwards compatible methods
        _apply_backwards_compatibility(df)
        # Apply ODF-specific properties
        _apply_odf_properties(df, headers, model)
        # Return the Dataframe
        return df
    # Catch any errors related to parsing the ODF file
    except Exception:
        # NOTE(review): this swallows the original exception details;
        # consider `raise TypeError(...) from exc` in a future change.
        raise TypeError('Error parsing ODF file')
<SYSTEM_TASK:>
Writes the provided DataFrame to a ODF file.
<END_TASK>
<USER_TASK:>
Description:
def write_odf(df, file_path, headers=None):
    """
    Writes the provided DataFrame to a ODF file.

    Assumes that the DataFrame matches the structure of those produced
    by the ODF() function in this library

    :param df: the DataFrame to write to ODF
    :param file_path: path to which to write the ODF file
    :param headers: A dict of ODF headers, if none are provided will attempt to read them from the ODF file
    :return:
    """
    if headers is None:
        # BUG FIX: previously an explicitly supplied ``headers`` argument
        # fell into the error branch and raised AttributeError. Only fall
        # back to the DataFrame's own headers (or fail) when none are given.
        if hasattr(df, 'headers'):
            headers = df.headers
        else:
            raise AttributeError('ODF headers not provided')
    with open(file_path, 'w') as file:
        file.write(_header_dict_to_str(headers))
        df.to_csv(file, sep='\t', header=False, index=False, mode='w+')
<SYSTEM_TASK:>
Writes the provided DataFrame to a GCT file.
<END_TASK>
<USER_TASK:>
Description:
def write_gct(df, file_path):
    """
    Writes the provided DataFrame to a GCT file.

    Assumes that the DataFrame matches the structure of those produced
    by the GCT() function in this library

    :param df:
    :param file_path:
    :return:
    """
    # GCT preamble: version line, then "<rows>\t<columns>".
    preamble = '#1.2\n' + str(len(df.index)) + '\t' + str(len(df.columns)) + '\n'
    with open(file_path, 'w') as out:
        out.write(preamble)
        df.to_csv(out, sep='\t', mode='w+')
<SYSTEM_TASK:>
Used to determine if a given string represents a URL
<END_TASK>
<USER_TASK:>
Description:
def _is_url(url):
"""
Used to determine if a given string represents a URL
""" |
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
if regex.match(url) is not None:
return True
else:
return False |
<SYSTEM_TASK:>
Attach properties to the Dataframe to make it backwards compatible with older versions of this library
<END_TASK>
<USER_TASK:>
Description:
def _apply_backwards_compatibility(df):
"""
Attach properties to the Dataframe to make it backwards compatible with older versions of this library
:param df: The dataframe to be modified
""" |
df.row_count = types.MethodType(lambda self: len(self.index), df)
df.col_count = types.MethodType(lambda self: len(self.columns), df)
df.dataframe = df |
<SYSTEM_TASK:>
Attach properties to the Dataframe to carry along ODF metadata
<END_TASK>
<USER_TASK:>
Description:
def _apply_odf_properties(df, headers, model):
"""
Attach properties to the Dataframe to carry along ODF metadata
:param df: The dataframe to be modified
:param headers: The ODF header lines
:param model: The ODF model type
""" |
df.headers = headers
df.model = model |
<SYSTEM_TASK:>
Convert all lines from byte string to unicode string, if necessary
<END_TASK>
<USER_TASK:>
Description:
def _bytes_to_str(lines):
"""
Convert all lines from byte string to unicode string, if necessary
""" |
if len(lines) >= 1 and hasattr(lines[0], 'decode'):
return [line.decode('utf-8') for line in lines]
else:
return lines |
<SYSTEM_TASK:>
Extracts the number of header lines from the second line of the ODF file
<END_TASK>
<USER_TASK:>
Description:
def _extract_header_number(lines):
    """
    Extracts the number of header lines from the second line of the ODF file
    """
    header_pair = _extract_header_value(lines[1])
    # The pair has a single entry; its value is the header count.
    return int(next(iter(header_pair.values())))
<SYSTEM_TASK:>
Count the number of blank lines in the header
<END_TASK>
<USER_TASK:>
Description:
def count_header_blanks(lines, count):
    """
    Count the number of blank lines in the header
    """
    # Header lines start at index 2 and span ``count`` lines.
    return sum(
        1 for i in range(2, count + 2)
        if not _extract_header_value(lines[i])
    )
<SYSTEM_TASK:>
Join all the data lines into a byte string
<END_TASK>
<USER_TASK:>
Description:
def _join_data_lines(lines, skip):
    """
    Join all the data lines into a byte string
    """
    stripped = [line.strip() for line in lines]
    # Data starts after the 2 preamble lines, the headers and any blanks.
    offset = skip + count_header_blanks(stripped, skip) + 2
    return '\n'.join(stripped[offset:])
<SYSTEM_TASK:>
Set the main dataframe instance to rows that contains a string
<END_TASK>
<USER_TASK:>
Description:
def contains(self, column, value):
    """
    Set the main dataframe instance to rows that contains a string
    value in a column
    """
    mask = self.df[column].str.contains(value) == True
    filtered = self.df[mask]
    if filtered is None:
        self.err("Can not select contained data")
        return
    self.df = filtered
<SYSTEM_TASK:>
Set the main dataframe instance to rows that do not
<END_TASK>
<USER_TASK:>
Description:
def ncontains(self, column, value):
    """Filter ``self.df`` to rows whose ``column`` does NOT contain ``value``.

    Keeps only rows where the substring test is strictly ``False``. On
    failure an error is reported and the dataframe is left untouched.
    """
    kept = self.df[self.df[column].str.contains(value) == False]
    if kept is None:
        self.err("Can not select contained data")
        return
    self.df = kept
<SYSTEM_TASK:>
Sets the main dataframe to rows that has the exact string
<END_TASK>
<USER_TASK:>
Description:
def exact(self, column, *values):
    """Keep only rows where ``column`` equals one of ``values`` exactly.

    Delegates the selection to ``self._exact`` and stores the result as
    the main dataframe (even when selection failed, matching the
    original behavior), reporting an error in that case.
    """
    selection = self._exact(column, *values)
    if selection is None:
        self.err("Can not select exact data")
    self.df = selection
<SYSTEM_TASK:>
Returns a Dataswim instance with rows that has the exact string
<END_TASK>
<USER_TASK:>
Description:
def exact_(self, column, *values):
    """Return a new instance keeping rows where ``column`` equals a value.

    Like :meth:`exact` but non-mutating: the selection is wrapped via
    ``self._duplicate_`` and returned.
    """
    selection = self._exact(column, *values)
    if selection is None:
        self.err("Can not select exact data")
    return self._duplicate_(selection)
<SYSTEM_TASK:>
Write error message with traceback info.
<END_TASK>
<USER_TASK:>
Description:
def exception(self, s):
    """Write an error message followed by traceback information.

    Logs ``s`` via :meth:`error`, then writes the current stack, the
    active exception's traceback (minus its first frame) and the
    exception description, all at verbosity level 1. Must be called
    from inside an ``except`` block so ``sys.exc_info()`` is populated.
    """
    self.error(s)
    # Renamed from ``type``/``value`` to avoid shadowing the builtins.
    exc_type, exc_value, exc_tb = sys.exc_info()
    self.writelines(traceback.format_stack(), 1)
    self.writelines(traceback.format_tb(exc_tb)[1:], 1)
    self.writelines(traceback.format_exception_only(exc_type, exc_value), 1)
<SYSTEM_TASK:>
Write message with indentation, context and optional timestamp.
<END_TASK>
<USER_TASK:>
Description:
def write(self, s, level=0, color=None):
    """Write ``s`` with a context prefix and optional timestamp.

    Messages above the logger's verbosity ``level`` are dropped. The
    whole emission is serialized under the module-level ``lock``.
    """
    if level > self.level:
        return
    timestamp = time.strftime(u'%H:%M:%S ') if self.timestamps else u''
    with lock:
        # A context of None means "label with the current thread name";
        # an empty-but-not-None context suppresses the prefix entirely.
        if self.context:
            self.stream.write(u'%s%s> ' % (timestamp, self.context))
        elif self.context is None:
            self.stream.write(u'%s%s> ' % (timestamp, get_threadname()))
        self.stream.write(u'%s' % s, color=color)
        # Python 2/3 compatibility: pick the native text type once.
        try:
            text_type = unicode
        except NameError:
            text_type = str
        self.stream.write(text_type(os.linesep))
        self.stream.flush()
<SYSTEM_TASK:>
Write multiple messages.
<END_TASK>
<USER_TASK:>
Description:
def writelines(self, lines, level=0):
    """Write several messages, splitting each on embedded newlines."""
    for chunk in lines:
        # The original reused ``line`` for both loops, clobbering the
        # outer iteration variable; use a distinct name per loop.
        for line in chunk.rstrip(u'\n').split(u'\n'):
            self.write(line.rstrip(u'\n'), level=level)
<SYSTEM_TASK:>
Shuffle the forms in the formset of multilingual model in the
<END_TASK>
<USER_TASK:>
Description:
def reorder_translation_formset_by_language_code(inline_admin_form):
    """Order a multilingual inline formset by configured language codes.

    Builds a language_id -> form mapping from each form's initial data,
    then returns the forms in ``get_language_code_list()`` order.
    """
    by_language = {form.form.initial['language_id']: form
                   for form in inline_admin_form}
    return [by_language[code] for code in get_language_code_list()]
<SYSTEM_TASK:>
Invoked by the script installed by setuptools.
<END_TASK>
<USER_TASK:>
Description:
def main():
    """Entry point installed by setuptools for the ``tinman`` script."""
    parser.name('tinman')
    parser.description(__desc__)
    arg_parser = parser.get()
    # Allow callers to prepend an extra directory to sys.path.
    arg_parser.add_argument('-p', '--path',
                            action='store',
                            dest='path',
                            help='Path to prepend to the Python system path')
    helper.start(Controller)
<SYSTEM_TASK:>
If the cli arg for foreground is set, set the configuration option
<END_TASK>
<USER_TASK:>
Description:
def enable_debug(self):
    """If the cli arg for foreground is set, set the configuration option
    for debug.

    Running in the foreground implies an interactive/development
    session, so the application's DEBUG config flag is switched on.
    """
    if self.args.foreground:
        self.config.application[config.DEBUG] = True
<SYSTEM_TASK:>
Inserts a base path into the sys.path list if one is specified in
<END_TASK>
<USER_TASK:>
Description:
def insert_paths(self):
    """Prepend configured base paths to ``sys.path``.

    The CLI ``--path`` argument is inserted first; a configured
    ``application.paths.base`` value is also inserted when present.
    """
    if self.args.path:
        sys.path.insert(0, self.args.path)
    app_config = self.config.application
    if hasattr(app_config, config.PATHS) and \
            hasattr(app_config.paths, config.BASE):
        sys.path.insert(0, app_config.paths.base)
<SYSTEM_TASK:>
Check up on child processes and make sure everything is running as
<END_TASK>
<USER_TASK:>
Description:
def process(self):
    """Periodic health check: log how many children are still alive."""
    count = len(self.living_children)
    suffix = '' if count == 1 else 'ren'  # "child" vs "children"
    LOGGER.debug('%i active child%s', count, suffix)
<SYSTEM_TASK:>
Return the list of ports to spawn
<END_TASK>
<USER_TASK:>
Description:
def ports_to_spawn(self):
    """Return the list of ports to spawn

    Reads the HTTP server's ``ports`` setting from the configuration,
    falling back to ``DEFAULT_PORTS`` when the section or key is
    missing or empty.

    :rtype: list
    """
    return (self.config.get(config.HTTP_SERVER, dict()).get(config.PORTS)
            or self.DEFAULT_PORTS)
<SYSTEM_TASK:>
Munge in the base path into the configuration values
<END_TASK>
<USER_TASK:>
Description:
def set_base_path(self, value):
    """Ensure ``application.paths.base`` is set, defaulting to ``value``.

    Existing values are never overwritten; missing intermediate dicts
    are created on the way.

    :param str value: The path value
    """
    app = self.config.application
    if config.PATHS not in app:
        app[config.PATHS] = dict()
    if config.BASE not in app[config.PATHS]:
        app[config.PATHS][config.BASE] = value
<SYSTEM_TASK:>
Send SIGABRT to child processes to instruct them to stop
<END_TASK>
<USER_TASK:>
Description:
def shutdown(self):
    """Ask children to stop (SIGABRT), escalating to SIGKILL if needed."""
    self.signal_children(signal.SIGABRT)
    # Poll in half-second steps; after MAX_SHUTDOWN_WAIT iterations the
    # remaining children are killed outright.
    attempts = 0
    while self.living_children:
        time.sleep(0.5)
        attempts += 1
        if attempts == self.MAX_SHUTDOWN_WAIT:
            self.signal_children(signal.SIGKILL)
            break
<SYSTEM_TASK:>
Send a signal to all children
<END_TASK>
<USER_TASK:>
Description:
def signal_children(self, signum):
    """Deliver ``signum`` to every living child except this process.

    :param int signum: The signal to send
    """
    LOGGER.info('Sending signal %i to children', signum)
    own_pid = os.getpid()
    for child in self.living_children:
        if child.pid != own_pid:
            os.kill(child.pid, signum)
<SYSTEM_TASK:>
Create an Application and HTTPServer for the given port.
<END_TASK>
<USER_TASK:>
Description:
def spawn_process(self, port):
    """Build (but do not start) a server process bound to ``port``.

    :param int port: The port to listen on
    :rtype: multiprocessing.Process
    """
    child_kwargs = {'namespace': self.namespace, 'port': port}
    return process.Process(name="ServerProcess.%i" % port,
                           kwargs=child_kwargs)
<SYSTEM_TASK:>
Spawn of the appropriate number of application processes
<END_TASK>
<USER_TASK:>
Description:
def spawn_processes(self):
    """Spawn one application process per configured port."""
    for port in self.ports_to_spawn:
        # Named ``child`` (not ``process``) so the sibling ``process``
        # module used by spawn_process is not shadowed locally.
        child = self.spawn_process(port)
        child.start()
        self.children.append(child)
<SYSTEM_TASK:>
Add a diff column in percentage from a serie. The serie is
<END_TASK>
<USER_TASK:>
Description:
def diffsp(self, col: str, serie: "iterable", name: str="Diff"):
    """
    Add a percentage-diff column computed against an external serie.
    The serie is an iterable of the same length as the dataframe; each
    cell becomes ``row[col] * 100 / serie[i]``.

    :param col: column to diff
    :type col: str
    :param serie: serie to diff from
    :type serie: iterable
    :param name: name of the diff col, defaults to "Diff"
    :param name: str, optional
    :example: ``ds.diffsp("Col 1", [1, 1, 4], "New col")``
    """
    try:
        values = []
        for i, row in self.df.iterrows():
            values.append((row[col] * 100) / serie[i])
        self.df[name] = values
    except Exception as e:
        # Report this method as the failure site (the original passed
        # the unrelated ``self._append``, misdirecting the error).
        self.err(e, self.diffsp, "Can not diff column from serie")
<SYSTEM_TASK:>
Group by and mean column
<END_TASK>
<USER_TASK:>
Description:
def gmean_(self, col: str, index_col: bool=True) -> "Ds":
    """
    Group by a column and take the mean of the other columns

    :param col: column to group
    :type col: str
    :param index_col: re-add the group key as a regular column
    :type index_col: bool
    :return: a dataswim instance
    :rtype: Ds
    :example: ``ds2 = ds.gmean("Col 1")``
    """
    try:
        grouped = self.df.copy().groupby([col]).mean()
        if index_col is True:
            grouped[col] = grouped.index.values
        return self._duplicate_(grouped)
    except Exception as e:
        self.err(e, self.gmean_, "Can not meansum column")
<SYSTEM_TASK:>
Group by and sum column
<END_TASK>
<USER_TASK:>
Description:
def gsum_(self, col: str, index_col: bool=True) -> "Ds":
    """
    Group by a column and sum the other columns

    :param col: column to group
    :type col: str
    :param index_col: re-add the group key as a regular column
    :type index_col: bool
    :return: a dataswim instance
    :rtype: Ds
    :example: ``ds2 = ds.gsum("Col 1")``
    """
    try:
        grouped = self.df.copy().groupby([col]).sum()
        if index_col is True:
            grouped[col] = grouped.index.values
        return self._duplicate_(grouped)
    except Exception as e:
        self.err(e, self.gsum_, "Can not groupsum column")
<SYSTEM_TASK:>
Add a column whith the percentages ratio from a column
<END_TASK>
<USER_TASK:>
Description:
def ratio(self, col: str, ratio_col: str="Ratio"):
    """
    Add a column with the percentage ratio of a column's values

    :param col: column to calculate ratio from
    :type col: str
    :param ratio_col: new ratio column name, defaults to "Ratio"
    :param ratio_col: str, optional
    :example: ``ds2 = ds.ratio("Col 1")``
    """
    try:
        frame = self.df.copy()
        frame[ratio_col] = frame[[col]].apply(
            lambda x: 100 * x / float(x.sum()))
        self.df = frame
    except Exception as e:
        self.err(e, self.ratio, "Can not calculate ratio")
<SYSTEM_TASK:>
Set the values of the model based upon the content of the passed in
<END_TASK>
<USER_TASK:>
Description:
def from_dict(self, value):
    """Assign model attributes from the given dictionary.

    Keys absent from ``value`` are explicitly set to ``None``.

    :param dict value: The dictionary of values to assign to this model
    """
    for name in self.keys():
        setattr(self, name, value.get(name, None))
<SYSTEM_TASK:>
Return a sha1 hash of the model items.
<END_TASK>
<USER_TASK:>
Description:
def sha1(self):
    """Return the SHA-1 hex digest of the model's key/value pairs.

    :rtype: str
    """
    # hashlib requires bytes on Python 3; the original passed a str,
    # which raises TypeError there. Encode explicitly as UTF-8.
    payload = ''.join(['%s:%s' % (k, v) for k, v in self.items()])
    return str(hashlib.sha1(payload.encode('utf-8')).hexdigest())
<SYSTEM_TASK:>
Returns context variables containing information about available languages.
<END_TASK>
<USER_TASK:>
Description:
def multilingual(request):
    """Context processor exposing available-language info to templates."""
    codes = sorted(get_language_code_list())
    names = [(code, LANG_DICT.get(code, code)) for code in codes]
    return {'LANGUAGE_CODES': codes,
            'LANGUAGE_CODES_AND_NAMES': names,
            'DEFAULT_LANGUAGE_CODE': get_default_language_code(),
            'ADMIN_MEDIA_URL': settings.ADMIN_MEDIA_PREFIX}
<SYSTEM_TASK:>
Return a tuple of host and port for the statsd server to send
<END_TASK>
<USER_TASK:>
Description:
def _statsd_address(self):
"""Return a tuple of host and port for the statsd server to send
stats to.
:return: tuple(host, port)
""" |
return (self.application.settings.get('statsd',
{}).get('host',
self.STATSD_HOST),
self.application.settings.get('statsd',
{}).get('port',
self.STATSD_PORT)) |
<SYSTEM_TASK:>
Create a new redis client and assign it the class _redis_client
<END_TASK>
<USER_TASK:>
Description:
def _new_redis_client(self):
    """Create a redis client from the configured connection settings.

    :rtype: tornadoredis.Client()
    """
    # Import unconditionally: ``import`` inside a function binds a
    # function-local name, so the original guard
    # ``if 'tornadoredis' not in globals()`` could leave the name
    # unbound when it was ever true. sys.modules caches the module, so
    # repeating the import is cheap.
    import tornadoredis
    kwargs = self._redis_connection_settings()
    LOGGER.info('Connecting to %(host)s:%(port)s DB %(selected_db)s',
                kwargs)
    return tornadoredis.Client(**kwargs)
<SYSTEM_TASK:>
Return a dictionary of redis connection settings.
<END_TASK>
<USER_TASK:>
Description:
def _redis_connection_settings(self):
    """Return a dictionary of redis connection settings.

    Each entry falls back to the class-level default when missing.
    """
    connection = {'selected_db': self.settings.get(config.DB,
                                                   self._REDIS_DB)}
    connection[config.HOST] = self.settings.get(config.HOST,
                                                self._REDIS_HOST)
    connection[config.PORT] = self.settings.get(config.PORT,
                                                self._REDIS_PORT)
    return connection
<SYSTEM_TASK:>
Handle delete of an item
<END_TASK>
<USER_TASK:>
Description:
def delete(self, *args, **kwargs):
    """Remove the item identified by the ``id`` keyword argument.

    Responds 404 when the model does not exist, a permission denial
    when the caller lacks delete rights, and 204 (no content) on
    success.

    :param args:
    :param kwargs:
    """
    self.model = self.get_model(kwargs.get('id'))
    fetched = yield self.model.fetch()
    if not fetched:
        self.not_found()
        return
    if not self.has_delete_permission():
        self.permission_denied()
        return
    self.model.delete()
    # 204: request processed, no content returned
    self.set_status(204)
    self.finish()
<SYSTEM_TASK:>
Handle HEAD requests for the item
<END_TASK>
<USER_TASK:>
Description:
def head(self, *args, **kwargs):
    """Answer HEAD requests for the item: headers only, no body.

    :param args:
    :param kwargs:
    """
    self.model = self.get_model(kwargs.get('id'))
    fetched = yield self.model.fetch()
    if not fetched:
        self.not_found()
        return
    if not self.has_read_permission():
        self.permission_denied()
        return
    # etag / content-length headers, then an empty 200 response
    self.add_headers()
    self.set_status(200)
    self.finish()
<SYSTEM_TASK:>
Handle reading of the model
<END_TASK>
<USER_TASK:>
Description:
def get(self, *args, **kwargs):
    """Return the model identified by ``id`` as JSON.

    :param args:
    :param kwargs:
    """
    self.model = self.get_model(kwargs.get('id'))
    fetched = yield self.model.fetch()
    if not fetched:
        LOGGER.debug('Not found')
        self.not_found()
        return
    if not self.has_read_permission():
        LOGGER.debug('Permission denied')
        self.permission_denied()
        return
    # Add headers and return the serialized model
    self.add_headers()
    self.finish(self.model_json())
<SYSTEM_TASK:>
Handle creation of an item.
<END_TASK>
<USER_TASK:>
Description:
def post(self, *args, **kwargs):
    """Create a new item from the JSON request body.

    Returns 403 without permission, 201 on success, 507 when the
    storage backend rejects the save.

    :param args:
    :param kwargs:
    """
    self.initialize_post()
    if not self.has_create_permission():
        LOGGER.debug('Does not have write_permission')
        self.set_status(403, self.status_message('Creation Forbidden'))
        self.finish()
        return
    saved = yield self.model.save()
    if not saved:
        self.set_status(507, self.status_message('Creation Failed'))
        self.finish()
        return
    self.set_status(201, self.status_message('Created'))
    self.add_headers()
    self.finish(self.model.as_dict())
<SYSTEM_TASK:>
Invoked by the ModelAPIRequestHandler.post method prior to taking
<END_TASK>
<USER_TASK:>
Description:
def initialize_post(self):
    """Populate a fresh model from the request's JSON arguments.

    Invoked by ``post`` before any permission checks or saving.
    """
    self.model = self.get_model()
    for field in self.model.keys():
        self.model.set(field, self.json_arguments.get(field))
<SYSTEM_TASK:>
Apply the self.categorical_mappings_ mappings where necessary.
<END_TASK>
<USER_TASK:>
Description:
def _convert_nonstring_categoricals(self, param_dict):
"""Apply the self.categorical_mappings_ mappings where necessary.""" |
return {name: (self.categorical_mappings_[name][val] if name in self.categorical_mappings_ else val)
for (name, val) in param_dict.items()} |
<SYSTEM_TASK:>
Extend to the delete the session from storage
<END_TASK>
<USER_TASK:>
Description:
def delete(self):
    """Clear the session and remove its backing file, if present."""
    self.clear()
    if not os.path.isfile(self._filename):
        LOGGER.debug('Session file did not exist: %s', self._filename)
        return
    os.unlink(self._filename)
<SYSTEM_TASK:>
Remove any stale files from the session storage directory
<END_TASK>
<USER_TASK:>
Description:
def _cleanup(self):
"""Remove any stale files from the session storage directory""" |
for filename in os.listdir(self._storage_dir):
file_path = path.join(self._storage_dir, filename)
file_stat = os.stat(file_path)
evaluate = max(file_stat.st_ctime, file_stat.st_mtime)
if evaluate + self._duration < time.time():
LOGGER.debug('Removing stale file: %s', file_path)
os.unlink(file_path) |
<SYSTEM_TASK:>
Setup the storage directory path value and ensure the path exists.
<END_TASK>
<USER_TASK:>
Description:
def _setup_storage_dir(self):
    """Resolve (and create, if needed) the session storage directory.

    An unconfigured path falls back to the default and is created on
    demand; a configured path must already exist and be a directory.

    :rtype: str
    :raises: tinman.exceptions.ConfigurationException
    """
    dir_path = self._settings.get(config.DIRECTORY)
    if dir_path is None:
        # No configured directory: use the default, creating it lazily.
        dir_path = self._default_path
        if not os.path.exists(dir_path):
            self._make_path(dir_path)
        return dir_path.rstrip('/')
    dir_path = path.abspath(dir_path)
    if not os.path.exists(dir_path) or not os.path.isdir(dir_path):
        raise exceptions.ConfigurationException(self.__class__.__name__,
                                                config.DIRECTORY)
    return dir_path.rstrip('/')
<SYSTEM_TASK:>
Connect to redis and assign the client to the RedisSession class
<END_TASK>
<USER_TASK:>
Description:
def _redis_connect(cls, settings):
    """Connect to redis and cache the client on the class.

    The client is stored on ``cls`` so every request in this process
    shares one connection.

    :param dict settings: The redis session configuration
    """
    # Import unconditionally: ``import`` inside a function binds a
    # function-local name, so the original ``if 'tornadoredis' not in
    # globals()`` guard could leave the name unbound whenever the
    # branch was skipped. sys.modules caches the module anyway.
    import tornadoredis
    kwargs = {'host': settings.get('host', cls.REDIS_HOST),
              'port': settings.get('port', cls.REDIS_PORT),
              'selected_db': settings.get('db', cls.REDIS_DB)}
    LOGGER.info('Connecting to %(host)s:%(port)s DB %(selected_db)s',
                kwargs)
    cls._redis_client = tornadoredis.Client(**kwargs)
    cls._redis_client.connect()
<SYSTEM_TASK:>
Store the session data in redis
<END_TASK>
<USER_TASK:>
Description:
def save(self):
    """Persist the serialized session under its redis key.

    :param method callback: The callback method to invoke when done
    """
    outcome = yield gen.Task(RedisSession._redis_client.set,
                             self._key, self.dumps())
    LOGGER.debug('Saved session %s (%r)', self.id, outcome)
    raise gen.Return(outcome)
<SYSTEM_TASK:>
Create an instance of a tornado.template.Template object for the
<END_TASK>
<USER_TASK:>
Description:
def _create_template(self, name):
    """Fetch a template by name over HTTP and compile it.

    :param str name: The name/path to the template
    :rtype: tornado.template.Template
    """
    url = '%s/%s' % (self._base_url, escape.url_escape(name))
    LOGGER.debug('Making HTTP GET request to %s', url)
    response = self._http_client.fetch(url)
    # json.loads does not accept ensure_ascii (that is a json.dumps
    # option); passing it raised TypeError on every call.
    data = json.loads(response.body)
    return template.Template(data['template'], name=name, loader=self)
<SYSTEM_TASK:>
Parses Crianza source code and returns a native Python function.
<END_TASK>
<USER_TASK:>
Description:
def xcompile(source_code, args=0, optimize=True):
    """Compile Crianza source code into a native Python function.

    Args:
        args: The resulting function's number of input parameters.

    Returns:
        A callable Python function.
    """
    bytecode = crianza.compile(crianza.parse(source_code),
                               optimize=optimize)
    return crianza.native.compile(bytecode, args=args)
<SYSTEM_TASK:>
Compiles to native Python bytecode and runs program, returning the
<END_TASK>
<USER_TASK:>
Description:
def xeval(source, optimize=True):
    """Compile ``source`` to native bytecode, run it, return the result.

    Args:
        optimize: Whether to optimize the code after parsing it.

    Returns:
        None for an empty stack, the single value for a one-item stack,
        or a list of values when the stack holds several.
    """
    return xcompile(source, optimize=optimize)()
<SYSTEM_TASK:>
Prevents cycles in the tree.
<END_TASK>
<USER_TASK:>
Description:
def clean(self):
    """Validate that this node is not made a descendant of itself."""
    super(CTENode, self).clean()
    if self.parent:
        ancestor_path = getattr(self.parent, self._cte_node_path)
        if self.pk in ancestor_path:
            raise ValidationError(
                _("A node cannot be made a descendant of itself."))
<SYSTEM_TASK:>
Controls the flow of the ddg application
<END_TASK>
<USER_TASK:>
Description:
def main():
    """Control the flow of the ddg application."""
    # Build the parser and parse the arguments
    parser = argparse.ArgumentParser(
        description='www.duckduckgo.com zero-click api for your command-line'
    )
    parser.add_argument('query', nargs='*', help='the search query')
    parser.add_argument('-b', '--bang', action='store_true',
                        help='open the !bang redirect url in a new browser tab')
    parser.add_argument('-d', '--define', action='store_true',
                        help='return the definition result')
    parser.add_argument('-j', '--json', action='store_true',
                        help='return the zero-click info api json response')
    parser.add_argument('-l', '--lucky', action='store_true',
                        help='open the result url in a new browser tab')
    parser.add_argument('-s', '--search', action='store_true',
                        help='launch a DuckDuckGo search in a new browser tab')
    parser.add_argument('-u', '--url', action='store_true',
                        help='return the result url')
    args = parser.parse_args()
    # Get the queries: CLI args first, then piped stdin, else show help
    if args.query:
        queries = [' '.join(args.query)]
    elif not sys.stdin.isatty():
        queries = sys.stdin.read().splitlines()
    else:
        parser.print_help()
        return
    # Determine if we need to add any prefixes based on user flags
    prefix = '!ddg ' if args.search else '!' if args.bang else ''
    for query in queries:
        query = prefix + query
        # Get a response from api.duckduckgo.com via the duckduckgo module
        results = duckduckgo.search(query)
        # If results is null, continue to the next query
        if not results:
            continue
        # Print the raw json output and move to the next query
        if args.json:
            print_result(results.json)
            continue
        # Where to look for an answer first, which attribute to read,
        # and what to do with the answer once found
        results_priority = get_results_priority(args)
        var = get_text_or_url(args)
        action = get_action(args)
        # Search for an answer and perform an action
        failed_to_find_answer = True
        for source in results_priority:
            result = getattr(getattr(results, source), var)
            if result:
                action(result)
                failed_to_find_answer = False
                break
        # Let the user know if no answer was found. Note: the original
        # used Python-2 ``print`` statements, a SyntaxError on Python 3;
        # print() works on both.
        if failed_to_find_answer:
            if results.type == 'disambiguation':
                print('Your query was ambiguous, please be more specific')
            else:
                print('No results found')
<SYSTEM_TASK:>
Return a result priority list based on user input
<END_TASK>
<USER_TASK:>
Description:
def get_results_priority(args):
    """Build the ordered list of result fields to inspect.

    Redirect-style flags (bang/search/lucky) favor redirect results;
    ``--define`` puts the definition first, otherwise it goes last.
    """
    wants_redirect = args.bang or args.search or args.lucky
    if wants_redirect:
        priority = ['redirect', 'result', 'abstract']
    else:
        priority = ['answer', 'abstract', 'result']
    if args.define:
        priority.insert(0, 'definition')
    else:
        priority.append('definition')
    return priority
<SYSTEM_TASK:>
Return a function to launch the web browser or print a result
<END_TASK>
<USER_TASK:>
Description:
def get_action(args):
    """Pick the answer handler: open a browser tab or print the result."""
    wants_redirect = args.bang or args.search or args.lucky
    if wants_redirect and not args.url:
        return webbrowser.open_new_tab
    return print_result
<SYSTEM_TASK:>
Determine if we need text or url output
<END_TASK>
<USER_TASK:>
Description:
def get_text_or_url(args):
    """Choose which answer attribute to read: 'url' or 'text'."""
    if args.url or args.bang or args.search or args.lucky:
        return 'url'
    return 'text'
<SYSTEM_TASK:>
Print the existing tables in a database
<END_TASK>
<USER_TASK:>
Description:
def tables(self):
    """Print the names of the existing database tables.

    :example: ``ds.tables()``
    """
    if self._check_db() == False:
        return
    try:
        table_names = self._tables()
        if table_names is None:
            return
        count = len(table_names)
        plural = "" if count == 1 else "s"
        msg = "Found " + colors.bold(str(count)) + " table" + plural + ":\n"
        msg += "\n".join(table_names)
        self.info(msg)
    except Exception as e:
        self.err(e, "Can not print tables")
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.