<SYSTEM_TASK:>
Returns True if the target folder is older than the source folder.
<END_TASK>
<USER_TASK:>
Description:
def changes_found(self):
"""
Returns True if the target folder is older than the source folder.
"""
|
if self.dest is None:
warnings.warn("dest directory not found!")
if self.src is None:
warnings.warn("src directory not found!")
if self.src is None or self.dest is None:
return False
dest_mtime = -1
src_mtime = os.path.getmtime(self.src)
if os.path.exists(self.dest):
dest_mtime = os.path.getmtime(self.dest)
if src_mtime >= dest_mtime:
return True # changes found
for folder, _, files in os.walk(self.src):
for filename in fnmatch.filter(files, '*.scss'):
src_path = os.path.join(folder, filename)
if os.path.getmtime(src_path) >= dest_mtime:
return True
return False
|
<SYSTEM_TASK:>
Calls the compass script specified in the compass extension
<END_TASK>
<USER_TASK:>
Description:
def compile(self, compass):
"""
Calls the compass script specified in the compass extension
with the paths provided by the config.rb.
"""
|
try:
output = subprocess.check_output(
[compass.compass_path, 'compile', '-q'],
cwd=self.base_dir)
os.utime(self.dest, None)
compass.log.debug(output)
except OSError as e:
if e.errno == errno.ENOENT:
compass.log.error("Compass could not be found in the PATH " +
"and/or in the COMPASS_PATH setting! " +
"Disabling compilation.")
compass.disabled = True
else:
raise
|
<SYSTEM_TASK:>
lift deeply nested expressions out of redundant parentheses
<END_TASK>
<USER_TASK:>
Description:
def _remove_otiose(lst):
"""lift deeply nested expressions out of redundant parentheses"""
|
listtype = type([])
while type(lst) == listtype and len(lst) == 1:
lst = lst[0]
return lst
|
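A minimal usage sketch of the unwrapping behavior (illustrative calls, not part of the source):

assert _remove_otiose([[[1, 2]]]) == [1, 2]   # singleton lists peeled away
assert _remove_otiose([['atom']]) == 'atom'   # stops at a non-list
assert _remove_otiose([1, 2]) == [1, 2]       # multi-element lists untouched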
<SYSTEM_TASK:>
Retrieves work commits from repo
<END_TASK>
<USER_TASK:>
Description:
def get_work_commits(repo_addr, ascending = True, tz = 'US/Eastern', correct_times = True):
"""Retrives work commits from repo"""
|
repo = git.Repo(repo_addr)
commits = list(repo.iter_commits())
logs = [(c.authored_datetime, c.message.strip('\n'), str(c)) for c in repo.iter_commits()]
work = pd.DataFrame.from_records(logs, columns = ['time', 'message', 'hash'])
work.time = pd.DatetimeIndex([pd.Timestamp(i).tz_convert(tz) for i in work.time])
work.set_index('time', inplace = True)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
work = work.sort_index(ascending = ascending)
if correct_times:
work = adjust_time(work)
return work, repo
|
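A usage sketch, assuming a local clone at an illustrative path:

# '/path/to/repo' is a placeholder; any local git checkout works.
work, repo = get_work_commits('/path/to/repo', ascending=True, tz='US/Eastern')
print(work.head())  # DataFrame of messages and hashes indexed by commit time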
<SYSTEM_TASK:>
Opens one of the topic set resource files and returns a set of topics.
<END_TASK>
<USER_TASK:>
Description:
def get_topic_set(file_path):
"""
Opens one of the topic set resource files and returns a set of topics.
- Input: - file_path: The path pointing to the topic set resource file.
- Output: - topic_set: A python set of strings.
"""
|
topic_set = set()
file_row_gen = get_file_row_generator(file_path, ",") # The separator here is irrelevant.
for file_row in file_row_gen:
topic_set.add(file_row[0])
return topic_set
|
<SYSTEM_TASK:>
Returns a set of all the topics that are interesting for REVEAL use-cases.
<END_TASK>
<USER_TASK:>
Description:
def get_reveal_set():
"""
Returns a set of all the topics that are interesting for REVEAL use-cases.
"""
|
file_path = get_package_path() + "/twitter/res/topics/story_set.txt"
story_topics = get_topic_set(file_path)
file_path = get_package_path() + "/twitter/res/topics/theme_set.txt"
theme_topics = get_topic_set(file_path)
file_path = get_package_path() + "/twitter/res/topics/attribute_set.txt"
attribute_topics = get_topic_set(file_path)
file_path = get_package_path() + "/twitter/res/topics/stance_set.txt"
stance_topics = get_topic_set(file_path)
file_path = get_package_path() + "/twitter/res/topics/geographical_set.txt"
geographical_topics = get_topic_set(file_path)
topics = story_topics | theme_topics | attribute_topics | stance_topics | geographical_topics
return topics
|
<SYSTEM_TASK:>
Opens the topic-keyword map resource file and returns the corresponding python dictionary.
<END_TASK>
<USER_TASK:>
Description:
def get_topic_keyword_dictionary():
"""
Opens the topic-keyword map resource file and returns the corresponding python dictionary.
- Input: - file_path: The path pointing to the topic-keyword map resource file.
- Output: - topic_set: A topic to keyword python dictionary.
"""
|
topic_keyword_dictionary = dict()
file_row_gen = get_file_row_generator(get_package_path() + "/twitter/res/topics/topic_keyword_mapping" + ".txt",
",",
"utf-8")
for file_row in file_row_gen:
topic_keyword_dictionary[file_row[0]] = set([keyword for keyword in file_row[1:]])
return topic_keyword_dictionary
|
<SYSTEM_TASK:>
Retrieves the content of an input given a DataSource. The input acts like a filter over the outputs of the DataSource.
<END_TASK>
<USER_TASK:>
Description:
def get_input(self, name, ds):
"""
Retrieves the content of an input given a DataSource. The input acts like a filter over the outputs of the DataSource.
Args:
name (str): The name of the input.
ds (openflow.DataSource): The DataSource that will feed the data.
Returns:
pandas.DataFrame: The content of the input.
"""
|
columns = self.inputs.get(name)
df = ds.get_dataframe()
# set defaults
for column in columns:
if column not in df.columns:
df[column] = self.defaults.get(column)
return df[columns]
|
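A self-contained sketch of the column-filter-with-defaults idea above; StubSource and StubNode are hypothetical stand-ins, not the openflow API:

import pandas as pd

class StubSource:
    def get_dataframe(self):
        return pd.DataFrame({'a': [1, 2], 'b': [3, 4]})

class StubNode:
    inputs = {'in0': ['a', 'c']}   # 'c' is not produced upstream
    defaults = {'c': 0}            # so the default fills it in

    def get_input(self, name, ds):
        columns = self.inputs.get(name)
        df = ds.get_dataframe()
        for column in columns:
            if column not in df.columns:
                df[column] = self.defaults.get(column)
        return df[columns]

print(StubNode().get_input('in0', StubSource()))  # columns 'a' and 'c'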
<SYSTEM_TASK:>
Returns a domain relationship equivalent with this resource
<END_TASK>
<USER_TASK:>
Description:
def domain_relationship(self):
"""
Returns a domain relationship equivalent with this resource
relationship.
"""
|
if self.__domain_relationship is None:
ent = self.relator.get_entity()
self.__domain_relationship = \
self.descriptor.make_relationship(ent)
return self.__domain_relationship
|
<SYSTEM_TASK:>
Date a proxy record
<END_TASK>
<USER_TASK:>
Description:
def date(self, proxy, how='median', n=500):
"""Date a proxy record
Parameters
----------
proxy : ProxyRecord
how : str
How to perform the dating. 'median' returns the median of the MCMC ensemble. 'ensemble' returns 'n'
randomly selected members of the MCMC ensemble. Default is 'median'.
n : int
If 'how' is 'ensemble', the function will randomly select 'n' MCMC ensemble members, with replacement.
Returns
-------
DatedProxyRecord
"""
|
assert how in ['median', 'ensemble']
ens_members = self.mcmcfit.n_members()
if how == 'ensemble':
select_idx = np.random.choice(range(ens_members), size=n, replace=True)
out = []
for d in proxy.data.depth.values:
age = self.agedepth(d)
if how == 'median':
age = np.median(age)
elif how == 'ensemble':
age = age[select_idx]
out.append(age)
return DatedProxyRecord(proxy.data.copy(), out)
|
<SYSTEM_TASK:>
Get calendar age for a depth
<END_TASK>
<USER_TASK:>
Description:
def agedepth(self, d):
"""Get calendar age for a depth
Parameters
----------
d : float
Sediment depth (in cm).
Returns
-------
Numeric giving true age at given depth.
"""
|
# TODO(brews): Function cannot handle hiatus
# See lines 77 - 100 of hist2.cpp
x = self.mcmcfit.sediment_rate
theta0 = self.mcmcfit.headage # Age abscissa (in yrs). If array, dimension should be iterations or realizations of the sediment
deltac = self.thick
c0 = min(self.depth) # Uniform depth segment abscissa (in cm).
assert d > c0 or np.isclose(c0, d, atol = 1e-4)
out = theta0.astype(float)
i = int(np.floor((d - c0) / deltac))
for j in range(i):
out += x[j] * deltac
ci = c0 + i * deltac
assert ci < d or np.isclose(ci, d, atol = 1e-4)
try:
next_x = x[i]
except IndexError:
# Extrapolating
next_x = x[i - 1]
out += next_x * (d - ci)
return out
|
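To make the piecewise accumulation concrete, a hand-worked sketch with hypothetical scalar values (the real method operates on MCMC ensembles):

import numpy as np

# Surface age 100 yr at depth 0 cm, 5 cm segments, rates 10 and 20 yr/cm.
theta0, deltac, c0 = 100.0, 5.0, 0.0
x = np.array([10.0, 20.0])

d = 7.0                                    # query depth (cm)
i = int(np.floor((d - c0) / deltac))       # one full segment above d
age = theta0 + sum(x[j] * deltac for j in range(i))  # 100 + 10*5 = 150
age += x[i] * (d - (c0 + i * deltac))                # + 20*(7-5) = 190
print(age)  # 190.0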
<SYSTEM_TASK:>
Plot prior chronology dates in age-depth plot
<END_TASK>
<USER_TASK:>
Description:
def plot_prior_dates(self, dwidth=30, ax=None):
"""Plot prior chronology dates in age-depth plot"""
|
if ax is None:
ax = plt.gca()
depth, probs = self.prior_dates()
pat = []
for i, d in enumerate(depth):
p = probs[i]
z = np.array([p[:, 0], dwidth * p[:, 1] / np.sum(p[:, 1])]) # Normalize
z = z[:, z[0].argsort(kind='mergesort')] # np.interp requires `xp` arg to be sorted
zy = np.linspace(np.min(z[0]), np.max(z[0]), num=200)
zp = np.interp(x=zy, xp=z[0], fp=z[1])
pol = np.vstack([np.concatenate([d + zp, d - zp[::-1]]),
np.concatenate([zy, zy[::-1]])])
pat.append(Polygon(pol.T))
p = PatchCollection(pat)
p.set_label('Prior dates')
ax.add_collection(p)
ax.autoscale_view()
ax.set_ylabel('Age (cal yr BP)')
ax.set_xlabel('Depth (cm)')
ax.grid(True)
return ax
|
<SYSTEM_TASK:>
Plot sediment accumulation rate prior and posterior distributions
<END_TASK>
<USER_TASK:>
Description:
def plot_sediment_rate(self, ax=None):
"""Plot sediment accumulation rate prior and posterior distributions"""
|
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_rate()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_rate
density = scipy.stats.gaussian_kde(y_posterior.flat)
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
acc_shape = self.mcmcsetup.mcmc_kws['acc_shape']
acc_mean = self.mcmcsetup.mcmc_kws['acc_mean']
annotstr_template = 'acc_shape: {0}\nacc_mean: {1}'
annotstr = annotstr_template.format(acc_shape, acc_mean)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Acc. rate (yr/cm)')
ax.grid(True)
return ax
|
<SYSTEM_TASK:>
Plot sediment memory prior and posterior distributions
<END_TASK>
<USER_TASK:>
Description:
def plot_sediment_memory(self, ax=None):
"""Plot sediment memory prior and posterior distributions"""
|
if ax is None:
ax = plt.gca()
y_prior, x_prior = self.prior_sediment_memory()
ax.plot(x_prior, y_prior, label='Prior')
y_posterior = self.mcmcfit.sediment_memory
density = scipy.stats.gaussian_kde(y_posterior ** (1/self.thick))
density.covariance_factor = lambda: 0.25
density._compute_covariance()
ax.plot(x_prior, density(x_prior), label='Posterior')
mem_mean = self.mcmcsetup.mcmc_kws['mem_mean']
mem_strength = self.mcmcsetup.mcmc_kws['mem_strength']
annotstr_template = 'mem_strength: {0}\nmem_mean: {1}\nthick: {2} cm'
annotstr = annotstr_template.format(mem_strength, mem_mean, self.thick)
ax.annotate(annotstr, xy=(0.9, 0.9), xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top')
ax.set_ylabel('Density')
ax.set_xlabel('Memory (ratio)')
ax.grid(True)
return ax
|
<SYSTEM_TASK:>
Print information of a quantitative phase imaging dataset
<END_TASK>
<USER_TASK:>
Description:
def qpinfo():
"""Print information of a quantitative phase imaging dataset"""
|
parser = qpinfo_parser()
args = parser.parse_args()
path = pathlib.Path(args.path).resolve()
try:
ds = load_data(path)
except UnknownFileFormatError:
print("Unknown file format: {}".format(path))
return
print("{} ({})".format(ds.__class__.__doc__, ds.__class__.__name__))
print("- number of images: {}".format(len(ds)))
for key in ds.meta_data:
print("- {}: {}".format(key, ds.meta_data[key]))
|
<SYSTEM_TASK:>
Returns the meta type of the supplied node as a string.
<END_TASK>
<USER_TASK:>
Description:
def get_node_meta_type(manager, handle_id):
"""
Returns the meta type of the supplied node as a string.
:param manager: Neo4jDBSessionManager
:param handle_id: Unique id
:return: string
"""
|
node = get_node(manager=manager, handle_id=handle_id, legacy=False)
for label in node.labels:
if label in META_TYPES:
return label
raise exceptions.NoMetaLabelFound(handle_id)
|
<SYSTEM_TASK:>
Makes a relationship from node to other_node depending on which
<END_TASK>
<USER_TASK:>
Description:
def create_relationship(manager, handle_id, other_handle_id, rel_type):
"""
Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception.
"""
|
meta_type = get_node_meta_type(manager, handle_id)
if meta_type == 'Location':
return create_location_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Logical':
return create_logical_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Relation':
return create_relation_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Physical':
return create_physical_relationship(manager, handle_id, other_handle_id, rel_type)
other_meta_type = get_node_meta_type(manager, other_handle_id)
raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type)
|
<SYSTEM_TASK:>
Parse the code details and TOC from the given HTML content
<END_TASK>
<USER_TASK:>
Description:
def parse_code(self, url, html):
"""
Parse the code details and TOC from the given HTML content
:type url: str
:param url: source URL of the page
:type html: unicode
:param html: Content of the HTML
:return: the code
"""
|
soup = BeautifulSoup(html, 'html5lib', from_encoding='utf-8')
# -- main text
div = (soup
.find('div', id='content_false')
.find('div', attrs={'class': 'data'}))
code = Code(self.id_code,
date_pub=self.date_pub,
url_code=cleanup_url(url))
# -- Code title/subtitle
div_title = div.find('div', id='titreTexte')
span_subtitle = div_title.find('span',
attrs={'class': 'sousTitreTexte'})
if span_subtitle:
code.title = div_title.text.replace(span_subtitle.text, '')
code.subtitle = span_subtitle.text.strip()
regex = r'Version consolidée au (\d{1,2}(?:er)?\s+[^\s]+\s+\d{4})'
m = re.search(regex, code.subtitle)
if m:
code.date_pub = parse_date(m.group(1))
code.title = code.title.strip()
# -- TOC
code.children = [self.parse_code_ul(url, child)
for child in div.find_all('ul', recursive=False)]
return code
|
<SYSTEM_TASK:>
The added interval must be overlapping or beyond the last stored interval, i.e. added in sorted order.
<END_TASK>
<USER_TASK:>
Description:
def add(self, interval, offset):
"""
The added interval must be overlapping or beyond the last stored interval, i.e. added in sorted order.
:param interval: interval to add
:param offset: full virtual offset to add
:return:
"""
|
start, stop = self.get_start_stop(interval)
if len(self.starts) > 0:
if start < self.starts[-1] or offset <= self.offsets[-1][1]:
raise ValueError('intervals and offsets must be added in-order')
self.offsets[-1][1] = offset
self.offsets[-1][2] += 1
else:
self.starts.append(start)
self.stops.append(stop)
self.offsets.append([offset, offset, 1])
|
<SYSTEM_TASK:>
get sum for queryset.
<END_TASK>
<USER_TASK:>
Description:
def get_sum(qs, field):
"""
get sum for queryset.
``qs``: queryset
``field``: The field name to sum.
"""
|
sum_field = '%s__sum' % field
qty = qs.aggregate(Sum(field))[sum_field]
return qty if qty else 0
|
<SYSTEM_TASK:>
get max for queryset.
<END_TASK>
<USER_TASK:>
Description:
def get_max(qs, field):
"""
get max for queryset.
qs: queryset
field: The field name to max.
"""
|
max_field = '%s__max' % field
num = qs.aggregate(Max(field))[max_field]
return num if num else 0
|
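A usage sketch for both helpers; the Order model and its fields are hypothetical, and Sum/Max are assumed to come from django.db.models as the helpers require:

total_qty = get_sum(Order.objects.all(), 'quantity')          # 0 when empty
largest = get_max(Order.objects.filter(paid=True), 'quantity')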
<SYSTEM_TASK:>
auto filter queryset by dict.
<END_TASK>
<USER_TASK:>
Description:
def do_filter(qs, qdata, quick_query_fields=[], int_quick_query_fields=[]):
"""
auto filter queryset by dict.
qs: queryset need to filter.
qdata:
quick_query_fields:
int_quick_query_fields:
"""
|
try:
qs = qs.filter(
__gen_quick_query_params(
qdata.get('q_quick_search_kw'), quick_query_fields,
int_quick_query_fields)
)
q, kw_query_params = __gen_query_params(qdata)
qs = qs.filter(q, **kw_query_params)
except Exception:
import traceback
traceback.print_exc()
return qs
|
<SYSTEM_TASK:>
Converts a dictionary of variable star data to a `Body` instance.
<END_TASK>
<USER_TASK:>
Description:
def dict_to_body(star_dict):
"""
Converts a dictionary of variable star data to a `Body` instance.
Requires `PyEphem <http://rhodesmill.org/pyephem/>`_ to be installed.
"""
|
if ephem is None: # pragma: no cover
raise NotImplementedError("Please install PyEphem in order to use dict_to_body.")
body = ephem.FixedBody()
body.name = star_dict['name']
body._ra = ephem.hours(str(star_dict['ra']))
body._dec = ephem.degrees(str(star_dict['dec']))
body._epoch = ephem.J2000
return body
|
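A usage sketch (requires PyEphem); the star dictionary values are illustrative:

star = {'name': 'R And', 'ra': '00:24:01.9', 'dec': '+38:34:37'}
body = dict_to_body(star)
body.compute('2024/1/1')             # apparent place for a given date
print(body.name, body.ra, body.dec)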
<SYSTEM_TASK:>
return a readable XML form of the data.
<END_TASK>
<USER_TASK:>
Description:
def workbook_data(self):
"""return a readable XML form of the data."""
|
document = XML(
fn=os.path.splitext(self.fn)[0]+'.xml',
root=Element.workbook())
shared_strings = [
str(t.text) for t in
self.xml('xl/sharedStrings.xml')
.root.xpath(".//xl:t", namespaces=self.NS)]
for key in self.sheets.keys():
worksheet = self.sheets[key].transform(XT, shared_strings=shared_strings)
document.root.append(worksheet.root)
return document
|
<SYSTEM_TASK:>
Put and process tasks in queue.
<END_TASK>
<USER_TASK:>
Description:
def process(self, event):
"""Put and process tasks in queue.
"""
|
logger.info(f"{self}: put {event.src_path}")
self.queue.put(os.path.basename(event.src_path))
|
<SYSTEM_TASK:>
Scrapes Apple's iCal feed for Australian public holidays and generates per-
<END_TASK>
<USER_TASK:>
Description:
def main():
"""
Scrapes Apple's iCal feed for Australian public holidays and generates per-
state listings.
"""
|
print "Downloading Holidays from Apple's server..."
r = requests.get('http://files.apple.com/calendars/Australian32Holidays.ics')
cal = Calendar.from_ical(r.text)
print "Processing calendar data..."
valid_states = ['ACT', 'NSW', 'NT', 'QLD', 'SA', 'TAS', 'VIC', 'WA']
state_cal = {}
all_cal = make_calendar()
for state in valid_states:
state_cal[state] = make_calendar()
for event in cal.walk('VEVENT'):
event_name = event.decoded('SUMMARY').lower()
if filter(lambda x: x in event_name, IGNORED_EVENTS):
continue
# see if there is a state or if it is for all
if '(' in event_name: # and not 'day in lieu' in event_name:
# it is just for certain states.
# eg:
# - Easter Tuesday (TAS)
# - Labour Day (ACT, NSW, SA, QLD)
states = event_name.split('(', 2)[1].split(')')[0].split(',')
if states == ['day in lieu']:
# only a day in lieu, switch to all-cal logic
all_cal.add_component(event)
continue
for state in states:
state = state.strip().upper()
assert state in valid_states, 'state=%r' % state
state_cal[state].add_component(event)
else:
# for all states
all_cal.add_component(event)
print "Writing to disk..."
# done, write calendars.
with open('au_holidays.ics', 'wb') as f:
f.write(all_cal.to_ical())
for state in state_cal.keys():
with open('%s_holidays.ics' % state.lower(), 'wb') as f:
f.write(state_cal[state].to_ical())
print "All done!"
|
<SYSTEM_TASK:>
This is a python generator that yields tweets stored in a mongodb collection.
<END_TASK>
<USER_TASK:>
Description:
def get_collection_documents_generator(client, database_name, collection_name, spec, latest_n, sort_key):
"""
This is a python generator that yields tweets stored in a mongodb collection.
Tweet "created_at" field is assumed to have been stored in the format supported by MongoDB.
Inputs: - client: A pymongo MongoClient object.
- database_name: The name of a Mongo database as a string.
- collection_name: The name of the tweet collection as a string.
- spec: A python dictionary that defines higher query arguments.
- latest_n: The number of latest results we require from the mongo document collection.
- sort_key: A field name according to which we will sort in ascending order.
Yields: - document: A document in python dictionary (json) format.
"""
|
mongo_database = client[database_name]
collection = mongo_database[collection_name]
collection.create_index(sort_key)
if latest_n is not None:
skip_n = max(collection.count() - latest_n, 0)
cursor = collection.find(filter=spec).sort([(sort_key, ASCENDING), ])
cursor = cursor[skip_n:]
else:
cursor = collection.find(filter=spec).sort([(sort_key, ASCENDING), ])
for document in cursor:
yield document
|
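A usage sketch; host, database and collection names are illustrative:

from pymongo import MongoClient

client = MongoClient('localhost', 27017)
gen = get_collection_documents_generator(client, 'twitter_db', 'tweets',
                                         spec={'lang': 'en'},
                                         latest_n=1000,
                                         sort_key='created_at')
for document in gen:
    print(document['_id'])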
<SYSTEM_TASK:>
Extract the largest connected component from a graph.
<END_TASK>
<USER_TASK:>
Description:
def extract_connected_components(graph, connectivity_type, node_to_id):
"""
Extract the largest connected component from a graph.
Inputs: - graph: An adjacency matrix in scipy sparse matrix format.
- connectivity_type: A string that can be either: "strong" or "weak".
- node_to_id: A map from graph node id to Twitter id, in python dictionary format.
Outputs: - largest_connected_component: An adjacency matrix in scipy sparse matrix format.
- new_node_to_id: A map from graph node id to Twitter id, in python dictionary format.
- old_node_list: List of nodes from the possibly disconnected original graph.
Raises: - RuntimeError: If the input graph is empty.
"""
|
# Get a networkx graph.
nx_graph = nx.from_scipy_sparse_matrix(graph, create_using=nx.DiGraph())
# Calculate all connected components in graph.
if connectivity_type == "weak":
largest_connected_component_list = nxalgcom.weakly_connected_component_subgraphs(nx_graph)
elif connectivity_type == "strong":
largest_connected_component_list = nxalgcom.strongly_connected_component_subgraphs(nx_graph)
else:
print("Invalid connectivity type input.")
raise RuntimeError
# Handle empty graph.
try:
largest_connected_component = max(largest_connected_component_list, key=len)
except ValueError:
print("Error: Empty graph.")
raise RuntimeError
old_node_list = largest_connected_component.nodes()
node_to_node = dict(zip(np.arange(len(old_node_list)), old_node_list))
largest_connected_component = nx.to_scipy_sparse_matrix(largest_connected_component, dtype=np.float64, format="csr")
# Make node_to_id.
new_node_to_id = {k: node_to_id[v] for k, v in node_to_node.items()}
return largest_connected_component, new_node_to_id, old_node_list
|
<SYSTEM_TASK:>
sends an email using the [email protected] account
<END_TASK>
<USER_TASK:>
Description:
def sendEmail(self, subject, body, toAddress=False):
"""
sends an email using the [email protected] account
"""
|
if not toAddress:
toAddress = self.toAddress
toAddress = toAddress.split(';')
message = MIMEText(body)
message['Subject'] = subject
message['From'] = self.fromAddress
message['To'] = ','.join(toAddress)
if not self.testing:
s = SMTP(self.server, self.port)
s.sendmail(self.fromAddress, toAddress, message.as_string())
s.quit()
print('email sent')
else:
print('***Begin Test Email Message***')
print(message)
print('***End Test Email Message***')
|
<SYSTEM_TASK:>
Return a list of possible completions for the string ending at the point.
<END_TASK>
<USER_TASK:>
Description:
def _get_completions(self):
"""Return a list of possible completions for the string ending at the point.
Also set begidx and endidx in the process."""
|
completions = []
self.begidx = self.l_buffer.point
self.endidx = self.l_buffer.point
buf=self.l_buffer.line_buffer
if self.completer:
# get the string to complete
while self.begidx > 0:
self.begidx -= 1
if buf[self.begidx] in self.completer_delims:
self.begidx += 1
break
text = ensure_str(u''.join(buf[self.begidx:self.endidx]))
log(u'complete text="%s"' % ensure_unicode(text))
i = 0
while 1:
try:
r = ensure_unicode(self.completer(text, i))
except IndexError:
break
i += 1
if r is None:
break
elif r and r not in completions:
completions.append(r)
else:
pass
log(u'text completions=<%s>' % map(ensure_unicode, completions))
if (self.complete_filesystem == "on") and not completions:
# get the filename to complete
while self.begidx > 0:
self.begidx -= 1
if buf[self.begidx] in u' \t\n':
self.begidx += 1
break
text = ensure_str(u''.join(buf[self.begidx:self.endidx]))
log(u'file complete text="%s"' % ensure_unicode(text))
completions = map(ensure_unicode, glob.glob(os.path.expanduser(text) + '*'))
if self.mark_directories == u'on':
mc = []
for f in completions:
if os.path.isdir(f):
mc.append(f + os.sep)
else:
mc.append(f)
completions = mc
log(u'fnames=<%s>' % map(ensure_unicode, completions))
return completions
|
<SYSTEM_TASK:>
u"""Attempt to perform completion on the text before point. The
<END_TASK>
<USER_TASK:>
Description:
def complete(self, e): # (TAB)
u"""Attempt to perform completion on the text before point. The
actual completion performed is application-specific. The default is
filename completion."""
|
completions = self._get_completions()
if completions:
cprefix = commonprefix(completions)
if len(cprefix) > 0:
rep = [ c for c in cprefix ]
point=self.l_buffer.point
self.l_buffer[self.begidx:self.endidx] = rep
self.l_buffer.point = point + len(rep) - (self.endidx - self.begidx)
if len(completions) > 1:
if self.show_all_if_ambiguous == u'on':
self._display_completions(completions)
else:
self._bell()
else:
self._bell()
self.finalize()
|
<SYSTEM_TASK:>
u"""List the possible completions of the text before point.
<END_TASK>
<USER_TASK:>
Description:
def possible_completions(self, e): # (M-?)
u"""List the possible completions of the text before point. """
|
completions = self._get_completions()
self._display_completions(completions)
self.finalize()
|
<SYSTEM_TASK:>
u"""Insert all completions of the text before point that would have
<END_TASK>
<USER_TASK:>
Description:
def insert_completions(self, e): # (M-*)
u"""Insert all completions of the text before point that would have
been generated by possible-completions."""
|
completions = self._get_completions()
b = self.begidx
e = self.endidx
for comp in completions:
rep = [ c for c in comp ]
rep.append(' ')
self.l_buffer[b:e] = rep
b += len(rep)
e = b
self.line_cursor = b
self.finalize()
|
<SYSTEM_TASK:>
u"""Insert text into the command line.
<END_TASK>
<USER_TASK:>
Description:
def insert_text(self, string):
u"""Insert text into the command line."""
|
self.l_buffer.insert_text(string, self.argument_reset)
self.finalize()
|
<SYSTEM_TASK:>
u"""Delete the character at point. If point is at the beginning of
<END_TASK>
<USER_TASK:>
Description:
def delete_char(self, e): # (C-d)
u"""Delete the character at point. If point is at the beginning of
the line, there are no characters in the line, and the last
character typed was not bound to delete-char, then return EOF."""
|
self.l_buffer.delete_char(self.argument_reset)
self.finalize()
|
<SYSTEM_TASK:>
u"""Paste windows clipboard.
<END_TASK>
<USER_TASK:>
Description:
def paste(self,e):
u"""Paste windows clipboard.
Assume single line strip other lines and end of line markers and trailing spaces"""
|
#(Control-v)
if self.enable_win32_clipboard:
txt=clipboard.get_clipboard_text_and_convert(False)
txt=txt.split("\n")[0].strip("\r").strip("\n")
log("paste: >%s<"%map(ord,txt))
self.insert_text(txt)
self.finalize()
|
<SYSTEM_TASK:>
u"""Print all of the functions and their key bindings to the Readline
<END_TASK>
<USER_TASK:>
Description:
def dump_functions(self, e): # ()
u"""Print all of the functions and their key bindings to the Readline
output stream. If a numeric argument is supplied, the output is
formatted in such a way that it can be made part of an inputrc
file. This command is unbound by default."""
|
print
txt="\n".join(self.rl_settings_to_string())
print txt
self._print_prompt()
self.finalize()
|
<SYSTEM_TASK:>
Calls the ctmc.ctmc function
<END_TASK>
<USER_TASK:>
Description:
def fit(self, X, y=None):
"""Calls the ctmc.ctmc function
Parameters
----------
X : list of lists
(see ctmc function 'data')
y
not used, present for API consistence purpose.
"""
|
self.transmat, self.genmat, self.transcount, self.statetime = ctmc(
X, self.numstates, self.transintv, self.toltime, self.debug)
return self
|
<SYSTEM_TASK:>
Instantaneous RV of star 1 with respect to system center-of-mass
<END_TASK>
<USER_TASK:>
Description:
def RV_1(self):
"""Instantaneous RV of star 1 with respect to system center-of-mass
"""
|
return self.orbpop_long.RV * (self.orbpop_long.M2 / (self.orbpop_long.M1 + self.orbpop_long.M2))
|
<SYSTEM_TASK:>
Instantaneous RV of star 2 with respect to system center-of-mass
<END_TASK>
<USER_TASK:>
Description:
def RV_2(self):
"""Instantaneous RV of star 2 with respect to system center-of-mass
"""
|
return -self.orbpop_long.RV * (self.orbpop_long.M1 /
(self.orbpop_long.M1 + self.orbpop_long.M2)) +\
self.orbpop_short.RV_com1
|
<SYSTEM_TASK:>
Instantaneous RV of star 3 with respect to system center-of-mass
<END_TASK>
<USER_TASK:>
Description:
def RV_3(self):
"""Instantaneous RV of star 3 with respect to system center-of-mass
"""
|
return -self.orbpop_long.RV * (self.orbpop_long.M1 / (self.orbpop_long.M1 + self.orbpop_long.M2)) +\
self.orbpop_short.RV_com2
|
<SYSTEM_TASK:>
Projected sky separation of stars
<END_TASK>
<USER_TASK:>
Description:
def Rsky(self):
"""Projected sky separation of stars
"""
|
return np.sqrt(self.position.x**2 + self.position.y**2)
|
<SYSTEM_TASK:>
RVs of star 1 relative to center-of-mass
<END_TASK>
<USER_TASK:>
Description:
def RV_com1(self):
"""RVs of star 1 relative to center-of-mass
"""
|
return self.RV * (self.M2 / (self.M1 + self.M2))
|
<SYSTEM_TASK:>
RVs of star 2 relative to center-of-mass
<END_TASK>
<USER_TASK:>
Description:
def RV_com2(self):
"""RVs of star 2 relative to center-of-mass
"""
|
return -self.RV * (self.M1 / (self.M1 + self.M2))
|
<SYSTEM_TASK:>
Saves all relevant data to .h5 file; so state can be restored.
<END_TASK>
<USER_TASK:>
Description:
def save_hdf(self,filename,path=''):
"""Saves all relevant data to .h5 file; so state can be restored.
"""
|
self.dataframe.to_hdf(filename,'{}/df'.format(path))
|
<SYSTEM_TASK:>
Returns the cardinality of the given resource attribute.
<END_TASK>
<USER_TASK:>
Description:
def get_attribute_cardinality(attribute):
"""
Returns the cardinality of the given resource attribute.
:returns: One of the constants defined in
:class:`everest.constants.CARDINALITY_CONSTANTS`.
:raises ValueError: If the given attribute is not a relation attribute
(i.e., if it is a terminal attribute).
"""
|
if attribute.kind == RESOURCE_ATTRIBUTE_KINDS.MEMBER:
card = CARDINALITY_CONSTANTS.ONE
elif attribute.kind == RESOURCE_ATTRIBUTE_KINDS.COLLECTION:
card = CARDINALITY_CONSTANTS.MANY
else:
raise ValueError('Can not determine cardinality for non-terminal '
'attributes.')
return card
|
<SYSTEM_TASK:>
Load a configuration from a default or specified configuration file, accessing a default or
<END_TASK>
<USER_TASK:>
Description:
def setup(path_config="~/.config/scalar/config.yaml", configuration_name=None):
"""
Load a configuration from a default or specified configuration file, accessing a default or
specified configuration name.
"""
|
global config
global client
global token
global room
# config file
path_config = Path(path_config).expanduser()
log.debug("load config {path}".format(path = path_config))
if not path_config.exists():
log.error("no config {path} found".format(path = path_config))
sys.exit()
else:
with open(str(path_config), "r") as _file:
config = yaml.safe_load(_file)
if not configuration_name:
for configuration in list(config["configurations"].items()):
if configuration[1]["default"]:
config = configuration[1]
else:
config["configurations"][configuration_name]
# connect to homeserver and room
log.debug("Matrix username: " + config["username"])
log.debug("connect to homeserver " + config["homeserver"])
client = MatrixClient(config["homeserver"])
token = client.login_with_password(username = config["username"], password = config["passcode"])
log.debug("connect to room " + config["room_alias"])
room = client.join_room(config["room_alias"])
|
<SYSTEM_TASK:>
A wrapper to start RQ worker as a new process.
<END_TASK>
<USER_TASK:>
Description:
def worker_wrapper(worker_instance, pid_path):
"""
A wrapper to start RQ worker as a new process.
:param worker_instance: RQ's worker instance
:param pid_path: A file to check if the worker
is running or not
"""
|
def exit_handler(*args):
"""
Remove pid file on exit
"""
if len(args) > 0:
print("Exit py signal {signal}".format(signal=args[0]))
remove(pid_path)
atexit.register(exit_handler)
signal.signal(signal.SIGINT, exit_handler)
signal.signal(signal.SIGTERM, exit_handler)
worker_instance.work()
# Remove pid file if the process can not catch signals
exit_handler(2)
|
<SYSTEM_TASK:>
Trigger new process as a RQ worker.
<END_TASK>
<USER_TASK:>
Description:
def start_worker(self):
"""Trigger new process as a RQ worker."""
|
if not self.include_rq:
return None
worker = Worker(queues=self.queues,
connection=self.connection)
worker_pid_path = current_app.config.get(
"{}_WORKER_PID".format(self.config_prefix), 'rl_worker.pid'
)
try:
worker_pid_file = open(worker_pid_path, 'r')
worker_pid = int(worker_pid_file.read())
print("Worker already started with PID=%d" % worker_pid)
worker_pid_file.close()
return worker_pid
except (IOError, TypeError):
self.worker_process = Process(target=worker_wrapper, kwargs={
'worker_instance': worker,
'pid_path': worker_pid_path
})
self.worker_process.start()
worker_pid_file = open(worker_pid_path, 'w')
worker_pid_file.write("%d" % self.worker_process.pid)
worker_pid_file.close()
print("Start a worker process with PID=%d" %
self.worker_process.pid)
return self.worker_process.pid
|
<SYSTEM_TASK:>
upgrade many libs.
<END_TASK>
<USER_TASK:>
Description:
def upgrade_many(upgrade=True, create_examples_all=True):
"""upgrade many libs.
source: http://arduino.cc/playground/Main/LibraryList
you can set your arduino path if it is not default
os.environ['ARDUINO_HOME'] = '/home/...'
"""
|
urls = set()
def inst(url):
print('upgrading %s' % url)
assert url not in urls
urls.add(url)
try:
lib = install_lib(url, upgrade)
print(' -> %s' % lib)
except Exception as e:
print(e)
############################
# github.com
############################
inst('https://github.com/sensorium/Mozzi/zipball/master')
inst('https://github.com/madsci1016/Arduino-EasyTransfer/zipball/master')
inst('https://github.com/sparkfun/SevSeg/zipball/master')
inst(
'https://github.com/madsci1016/Arduino-SoftEasyTransfer/zipball/master')
inst('https://github.com/madsci1016/Arduino-PS2X/zipball/master')
# inst('http://github.com/wimleers/flexitimer2/zipball/v1.0')# can't install
inst('https://github.com/kerinin/arduino-splines/zipball/master')
inst('https://github.com/asynclabs/WiShield/zipball/master')
inst('https://github.com/asynclabs/dataflash/zipball/master')
inst('https://github.com/slugmobile/AtTouch/zipball/master')
inst(
'https://github.com/carlynorama/Arduino-Library-Button/zipball/master')
inst(
'https://github.com/carlynorama/Arduino-Library-FancyLED/zipball/master')
inst('https://github.com/markfickett/arduinomorse/zipball/master')
inst('https://github.com/rocketscream/Low-Power/zipball/master')
inst(
'https://github.com/arduino-libraries/CapacitiveSensor/zipball/master')
############################
# arduiniana.org
############################
# TODO: how to get latest version??
inst('http://arduiniana.org/PString/PString2.zip')
inst('http://arduiniana.org/Flash/Flash3.zip')
inst('http://arduiniana.org/NewSoftSerial/NewSoftSerial10c.zip')
inst('http://arduiniana.org/Streaming/Streaming4.zip')
inst('http://arduiniana.org/PWMServo/PWMServo.zip')
inst('http://arduiniana.org/TinyGPS/TinyGPS10.zip')
############################
# google
############################
# TODO: how to get latest version??
# parse http://code.google.com/p/arduino-pinchangeint/downloads/list
# simplified version in core
inst('http://rogue-code.googlecode.com/files/Arduino-Library-Tone.zip')
inst('http://arduino-playground.googlecode.com/files/LedDisplay03.zip')
inst('http://sserial2mobile.googlecode.com/files/SSerial2Mobile-1.1.0.zip')
inst('http://webduino.googlecode.com/files/webduino-1.4.1.zip')
inst('http://arduino-pid-library.googlecode.com/files/PID_v1.0.1.zip')
inst('http://ideoarduinolibraries.googlecode.com/files/Qtouch1Wire.zip')
inst('http://arduino-timerone.googlecode.com/files/TimerOne-v8.zip')
inst('http://arduinounit.googlecode.com/files/arduinounit-1.4.2.zip')
inst('http://arduinode.googlecode.com/files/arduinode_0.1.zip')
inst('http://arduino-edb.googlecode.com/files/EDB_r7.zip')
inst('http://arduino-dblib.googlecode.com/files/DB.zip')
inst(
'http://morse-endecoder.googlecode.com/files/Morse_EnDecoder_2010.12.06.tar.gz')
inst('http://arduino-pinchangeint.googlecode.com/files/PinChangeInt.zip')
inst('http://arduino-tvout.googlecode.com/files/TVout_R5.91.zip')
inst('http://narcoleptic.googlecode.com/files/Narcoleptic_v1a.zip')
############################
# teensy
############################
inst('http://www.pjrc.com/teensy/arduino_libraries/OneWire.zip')
inst('http://www.pjrc.com/teensy/arduino_libraries/VirtualWire.zip')
inst('http://www.pjrc.com/teensy/arduino_libraries/FrequencyTimer2.zip')
inst('http://www.pjrc.com/teensy/arduino_libraries/FreqCount.zip')
inst('http://www.pjrc.com/teensy/arduino_libraries/FreqMeasure.zip')
############################
# others
############################
# too big
# inst('http://www.state-machine.com/arduino/qp_arduino.zip')
# The owner of this website (download.milesburton.com) has banned your access based on your browser's signature
# inst('http://download.milesburton.com/Arduino/MaximTemperature/DallasTemperature_370Beta.zip')
inst('http://www.shikadi.net/files/arduino/SerialIP-1.0.zip')
inst(
'http://siggiorn.com/wp-content/uploads/libraries/ArduinoByteBuffer.zip')
inst(
'http://siggiorn.com/wp-content/uploads/libraries/ArduinoSerialManager.zip')
inst('http://arduino-tweet.appspot.com/Library-Twitter-1.2.2.zip')
# can't install
# inst('http://gkaindl.com/php/download.php?key=ArduinoEthernet')
inst(
'http://sebastian.setz.name/wp-content/uploads/2011/01/multiCameraIrControl_1-5.zip')
inst('http://alexandre.quessy.net/static/avr/Tween_01.zip')
inst(
'http://www.lpelettronica.it/images/stories/LPM11162_images/Arduino/LPM11162_ArduinoLib_v1.zip')
# inst('http://nootropicdesign.com/hackvision/downloads/Controllers.zip')
inst(
'http://interface.khm.de/wp-content/uploads/2009/01/FreqCounter_1_12.zip')
inst(
'http://interface.khm.de/wp-content/uploads/2010/06/FreqPeriod_1_12.zip')
############################
# arduino.cc
############################
inst('http://arduino.cc/playground/uploads/Main/PS2Keyboard002.zip')
inst('http://arduino.cc/playground/uploads/Code/Metro.zip')
inst('http://www.arduino.cc/playground/uploads/Main/MsTimer2.zip')
# can't install
# inst('http://www.arduino.cc/playground/uploads/Code/Time.zip')
inst('http://arduino.cc/playground/uploads/Main/LedControl.zip')
# can't install
# inst('http://www.arduino.cc/playground/uploads/Code/ks0108GLCD.zip')#
inst('http://arduino.cc/playground/uploads/Code/Bounce.zip')
inst('http://arduino.cc/playground/uploads/Main/CapacitiveSense003.zip')
inst('http://arduino.cc/playground/uploads/Main/PinChangeInt.zip')
# can't install
# inst('http://arduino.cc/playground/uploads/Code/TimerThree.zip')
inst('http://arduino.cc/playground/uploads/Code/TimedAction-1_6.zip')
# can't install
# inst('http://www.arduino.cc/playground/uploads/Code/Time.zip')
inst('http://arduino.cc/playground/uploads/Code/EventFuse.zip')
inst('http://arduino.cc/playground/uploads/Code/Charlieplex.zip')
inst('http://arduino.cc/playground/uploads/Code/DigitalToggle.zip')
inst('http://arduino.cc/playground/uploads/Code/Enerlib.zip')
inst('http://arduino.cc/playground/uploads/Code/AdvButton_11.zip')
# old version
# inst('http://arduino.cc/playground/uploads/Code/AdvButton.zip')
# can't install
# inst('http://arduino.cc/playground/uploads/Code/SerialDebugger.zip') #
inst('http://arduino.cc/playground/uploads/Code/MatrixMath.zip')
inst('http://arduino.cc/playground/uploads/Code/StackArray.zip')
inst('http://arduino.cc/playground/uploads/Code/StackList.zip')
inst('http://arduino.cc/playground/uploads/Code/QueueArray.zip')
inst('http://arduino.cc/playground/uploads/Code/QueueList.zip')
inst('http://arduino.cc/playground/uploads/Code/Ping-1_3.zip')
inst('http://www.arduino.cc/playground/uploads/Code/LED.zip')
# inst('')
if create_examples_all:
print('create "all" menu item')
exampallcreate.create_examples_all()
print('install finished')
|
<SYSTEM_TASK:>
Flags the batch as confirmed by updating
<END_TASK>
<USER_TASK:>
Description:
def confirm(self, batch_id=None, filename=None):
"""Flags the batch as confirmed by updating
confirmation_datetime on the history model for this batch.
"""
|
if batch_id or filename:
export_history = self.history_model.objects.using(self.using).filter(
Q(batch_id=batch_id) | Q(filename=filename),
sent=True,
confirmation_code__isnull=True,
)
else:
export_history = self.history_model.objects.using(self.using).filter(
sent=True, confirmation_code__isnull=True
)
if export_history.count() == 0:
raise ConfirmationError(
"Nothing to do. No history of sent and unconfirmed files"
)
else:
confirmation_code = ConfirmationCode()
export_history.update(
confirmation_code=confirmation_code.identifier,
confirmation_datetime=get_utcnow(),
)
return confirmation_code.identifier
|
<SYSTEM_TASK:>
Performs stemming or lemmatizing on a single word.
<END_TASK>
<USER_TASK:>
Description:
def clean_single_word(word, lemmatizing="wordnet"):
"""
Performs stemming or lemmatizing on a single word.
If we are to search for a word in a clean bag-of-words, we need to search it after the same kind of preprocessing.
Inputs: - word: A string containing the source word.
- lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
Output: - lemma: The resulting clean lemma or stem.
"""
|
if lemmatizing == "porter":
porter = PorterStemmer()
lemma = porter.stem(word)
elif lemmatizing == "snowball":
snowball = SnowballStemmer('english')
lemma = snowball.stem(word)
elif lemmatizing == "wordnet":
wordnet = WordNetLemmatizer()
lemma = wordnet.lemmatize(word)
else:
print("Invalid lemmatizer argument.")
raise RuntimeError
return lemma
|
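A quick comparison of the three options (requires the relevant nltk data to be downloaded):

print(clean_single_word('running', lemmatizing='porter'))    # 'run'
print(clean_single_word('running', lemmatizing='snowball'))  # 'run'
print(clean_single_word('running', lemmatizing='wordnet'))   # 'running' (noun POS assumed)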
<SYSTEM_TASK:>
Extracts a clean bag-of-words from a document.
<END_TASK>
<USER_TASK:>
Description:
def clean_document(document,
sent_tokenize, _treebank_word_tokenize,
tagger,
lemmatizer,
lemmatize,
stopset,
first_cap_re, all_cap_re,
digits_punctuation_whitespace_re,
pos_set):
"""
Extracts a clean bag-of-words from a document.
Inputs: - document: A string containing some text.
Output: - lemma_list: A python list of lemmas or stems.
- lemma_to_keywordbag: A python dictionary that maps stems/lemmas to original topic keywords.
"""
|
####################################################################################################################
# Tokenizing text
####################################################################################################################
# start_time = time.perf_counter()
try:
tokenized_document = fast_word_tokenize(document, sent_tokenize, _treebank_word_tokenize)
except LookupError:
print("Warning: Could not tokenize document. If these warnings are commonplace, there is a problem with the nltk resources.")
lemma_list = list()
lemma_to_keywordbag = defaultdict(lambda: defaultdict(int))
return lemma_list, lemma_to_keywordbag
# elapsed_time = time.perf_counter() - start_time
# print("Tokenize", elapsed_time)
####################################################################################################################
# Separate ["camelCase"] into ["camel", "case"] and make every letter lower case
####################################################################################################################
# start_time = time.perf_counter()
tokenized_document = [separate_camel_case(token, first_cap_re, all_cap_re).lower() for token in tokenized_document]
# elapsed_time = time.perf_counter() - start_time
# print("camelCase", elapsed_time)
####################################################################################################################
# Parts of speech tagger
####################################################################################################################
# start_time = time.perf_counter()
tokenized_document = tagger.tag(tokenized_document)
tokenized_document = [token[0] for token in tokenized_document if (token[1] in pos_set)]
# elapsed_time = time.perf_counter() - start_time
# print("POS", elapsed_time)
####################################################################################################################
# Removing digits, punctuation and whitespace
####################################################################################################################
# start_time = time.perf_counter()
tokenized_document_no_punctuation = list()
append_token = tokenized_document_no_punctuation.append
for token in tokenized_document:
new_token = remove_digits_punctuation_whitespace(token, digits_punctuation_whitespace_re)
if new_token != u'':
append_token(new_token)
# elapsed_time = time.perf_counter() - start_time
# print("digits etc", elapsed_time)
####################################################################################################################
# Removing stopwords
####################################################################################################################
# start_time = time.perf_counter()
tokenized_document_no_stopwords = list()
append_word = tokenized_document_no_stopwords.append
for word in tokenized_document_no_punctuation:
if word not in stopset:
append_word(word)
# elapsed_time = time.perf_counter() - start_time
# print("stopwords 1", elapsed_time)
####################################################################################################################
# Stemming and Lemmatizing
####################################################################################################################
# start_time = time.perf_counter()
lemma_to_keywordbag = defaultdict(lambda: defaultdict(int))
final_doc = list()
append_lemma = final_doc.append
for word in tokenized_document_no_stopwords:
lemma = lemmatize(word)
append_lemma(lemma)
lemma_to_keywordbag[lemma][word] += 1
# elapsed_time = time.perf_counter() - start_time
# print("lemmatize", elapsed_time)
####################################################################################################################
# One more stopword removal
####################################################################################################################
# start_time = time.perf_counter()
lemma_list = list()
append_word = lemma_list.append
for word in final_doc:
if word not in stopset:
append_word(word)
# elapsed_time = time.perf_counter() - start_time
# print("stopwords 2", elapsed_time)
return lemma_list, lemma_to_keywordbag
|
<SYSTEM_TASK:>
Extracts a bag-of-words from each document in a corpus serially.
<END_TASK>
<USER_TASK:>
Description:
def clean_corpus_serial(corpus, lemmatizing="wordnet"):
"""
Extracts a bag-of-words from each document in a corpus serially.
Inputs: - corpus: A python list of python strings. Each string is a document.
- lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
Output: - list_of_bags_of_words: A list of python dictionaries representing bags-of-words.
- lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
"""
|
list_of_bags_of_words = list()
append_bag_of_words = list_of_bags_of_words.append
lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int))
for document in corpus:
word_list, lemma_to_keywordbag = clean_document(document=document, lemmatizing=lemmatizing) # TODO: Alter this.
bag_of_words = combine_word_list(word_list)
append_bag_of_words(bag_of_words)
for lemma, keywordbag in lemma_to_keywordbag.items():
for keyword, multiplicity in keywordbag.items():
lemma_to_keywordbag_total[lemma][keyword] += multiplicity
return list_of_bags_of_words, lemma_to_keywordbag_total
|
<SYSTEM_TASK:>
This extracts one bag-of-words from a list of strings. The documents are mapped to parallel processes.
<END_TASK>
<USER_TASK:>
Description:
def extract_bag_of_words_from_corpus_parallel(corpus, lemmatizing="wordnet"):
"""
This extracts one bag-of-words from a list of strings. The documents are mapped to parallel processes.
Inputs: - corpus: A list of strings.
- lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
Output: - bag_of_words: This is a bag-of-words in python dictionary format.
- lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
"""
|
####################################################################################################################
# Map and reduce document cleaning.
####################################################################################################################
# Build a pool of processes.
pool = Pool(processes=get_threads_number()*2,)
# Partition the tweets to chunks.
partitioned_corpus = chunks(corpus, len(corpus) // get_threads_number())
# Map the cleaning of the tweet corpus to a pool of processes.
results = pool.map(partial(clean_corpus_serial, lemmatizing=lemmatizing), partitioned_corpus)
# Each worker returns a (list_of_bags_of_words, lemma_to_keywordbag) pair.
list_of_bags_of_words = [bag for bags, _ in results for bag in bags]
list_of_lemma_to_keywordset_maps = [mapping for _, mapping in results]
# Reduce dictionaries to a single dictionary serially.
bag_of_words = reduce_list_of_bags_of_words(list_of_bags_of_words)
# Reduce lemma to keyword maps to a single dictionary.
lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int))
for lemma_to_keywordbag in list_of_lemma_to_keywordset_maps:
for lemma, keywordbag in lemma_to_keywordbag.items():
for keyword, multiplicity in keywordbag.items():
lemma_to_keywordbag_total[lemma][keyword] += multiplicity
return bag_of_words, lemma_to_keywordbag_total
|
<SYSTEM_TASK:>
Executes routes.py route middleware
<END_TASK>
<USER_TASK:>
Description:
def middleware(func):
""" Executes routes.py route middleware """
|
@wraps(func)
def parse(*args, **kwargs):
""" get middleware from route, execute middleware in order """
middleware = copy.deepcopy(kwargs['middleware'])
kwargs.pop('middleware')
if request.method == "OPTIONS":
# return 200 json response for CORS
return JsonResponse(200)
if middleware is None:
return func(*args, **kwargs)
for mware in middleware:
ware = mware()
if ware.status is False:
return ware.response
return func(*args, **kwargs)
return parse
|
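A hypothetical registration sketch showing the expected calling convention; AuthMiddleware and the view are illustrative, and JsonResponse is the response class the decorator itself uses:

class AuthMiddleware:
    def __init__(self):
        # a real middleware would inspect the request here
        self.status = True               # False short-circuits the view
        self.response = JsonResponse(401)

@middleware
def list_users(*args, **kwargs):
    return JsonResponse(200)

# the router is expected to inject the middleware list via kwargs
# (needs an active request context to run):
# list_users(middleware=[AuthMiddleware])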
<SYSTEM_TASK:>
progress_bar_media simple tag
<END_TASK>
<USER_TASK:>
Description:
def progress_bar_media():
"""
progress_bar_media simple tag
return rendered script tag for javascript used by progress_bar
"""
|
if PROGRESSBARUPLOAD_INCLUDE_JQUERY:
js = ["http://code.jquery.com/jquery-1.8.3.min.js",]
else:
js = []
js.append("js/progress_bar.js")
m = Media(js=js)
return m.render()
|
<SYSTEM_TASK:>
r"""Send a message to the journal.
<END_TASK>
<USER_TASK:>
Description:
def send(MESSAGE, SOCKET, MESSAGE_ID=None,
CODE_FILE=None, CODE_LINE=None, CODE_FUNC=None,
**kwargs):
r"""Send a message to the journal.
>>> journal.send('Hello world')
>>> journal.send('Hello, again, world', FIELD2='Greetings!')
>>> journal.send('Binary message', BINARY=b'\xde\xad\xbe\xef')
Value of the MESSAGE argument will be used for the MESSAGE=
field. MESSAGE must be a string and will be sent as UTF-8 to
the journal.
MESSAGE_ID can be given to uniquely identify the type of
message. It must be a string or a uuid.UUID object.
CODE_LINE, CODE_FILE, and CODE_FUNC can be specified to
identify the caller. Unless at least one of the three is given,
values are extracted from the stack frame of the caller of
send(). CODE_FILE and CODE_FUNC must be strings, CODE_LINE
must be an integer.
Additional fields for the journal entry can only be specified
as keyword arguments. The payload can be either a string or
bytes. A string will be sent as UTF-8, and bytes will be sent
as-is to the journal.
Other useful fields include PRIORITY, SYSLOG_FACILITY,
SYSLOG_IDENTIFIER, SYSLOG_PID.
"""
|
args = ['MESSAGE=' + MESSAGE]
if MESSAGE_ID is not None:
id = getattr(MESSAGE_ID, 'hex', MESSAGE_ID)
args.append('MESSAGE_ID=' + id)
if CODE_LINE is None and CODE_FILE is None and CODE_FUNC is None:
CODE_FILE, CODE_LINE, CODE_FUNC = \
_traceback.extract_stack(limit=2)[0][:3]
if CODE_FILE is not None:
args.append('CODE_FILE=' + CODE_FILE)
if CODE_LINE is not None:
args.append('CODE_LINE={:d}'.format(CODE_LINE))
if CODE_FUNC is not None:
args.append('CODE_FUNC=' + CODE_FUNC)
args.extend(_make_line(key.upper(), val) for key, val in kwargs.items())
return sendv(SOCKET, *args)
|
<SYSTEM_TASK:>
Checks if item already exists in database
<END_TASK>
<USER_TASK:>
Description:
def exists(self):
""" Checks if item already exists in database """
|
self_object = self.query.filter_by(id=self.id).first()
return self_object is not None
|
<SYSTEM_TASK:>
Converts a raw GCVS record to a dictionary of star data.
<END_TASK>
<USER_TASK:>
Description:
def row_to_dict(self, row):
"""
Converts a raw GCVS record to a dictionary of star data.
"""
|
constellation = self.parse_constellation(row[0])
name = self.parse_name(row[1])
ra, dec = self.parse_coordinates(row[2])
variable_type = row[3].strip()
max_magnitude, symbol = self.parse_magnitude(row[4])
min_magnitude, symbol = self.parse_magnitude(row[5])
if symbol == '(' and max_magnitude is not None:
# this is actually amplitude
min_magnitude = max_magnitude + min_magnitude
epoch = self.parse_epoch(row[8])
period = self.parse_period(row[10])
return {
'constellation': constellation,
'name': name,
'ra': ra,
'dec': dec,
'variable_type': variable_type,
'max_magnitude': max_magnitude,
'min_magnitude': min_magnitude,
'epoch': epoch,
'period': period,
}
|
<SYSTEM_TASK:>
Converts magnitude field to a float value, or ``None`` if GCVS does
<END_TASK>
<USER_TASK:>
Description:
def parse_magnitude(self, magnitude_str):
"""
Converts magnitude field to a float value, or ``None`` if GCVS does
not list the magnitude.
Returns a tuple (magnitude, symbol), where symbol can be either an
empty string or a single character - one of '<', '>', '('.
"""
|
symbol = magnitude_str[0].strip()
magnitude = magnitude_str[1:6].strip()
return float(magnitude) if magnitude else None, symbol
|
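Illustrative GCVS-style fields and the (magnitude, symbol) pairs they yield, treating the method as a standalone helper:

parse_magnitude(' 11.9 ')   # -> (11.9, '')   plain value
parse_magnitude('<15.5 ')   # -> (15.5, '<')  fainter-than limit
parse_magnitude('( 2.5 ')   # -> (2.5, '(')   amplitude in parentheses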
<SYSTEM_TASK:>
Converts period field to a float value or ``None`` if there is
<END_TASK>
<USER_TASK:>
Description:
def parse_period(self, period_str):
"""
Converts period field to a float value or ``None`` if there is
no period in GCVS record.
"""
|
period = period_str.translate(TRANSLATION_MAP)[3:14].strip()
return float(period) if period else None
|
<SYSTEM_TASK:>
install hwpack from web or local file system.
<END_TASK>
<USER_TASK:>
Description:
def install_hwpack(url, replace_existing=False):
"""install hwpackrary from web or local files system.
:param url: web address or file path
:param replace_existing: bool
:rtype: None
"""
|
d = tmpdir(tmpdir())
f = download(url)
Archive(f).extractall(d)
clean_dir(d)
src_dhwpack = find_hwpack_dir(d)
targ_dhwpack = hwpack_dir() / src_dhwpack.name
doaction = 0
if targ_dhwpack.exists():
log.debug('hwpack already exists: %s', targ_dhwpack)
if replace_existing:
log.debug('remove %s', targ_dhwpack)
targ_dhwpack.rmtree()
doaction = 1
else:
doaction = 1
if doaction:
log.debug('move %s -> %s', src_dhwpack, targ_dhwpack)
src_dhwpack.move(targ_dhwpack)
hwpack_dir().copymode(targ_dhwpack)
for x in targ_dhwpack.walk():
hwpack_dir().copymode(x)
|
<SYSTEM_TASK:>
restore a volume from a backup
<END_TASK>
<USER_TASK:>
Description:
def restore(self, volume_id, **kwargs):
"""
restore a volume from a backup
"""
|
# These arguments are required
self.required('create', kwargs, ['backup', 'size'])
# Optional Arguments
volume_id = volume_id or str(uuid.uuid4())
kwargs['volume_type_name'] = kwargs.get('volume_type_name') or 'vtype'
kwargs['size'] = kwargs.get('size') or 1
# Make the request
return self.http_put('/volumes/%s' % volume_id,
params=self.unused(kwargs))
|
<SYSTEM_TASK:>
This method refactors a Protobuf file to import from a namespace
<END_TASK>
<USER_TASK:>
Description:
def proto_refactor(proto_filename, namespace, namespace_path):
"""This method refactors a Protobuf file to import from a namespace
that will map to the desired python package structure. It also ensures
that the syntax is set to "proto2", since protoc complains without it.
Args:
proto_filename (str): the protobuf filename to be refactored
namespace (str): the desired package name (i.e. "dropsonde.py2")
namespace_path (str): the desired path corresponding to the package
name (i.e. "dropsonde/py2")
"""
|
with open(proto_filename) as f:
data = f.read()
if not re.search('syntax = "proto2"', data):
insert_syntax = 'syntax = "proto2";\n'
data = insert_syntax + data
substitution = 'import "{}/\\1";'.format(namespace_path)
data = re.sub(r'import\s+"([^"]+\.proto)"\s*;', substitution, data)
return data
|
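A standalone demo of the import rewrite, using the namespace path from the docstring:

import re

data = 'import "uuid.proto";\nmessage Envelope {}\n'
substitution = 'import "{}/\\1";'.format('dropsonde/py2')
print(re.sub(r'import\s+"([^"]+\.proto)"\s*;', substitution, data))
# import "dropsonde/py2/uuid.proto";
# message Envelope {}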
<SYSTEM_TASK:>
This method runs the refactoring on all the Protobuf files in the
<END_TASK>
<USER_TASK:>
Description:
def proto_refactor_files(dest_dir, namespace, namespace_path):
"""This method runs the refactoring on all the Protobuf files in the
Dropsonde repo.
Args:
dest_dir (str): directory where the Protobuf files lives.
namespace (str): the desired package name (i.e. "dropsonde.py2")
namespace_path (str): the desired path corresponding to the package
name (i.e. "dropsonde/py2")
"""
|
for dn, dns, fns in os.walk(dest_dir):
for fn in fns:
fn = os.path.join(dn, fn)
if fnmatch.fnmatch(fn, '*.proto'):
data = proto_refactor(fn, namespace, namespace_path)
with open(fn, 'w') as f:
f.write(data)
|
<SYSTEM_TASK:>
Copies the source Protobuf files into a build directory.
<END_TASK>
<USER_TASK:>
Description:
def clone_source_dir(source_dir, dest_dir):
"""Copies the source Protobuf files into a build directory.
Args:
source_dir (str): source directory of the Protobuf files
dest_dir (str): destination directory of the Protobuf files
"""
|
if os.path.isdir(dest_dir):
print('removing', dest_dir)
shutil.rmtree(dest_dir)
shutil.copytree(source_dir, dest_dir)
|
<SYSTEM_TASK:>
Check whether the budget data package fields are all filled in, because
<END_TASK>
<USER_TASK:>
Description:
def are_budget_data_package_fields_filled_in(self, resource):
"""
Check whether the budget data package fields are all filled in, because
if they are not, this cannot be a budget data package.
"""
|
fields = ['country', 'currency', 'year', 'status']
return all(self.in_resource(f, resource) for f in fields)
|
<SYSTEM_TASK:>
Try to grab a budget data package schema from the resource.
<END_TASK>
<USER_TASK:>
Description:
def generate_budget_data_package(self, resource):
"""
Try to grab a budget data package schema from the resource.
The schema only allows fields which are defined in the budget
data package specification. If a field is found that is not in
the specification this will return a NotABudgetDataPackageException
and in that case we can just return and ignore the resource
"""
|
# Return if the budget data package fields have not been filled in
if not self.are_budget_data_package_fields_filled_in(resource):
return
try:
resource['schema'] = self.data.schema
except exceptions.NotABudgetDataPackageException:
log.debug('Resource is not a Budget Data Package')
resource['schema'] = []
return
# If the schema fits, this can be exported as a budget data package
# so we add the missing metadata fields to the resource.
resource['BudgetDataPackage'] = True
resource['standard'] = self.data.version
resource['granularity'] = self.data.granularity
resource['type'] = self.data.budget_type
|
<SYSTEM_TASK:>
If the resource has changed we try to generate a budget data
<END_TASK>
<USER_TASK:>
Description:
def before_update(self, context, current, resource):
"""
If the resource has changed we try to generate a budget data
package, but if it hasn't then we don't do anything
"""
|
# Return if the budget data package fields have not been filled in
if not self.are_budget_data_package_fields_filled_in(resource):
return
if resource.get('upload', '') == '':
# If it isn't an upload we check if it's the same url
if current['url'] == resource['url']:
# Return if it's the same
return
else:
self.data.load(resource['url'])
else:
self.data.load(resource['upload'].file)
self.generate_budget_data_package(resource)
|
<SYSTEM_TASK:>
This function serves to upload every file in a user-supplied
<END_TASK>
<USER_TASK:>
Description:
def upload_directory_contents(input_dict, environment_dict):
"""This function serves to upload every file in a user-supplied
source directory to all of the vessels in the current target group.
It essentially calls seash's `upload` function repeatedly, each
time with a file name taken from the source directory.
A note on the input_dict argument:
`input_dict` contains our own `command_dict` (see below), with
the `"[ARGUMENT]"` sub-key of `children` renamed to what
argument the user provided. In our case, this will be the source
dir to read from. (If not, this is an error!)
"""
|
# Check user input and seash state:
# 1. Make sure there is an active user key.
if environment_dict["currentkeyname"] is None:
raise seash_exceptions.UserError("""Error: Please set an identity before using 'uploaddir'!
Example:
!> loadkeys your_user_name
!> as your_user_name
your_user_name@ !>
""")
# 2. Make sure there is a target to work on.
if environment_dict["currenttarget"] is None:
raise seash_exceptions.UserError("""Error: Please set a target to work on before using 'uploaddir'!
Example:
your_user_name@ !> on browsegood
your_user_name@browsegood !>
""")
# 3. Complain if we don't have a source dir argument
try:
source_directory = input_dict["uploaddir"]["children"].keys()[0]
except IndexError:
raise seash_exceptions.UserError("""Error: Missing operand to 'uploaddir'
Please specify which source directory's contents you want uploaded, e.g.
your_user_name@browsegood !> uploaddir a_local_directory
""")
# Sanity check: Does the source dir exist?
if not os.path.exists(source_directory):
raise seash_exceptions.UserError("Error: Source directory '" + source_directory + "' does not exist.")
# Sanity check: Is the source dir a directory?
if not os.path.isdir(source_directory):
raise seash_exceptions.UserError("Error: Source directory '" + source_directory + "' is not a directory.\nDid you mean to use the 'upload' command instead?")
# Alright --- user input and seash state seem sane, let's do the work!
# These are the files we will need to upload:
file_list = os.listdir(source_directory)
for filename in file_list:
# We construct the filename-to-be uploaded from the source dir,
# the OS-specific path separator, and the actual file name.
# This is enough for `upload_target` to find the file.
path_and_filename = source_directory + os.sep + filename
if not os.path.isdir(path_and_filename):
print "Uploading '" + path_and_filename + "'..."
# Construct an input_dict containing command args for seash's
# `upload FILENAME` function.
# XXX There might be a cleaner way to do this.
faked_input_dict = {"upload": {"name": "upload",
"children": {path_and_filename: {"name": "filename"}}}}
command_callbacks.upload_filename(faked_input_dict, environment_dict)
else:
print "Skipping sub-directory '" + filename + "'. You may upload it separately."
|
<SYSTEM_TASK:>
Load a translator file
<END_TASK>
<USER_TASK:>
Description:
def __load_file(self, key_list) -> str:
""" Load a translator file """
|
file = str(key_list[0]) + self.extension
key_list.pop(0)
file_path = os.path.join(self.path, file)
if os.path.exists(file_path):
return Json.from_file(file_path)
else:
raise FileNotFoundError(file_path)
|
<SYSTEM_TASK:>
Load the given repository entity into the session and return a
<END_TASK>
<USER_TASK:>
Description:
def load(self, entity_class, entity):
"""
Load the given repository entity into the session and return a
clone. If it was already loaded before, look up the loaded entity
and return it.
All entities referenced by the loaded entity will also be loaded
(and cloned) recursively.
:raises ValueError: When an attempt is made to load an entity that
has no ID
"""
|
if self.__needs_flushing:
self.flush()
if entity.id is None:
raise ValueError('Can not load entity without an ID.')
cache = self.__get_cache(entity_class)
sess_ent = cache.get_by_id(entity.id)
if sess_ent is None:
if self.__clone_on_load:
sess_ent = self.__clone(entity, cache)
else: # Only needed by the nosql backend pragma: no cover
cache.add(entity)
sess_ent = entity
self.__unit_of_work.register_clean(entity_class, sess_ent)
return sess_ent
|
<SYSTEM_TASK:>
Display the environment of a started container
<END_TASK>
<USER_TASK:>
Description:
def onStart(self, event):
"""
Display the environment of a started container
"""
|
c = event.container
print '+' * 5, 'started:', c
kv = lambda s: s.split('=', 1)
env = {k: v for (k, v) in (kv(s) for s in c.attrs['Config']['Env'])}
print env
|
<SYSTEM_TASK:>
Return a unique identifier for the folder data
<END_TASK>
<USER_TASK:>
Description:
def _identifier_data(self):
"""Return a unique identifier for the folder data"""
|
# Use only file names
data = [ff.name for ff in self.files]
data.sort()
# also use the folder name
data.append(self.path.name)
# add meta data
data += self._identifier_meta()
return hash_obj(data)
|
<SYSTEM_TASK:>
Search a folder for data files
<END_TASK>
<USER_TASK:>
Description:
def _search_files(path):
"""Search a folder for data files
.. versionchanged:: 0.6.0
`path` is not searched recursively anymore
"""
|
path = pathlib.Path(path)
fifo = []
for fp in path.glob("*"):
if fp.is_dir():
continue
for fmt in formats:
# series data is not supported in SeriesFolder
if not fmt.is_series and fmt.verify(fp):
fifo.append((fp, fmt.__name__))
break
# ignore qpimage formats if multiple formats were
# detected.
theformats = [ff[1] for ff in fifo]
formset = set(theformats)
if len(formset) > 1:
fmts_qpimage = ["SingleHdf5Qpimage", "SeriesHdf5Qpimage"]
fifo = [ff for ff in fifo if ff[1] not in fmts_qpimage]
# ignore raw tif files if single_tif_phasics is detected
if len(formset) > 1 and "SingleTifPhasics" in theformats:
fmts_badtif = "SingleTifHolo"
fifo = [ff for ff in fifo if ff[1] not in fmts_badtif]
# otherwise, prevent multiple file formats
theformats2 = [ff[1] for ff in fifo]
formset2 = set(theformats2)
if len(formset2) > 1:
msg = "Qpformat does not support multiple different file " \
+ "formats within one directory: {}".format(formset2)
raise MultipleFormatsNotSupportedError(msg)
# sort the lists
fifo = sorted(fifo)
return fifo
|
<SYSTEM_TASK:>
Return an identifier for the data at index `idx`
<END_TASK>
<USER_TASK:>
Description:
def get_identifier(self, idx):
"""Return an identifier for the data at index `idx`
.. versionchanged:: 0.4.2
indexing starts at 1 instead of 0
"""
|
name = self._get_cropped_file_names()[idx]
return "{}:{}:{}".format(self.identifier, name, idx + 1)
|
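A quick illustration with made-up values (real names come from
_get_cropped_file_names and the folder's identifier):

identifier, name, idx = "abc123", "img_0001.tif", 0
print("{}:{}:{}".format(identifier, name, idx + 1))  # -> abc123:img_0001.tif:1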
<SYSTEM_TASK:>
Verify folder file format
<END_TASK>
<USER_TASK:>
Description:
def verify(path):
"""Verify folder file format
The folder file format is only valid when
there is only one file format present.
"""
|
valid = True
fifo = SeriesFolder._search_files(path)
# dataset size
if len(fifo) == 0:
valid = False
# number of different file formats
fifmts = [ff[1] for ff in fifo]
if len(set(fifmts)) != 1:
valid = False
return valid
|
<SYSTEM_TASK:>
Write record as journal event.
<END_TASK>
<USER_TASK:>
Description:
def emit(self, record):
"""Write record as journal event.
MESSAGE is taken from the message provided by the
user, and PRIORITY, LOGGER, THREAD_NAME,
CODE_{FILE,LINE,FUNC} fields are appended
automatically. In addition, record.MESSAGE_ID will be
used if present.
"""
|
# collections.abc.Mapping (plain collections.Mapping was removed in Python 3.10)
if record.args and isinstance(record.args, collections.abc.Mapping):
extra = dict(self._extra, **record.args) # Merge metadata from handler and record
else:
extra = self._extra
try:
msg = self.format(record)
pri = self.mapPriority(record.levelno)
mid = getattr(record, 'MESSAGE_ID', None)
send(msg,
SOCKET=self.socket,
MESSAGE_ID=mid,
PRIORITY=format(pri),
LOGGER=record.name,
THREAD_NAME=record.threadName,
CODE_FILE=record.pathname,
CODE_LINE=record.lineno,
CODE_FUNC=record.funcName,
**extra)
except Exception:
self.handleError(record)
|
<SYSTEM_TASK:>
Map logging levels to journald priorities.
<END_TASK>
<USER_TASK:>
Description:
def mapPriority(levelno):
"""Map logging levels to journald priorities.
Since Python log level numbers are "sparse", we have
to map numbers in between the standard levels too.
"""
|
if levelno <= _logging.DEBUG:
return LOG_DEBUG
elif levelno <= _logging.INFO:
return LOG_INFO
elif levelno <= _logging.WARNING:
return LOG_WARNING
elif levelno <= _logging.ERROR:
return LOG_ERR
elif levelno <= _logging.CRITICAL:
return LOG_CRIT
else:
return LOG_ALERT
|
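A sketch of the sparse mapping, assuming the LOG_* constants carry their
usual syslog values and the static method is reachable directly:

import logging
from syslog import LOG_INFO, LOG_WARNING

assert mapPriority(logging.INFO) == LOG_INFO
# a custom level between INFO (20) and WARNING (30) rounds up to LOG_WARNING
assert mapPriority(25) == LOG_WARNING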
<SYSTEM_TASK:>
Get the arguments of a method and return it as a dictionary with the
<END_TASK>
<USER_TASK:>
Description:
def get_args(self, func):
"""
Get the arguments of a method and return it as a dictionary with the
supplied defaults, method arguments with no default are assigned None
"""
|
def reverse(iterable):
if iterable:
iterable = list(iterable)
while len(iterable):
yield iterable.pop()
# getfullargspec: inspect.getargspec was removed in Python 3.11
args, varargs, varkw, defaults, *_ = inspect.getfullargspec(func)
result = {}
for default in reverse(defaults):
result[args.pop()] = default
for arg in reverse(args):
if arg == 'self':
continue
result[arg] = None
return result
|
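An illustrative call, assuming get_args is reachable on some instance obj
(the example method below is hypothetical):

def example(self, a, b=1, c='x'):
    pass

# arguments without defaults come back as None; 'self' is skipped
assert obj.get_args(example) == {'a': None, 'b': 1, 'c': 'x'}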
<SYSTEM_TASK:>
Determine the file format of a folder or a file
<END_TASK>
<USER_TASK:>
Description:
def guess_format(path):
"""Determine the file format of a folder or a file"""
|
for fmt in formats:
if fmt.verify(path):
return fmt.__name__
else:
msg = "Undefined file format: '{}'".format(path)
raise UnknownFileFormatError(msg)
|
<SYSTEM_TASK:>
Load experimental data
<END_TASK>
<USER_TASK:>
Description:
def load_data(path, fmt=None, bg_data=None, bg_fmt=None,
meta_data={}, holo_kw={}, as_type="float32"):
"""Load experimental data
Parameters
----------
path: str
Path to experimental data file or folder
fmt: str
The file format to use (see `file_formats.formats`).
If set to `None`, the file format is guessed.
bg_data: str
Path to background data file or `qpimage.QPImage`
bg_fmt: str
The file format to use (see `file_formats.formats`)
for the background. If set to `None`, the file format
is guessed.
meta_data: dict
Meta data (see `qpimage.meta.DATA_KEYS`)
as_type: str
Defines the data type that the input data is casted to.
The default is "float32" which saves memory. If high
numerical accuracy is required (does not apply for a
simple 2D phase analysis), set this to double precision
("float64").
Returns
-------
dataobj: SeriesData or SingleData
Object that gives lazy access to the experimental data.
"""
|
path = pathlib.Path(path).resolve()
meta_data = dict(meta_data)  # work on a copy; never mutate the shared default dict
# sanity checks
for kk in meta_data:
if kk not in qpimage.meta.DATA_KEYS:
msg = "Meta data key not allowed: {}".format(kk)
raise ValueError(msg)
# ignore None or nan values in meta_data
for kk in list(meta_data.keys()):
if meta_data[kk] in [np.nan, None]:
meta_data.pop(kk)
if fmt is None:
fmt = guess_format(path)
else:
if not formats_dict[fmt].verify(path):
msg = "Wrong file format '{}' for '{}'!".format(fmt, path)
raise WrongFileFormatError(msg)
dataobj = formats_dict[fmt](path=path,
meta_data=meta_data,
holo_kw=holo_kw,
as_type=as_type)
if bg_data is not None:
if isinstance(bg_data, qpimage.QPImage):
# qpimage instance
dataobj.set_bg(bg_data)
else:
# actual data on disk
bg_path = pathlib.Path(bg_data).resolve()
if bg_fmt is None:
bg_fmt = guess_format(bg_path)
bgobj = formats_dict[bg_fmt](path=bg_path,
meta_data=meta_data,
holo_kw=holo_kw,
as_type=as_type)
dataobj.set_bg(bgobj)
return dataobj
|
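Typical usage, assuming this is the load_data entry point of the qpformat
package (paths and meta data below are illustrative):

import qpformat

ds = qpformat.load_data(path="data/hologram_series.h5",
                        bg_data="data/background.h5",
                        meta_data={"wavelength": 550e-9})
qpi = ds.get_qpimage(0)  # lazy access to the first phase image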
<SYSTEM_TASK:>
Return a string of the form "1 hr 2 min 3 sec" representing the
<END_TASK>
<USER_TASK:>
Description:
def duration(seconds):
"""Return a string of the form "1 hr 2 min 3 sec" representing the
given number of seconds."""
|
if seconds < 1:
return 'less than 1 sec'
seconds = int(round(seconds))
components = []
for magnitude, label in ((3600, 'hr'), (60, 'min'), (1, 'sec')):
if seconds >= magnitude:
components.append('{} {}'.format(seconds // magnitude, label))
seconds %= magnitude
return ' '.join(components)
|
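A few concrete values (zero-count components are simply omitted):

assert duration(3723) == '1 hr 2 min 3 sec'
assert duration(61) == '1 min 1 sec'
assert duration(0.4) == 'less than 1 sec'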
<SYSTEM_TASK:>
Returns the shortcut prefix of the browser.
<END_TASK>
<USER_TASK:>
Description:
def _get_shortcut_prefix(self, user_agent, standart_prefix):
"""
Returns the shortcut prefix of the browser.
:param user_agent: The user agent of the browser.
:type user_agent: str
:param standart_prefix: The default prefix.
:type standart_prefix: str
:return: The shortcut prefix of the browser.
:rtype: str
"""
|
# pylint: disable=no-self-use
if user_agent is not None:
user_agent = user_agent.lower()
opera = 'opera' in user_agent
mac = 'mac' in user_agent
konqueror = 'konqueror' in user_agent
spoofer = 'spoofer' in user_agent
safari = 'applewebkit' in user_agent
windows = 'windows' in user_agent
chrome = 'chrome' in user_agent
firefox = (
('firefox' in user_agent)
or ('minefield' in user_agent)
)
internet_explorer = (
('msie' in user_agent)
or ('trident' in user_agent)
)
if opera:
return 'SHIFT + ESC'
elif chrome and mac and (not spoofer):
return 'CTRL + OPTION'
elif safari and (not windows) and (not spoofer):
return 'CTRL + ALT'
elif (not windows) and (safari or mac or konqueror):
return 'CTRL'
elif firefox:
return 'ALT + SHIFT'
elif chrome or internet_explorer:
return 'ALT'
return standart_prefix
return standart_prefix
|
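A sketch of the dispatch with two sample (truncated, illustrative)
user-agent strings, assuming an instance named display:

ff_ua = 'mozilla/5.0 (windows nt 10.0; rv:109.0) gecko/20100101 firefox/115.0'
opera_ua = 'opera/9.80 (windows nt 6.1) presto/2.12.388 version/12.16'

assert display._get_shortcut_prefix(ff_ua, 'ALT') == 'ALT + SHIFT'
assert display._get_shortcut_prefix(opera_ua, 'ALT') == 'SHIFT + ESC'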
<SYSTEM_TASK:>
Returns the description of the role.
<END_TASK>
<USER_TASK:>
Description:
def _get_role_description(self, role):
"""
Returns the description of the role.
:param role: The role.
:type role: str
:return: The description of the role.
:rtype: str
"""
|
parameter = 'role-' + role.lower()
if self.configure.has_parameter(parameter):
return self.configure.get_parameter(parameter)
return None
|
<SYSTEM_TASK:>
Generate the list of shortcuts of page.
<END_TASK>
<USER_TASK:>
Description:
def _generate_list_shortcuts(self):
"""
Generate the list of shortcuts of page.
"""
|
id_container_shortcuts_before = (
AccessibleDisplayImplementation.ID_CONTAINER_SHORTCUTS_BEFORE
)
id_container_shortcuts_after = (
AccessibleDisplayImplementation.ID_CONTAINER_SHORTCUTS_AFTER
)
local = self.parser.find('body').first_result()
if local is not None:
container_before = self.parser.find(
'#'
+ id_container_shortcuts_before
).first_result()
if (
(container_before is None)
and (self.attribute_accesskey_before)
):
container_before = self.parser.create_element('div')
container_before.set_attribute(
'id',
id_container_shortcuts_before
)
text_container_before = self.parser.create_element('span')
text_container_before.set_attribute(
'class',
AccessibleDisplayImplementation.CLASS_TEXT_SHORTCUTS
)
text_container_before.append_text(
self.attribute_accesskey_before
)
container_before.append_element(text_container_before)
local.prepend_element(container_before)
if container_before is not None:
self.list_shortcuts_before = self.parser.find(
container_before
).find_children('ul').first_result()
if self.list_shortcuts_before is None:
self.list_shortcuts_before = self.parser.create_element(
'ul'
)
container_before.append_element(self.list_shortcuts_before)
container_after = self.parser.find(
'#'
+ id_container_shortcuts_after
).first_result()
if (
(container_after is None)
and (self.attribute_accesskey_after)
):
container_after = self.parser.create_element('div')
container_after.set_attribute(
'id',
id_container_shortcuts_after
)
text_container_after = self.parser.create_element('span')
text_container_after.set_attribute(
'class',
AccessibleDisplayImplementation.CLASS_TEXT_SHORTCUTS
)
text_container_after.append_text(
self.attribute_accesskey_after
)
container_after.append_element(text_container_after)
local.append_element(container_after)
if container_after is not None:
self.list_shortcuts_after = self.parser.find(
container_after
).find_children('ul').first_result()
if self.list_shortcuts_after is None:
self.list_shortcuts_after = self.parser.create_element(
'ul'
)
container_after.append_element(self.list_shortcuts_after)
self.list_shortcuts_added = True
|
<SYSTEM_TASK:>
Insert an element before or after another element.
<END_TASK>
<USER_TASK:>
Description:
def _insert(self, element, new_element, before):
"""
Insert an element before or after another element.
:param element: The reference element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:param new_element: The element to be inserted.
:type new_element: hatemile.util.html.htmldomelement.HTMLDOMElement
:param before: If True, insert the new element before the reference element.
:type before: bool
"""
|
tag_name = element.get_tag_name()
append_tags = [
'BODY',
'A',
'FIGCAPTION',
'LI',
'DT',
'DD',
'LABEL',
'OPTION',
'TD',
'TH'
]
controls = ['INPUT', 'SELECT', 'TEXTAREA']
if tag_name == 'HTML':
body = self.parser.find('body').first_result()
if body is not None:
self._insert(body, new_element, before)
elif tag_name in append_tags:
if before:
element.prepend_element(new_element)
else:
element.append_element(new_element)
elif tag_name in controls:
labels = []
if element.has_attribute('id'):
labels = self.parser.find(
'label[for="'
+ element.get_attribute('id')
+ '"]'
).list_results()
if not labels:
labels = self.parser.find(element).find_ancestors(
'label'
).list_results()
for label in labels:
self._insert(label, new_element, before)
elif before:
element.insert_before(new_element)
else:
element.insert_after(new_element)
|
<SYSTEM_TASK:>
Force the screen reader to display information about the element.
<END_TASK>
<USER_TASK:>
Description:
def _force_read_simple(self, element, text_before, text_after, data_of):
"""
Force the screen reader to display information about the element.
:param element: The reference element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:param text_before: The text content to show before the element.
:type text_before: str
:param text_after: The text content to show after the element.
:type text_after: str
:param data_of: The name of the attribute that links the content with
the element.
:type data_of: str
"""
|
self.id_generator.generate_id(element)
identifier = element.get_attribute('id')
selector = '[' + data_of + '="' + identifier + '"]'
reference_before = self.parser.find(
'.'
+ AccessibleDisplayImplementation.CLASS_FORCE_READ_BEFORE
+ selector
).first_result()
reference_after = self.parser.find(
'.'
+ AccessibleDisplayImplementation.CLASS_FORCE_READ_AFTER
+ selector
).first_result()
references = self.parser.find(selector).list_results()
if reference_before in references:
references.remove(reference_before)
if reference_after in references:
references.remove(reference_after)
if not references:
if text_before:
if reference_before is not None:
reference_before.remove_node()
span = self.parser.create_element('span')
span.set_attribute(
'class',
AccessibleDisplayImplementation.CLASS_FORCE_READ_BEFORE
)
span.set_attribute(data_of, identifier)
span.append_text(text_before)
self._insert(element, span, True)
if text_after:
if reference_after is not None:
reference_after.remove_node()
span = self.parser.create_element('span')
span.set_attribute(
'class',
AccessibleDisplayImplementation.CLASS_FORCE_READ_AFTER
)
span.set_attribute(data_of, identifier)
span.append_text(text_after)
self._insert(element, span, False)
|
<SYSTEM_TASK:>
Force the screen reader to display information about the element with prefixes
<END_TASK>
<USER_TASK:>
Description:
def _force_read(
self,
element,
value,
text_prefix_before,
text_suffix_before,
text_prefix_after,
text_suffix_after,
data_of
):
"""
Force the screen reader to display information about the element with prefixes
or suffixes.
:param element: The reference element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:param value: The value to be shown.
:type value: str
:param text_prefix_before: The prefix of value to show before the
element.
:type text_prefix_before: str
:param text_suffix_before: The suffix of value to show before the
element.
:type text_suffix_before: str
:param text_prefix_after: The prefix of value to show after the
element.
:type text_prefix_after: str
:param text_suffix_after: The suffix of value to show after the
element.
:type text_suffix_after: str
:param data_of: The name of the attribute that links the content with
the element.
:type data_of: str
"""
|
if (text_prefix_before) or (text_suffix_before):
text_before = text_prefix_before + value + text_suffix_before
else:
text_before = ''
if (text_prefix_after) or (text_suffix_after):
text_after = text_prefix_after + value + text_suffix_after
else:
text_after = ''
self._force_read_simple(element, text_before, text_after, data_of)
|
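For instance, hypothetical arguments like these (written from inside the
class, and with a made-up data_of attribute name) would announce
"(Required) " before the element and nothing after it:

self._force_read(field, 'Required', '(', ') ', '', '',
                 'data-attributerequiredof')
# text_before == '(Required) ', text_after == ''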
<SYSTEM_TASK:>
Decorator to mark a function as a provider.
<END_TASK>
<USER_TASK:>
Description:
def provider(func=None, *, singleton=False, injector=None):
"""
Decorator to mark a function as a provider.
Args:
singleton (bool): The returned value should be a singleton or shared
instance. If False (the default) the provider function will be
invoked again for every time it's needed for injection.
injector (Injector): If provided, the function is immediately
registered as a provider with the injector instance.
Example:
@diay.provider(singleton=True)
def myfunc() -> MyClass:
return MyClass(args)
"""
|
def decorator(func):
wrapped = _wrap_provider_func(func, {'singleton': singleton})
if injector:
injector.register_provider(wrapped)
return wrapped
if func:
return decorator(func)
return decorator
|
<SYSTEM_TASK:>
Mark a class or function for injection, meaning that a DI container knows
<END_TASK>
<USER_TASK:>
Description:
def inject(*args, **kwargs):
"""
Mark a class or function for injection, meaning that a DI container knows
that it should inject dependencies into it.
Normally you won't need this as the injector will inject the required
arguments anyway, but it can be used to inject properties into a class
without having to specify it in the constructor, or to inject arguments
that aren't properly type hinted.
Example:
@diay.inject('foo', MyClass)
class MyOtherClass: pass
assert isinstance(injector.get(MyOtherClass).foo, MyClass)
"""
|
def wrapper(obj):
if inspect.isclass(obj) or callable(obj):
_inject_object(obj, *args, **kwargs)
return obj
raise DiayException("Don't know how to inject into %r" % obj)
return wrapper
|
<SYSTEM_TASK:>
Register a class method lazily as a provider.
<END_TASK>
<USER_TASK:>
Description:
def register_lazy_provider_method(self, cls, method):
"""
Register a class method lazily as a provider.
"""
|
if 'provides' not in getattr(method, '__di__', {}):
raise DiayException('method %r is not a provider' % method)
@functools.wraps(method)
def wrapper(*args, **kwargs):
return getattr(self.get(cls), method.__name__)(*args, **kwargs)
self.factories[method.__di__['provides']] = wrapper
|
<SYSTEM_TASK:>
Set the factory for something.
<END_TASK>
<USER_TASK:>
Description:
def set_factory(self, thing: type, value, overwrite=False):
"""
Set the factory for something.
"""
|
if thing in self.factories and not overwrite:
raise DiayException('factory for %r already exists' % thing)
self.factories[thing] = value
|
<SYSTEM_TASK:>
Set an instance of a thing.
<END_TASK>
<USER_TASK:>
Description:
def set_instance(self, thing: type, value, overwrite=False):
"""
Set an instance of a thing.
"""
|
if thing in self.instances and not overwrite:
raise DiayException('instance for %r already exists' % thing)
self.instances[thing] = value
|
<SYSTEM_TASK:>
Get an instance of some type.
<END_TASK>
<USER_TASK:>
Description:
def get(self, thing: type):
"""
Get an instance of some type.
"""
|
if thing in self.instances:
return self.instances[thing]
if thing in self.factories:
fact = self.factories[thing]
ret = self.get(fact)
if hasattr(fact, '__di__') and fact.__di__['singleton']:
self.instances[thing] = ret
return ret
if inspect.isclass(thing):
return self._call_class_init(thing)
elif callable(thing):
return self.call(thing)
raise DiayException('cannot resolve: %r' % thing)
|
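A small sketch of the lookup order, assuming a default-constructed
Injector and a throwaway class:

class Engine:
    pass

injector = Injector()
injector.set_instance(Engine, Engine())
# instances take precedence over factories and plain construction
assert injector.get(Engine) is injector.get(Engine)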
<SYSTEM_TASK:>
Call a function, resolving any type-hinted arguments.
<END_TASK>
<USER_TASK:>
Description:
def call(self, func, *args, **kwargs):
"""
Call a function, resolving any type-hinted arguments.
"""
|
guessed_kwargs = self._guess_kwargs(func)
for key, val in guessed_kwargs.items():
kwargs.setdefault(key, val)
try:
return func(*args, **kwargs)
except TypeError as exc:
msg = (
"tried calling function %r but failed, probably "
"because it takes arguments that cannot be resolved"
) % func
raise DiayException(msg) from exc
|
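And call() resolves type-hinted parameters from the same container; a
sketch reusing the Engine example above:

def build_car(engine: Engine):
    return engine

assert isinstance(injector.call(build_car), Engine)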
<SYSTEM_TASK:>
Converts elements returned by an iterable into instances of
<END_TASK>
<USER_TASK:>
Description:
def _convert_iterable(self, iterable):
"""Converts elements returned by an iterable into instances of
self._wrapper
"""
|
# Return original if _wrapper isn't callable
if not callable(self._wrapper):
return iterable
return [self._wrapper(x) for x in iterable]
|
<SYSTEM_TASK:>
Returns the first object encountered that matches the specified
<END_TASK>
<USER_TASK:>
Description:
def get(self, **kwargs):
"""Returns the first object encountered that matches the specified
lookup parameters.
>>> site_list.get(id=1)
{'url': 'http://site1.tld/', 'published': False, 'id': 1}
>>> site_list.get(published=True, id__lt=3)
{'url': 'http://site1.tld/', 'published': True, 'id': 2}
>>> site_list.filter(published=True).get(id__lt=3)
{'url': 'http://site1.tld/', 'published': True, 'id': 2}
If the QueryList contains multiple elements that match the criteria,
only the first match will be returned. Use ``filter()`` to retrieve
the entire set.
If no match is found in the QueryList, the method will raise a
``NotFound`` exception.
>>> site_list.get(id=None)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "querylist/list.py", line 113, in get
"Element not found with attributes: %s" % kv_str)
querylist.list.NotFound: Element not found with attributes: id=None
"""
|
for x in self:
if self._check_element(kwargs, x):
return x
kv_str = self._stringify_kwargs(kwargs)
raise QueryList.NotFound(
"Element not found with attributes: %s" % kv_str)
|
<SYSTEM_TASK:>
Run the fnExchange server
<END_TASK>
<USER_TASK:>
Description:
def runserver(ctx, conf, port, foreground):
"""Run the fnExchange server"""
|
config = read_config(conf)
debug = config['conf'].get('debug', False)
click.echo('Debug mode {0}.'.format('on' if debug else 'off'))
port = port or config['conf']['server']['port']
app_settings = {
'debug': debug,
'auto_reload': config['conf']['server'].get('auto_reload', False),
}
handlers_settings = __create_handler_settings(config)
if foreground:
click.echo('Requested mode: foreground')
start_app(port, app_settings, handlers_settings)
else:
click.echo('Requested mode: background')
# subprocess.call([sys.executable, 'yourscript.py'], env=os.environ.copy())
raise NotImplementedError
|