code | docstring
---|---
def __store_other(self, o, method_name, member):
self.__store__[ method_name ] = eval( "o." + method_name )
self.__store__[ method_name[0].lower() + method_name[1:] ] = eval( "o." + method_name )
|
Stores a reference to an attribute on o
:param mixed o: Some object
:param str method_name: The name of the attribute
:param mixed member: The attribute
|
def process(self, tup):
group_key = self.group_key(tup)
self._batches[group_key].append(tup)
|
Group non-tick Tuples into batches by ``group_key``.
.. warning::
This method should **not** be overridden. If you want to tweak
how Tuples are grouped into batches, override ``group_key``.
|
def _ReadFlowResponseCounts(self, request_keys, cursor=None):
query =
condition_template =
conditions = [condition_template] * len(request_keys)
args = []
for client_id, flow_id, request_id in request_keys:
args.append(db_utils.ClientIDToInt(client_id))
args.append(db_utils.FlowIDToInt(flow_id))
args.append(request_id)
query = query.format(conditions=" OR ".join(conditions))
cursor.execute(query, args)
response_counts = {}
for (client_id_int, flow_id_int, request_id, count) in cursor.fetchall():
request_key = (db_utils.IntToClientID(client_id_int),
db_utils.IntToFlowID(flow_id_int), request_id)
response_counts[request_key] = count
return response_counts
|
Reads counts of responses for the given requests.
|
def remove_all(self, locator):
components = []
if locator is None:
return components
self._lock.acquire()
try:
for reference in reversed(self._references):
if reference.match(locator):
self._references.remove(reference)
components.append(reference.get_component())
finally:
self._lock.release()
return components
|
Removes all component references that match the specified locator.
:param locator: a locator to remove references by.
:return: a list containing the components of all removed references.
|
def chromaticity_to_XYZ(white, red, green, blue):
xW, yW = white
xR, yR = red
xG, yG = green
xB, yB = blue
R = G = B = 1.0
z = yW * ((xG - xB) * yR - (xR - xB) * yG + (xR - xG) * yB)
YA = yR / R * ((xG - xB) * yW - (xW - xB) * yG + (xW - xG) * yB) / z
XA = YA * xR / yR
ZA = YA * ((1 - xR) / yR - 1)
YB = - yG / G * ((xR - xB) * yW - (xW - xB) * yR + (xW - xR) * yB) / z
XB = YB * xG / yG
ZB = YB * ((1 - xG) / yG - 1)
YC = yB / B * ((xR - xG) * yW - (xW - xG) * yR + (xW - xR) * yG) / z
XC = YC * xB / yB
ZC = YC * ((1 - xB) / yB - 1)
XW = XA * R + XB * G + XC * B
YW = YA * R + YB * G + YC * B
ZW = ZA * R + ZB * G + ZC * B
return (XW, YW, ZW), (XA, YA, ZA), (XB, YB, ZB), (XC, YC, ZC)
|
From the "CalRGB Color Spaces" section of "PDF Reference", 6th ed.
|
def add_bookmark(self, new_bookmark, *, max_retries=3):
with (yield from self._lock):
bookmarks = yield from self._get_bookmarks()
try:
modified_bookmarks = list(bookmarks)
if new_bookmark not in bookmarks:
modified_bookmarks.append(new_bookmark)
yield from self._set_bookmarks(modified_bookmarks)
retries = 0
bookmarks = yield from self._get_bookmarks()
while retries < max_retries:
if new_bookmark in bookmarks:
break
modified_bookmarks = list(bookmarks)
modified_bookmarks.append(new_bookmark)
yield from self._set_bookmarks(modified_bookmarks)
bookmarks = yield from self._get_bookmarks()
retries += 1
if new_bookmark not in bookmarks:
raise RuntimeError("Could not add bookmark")
finally:
self._diff_emit_update(bookmarks)
|
Add a bookmark and check whether it was successfully added to the
bookmark list. Already existing bookmarks are not added twice.
:param new_bookmark: the bookmark to add
:type new_bookmark: an instance of :class:`~bookmark_xso.Bookmark`
:param max_retries: the number of retries if setting the bookmark
fails
:type max_retries: :class:`int`
:raises RuntimeError: if the bookmark is not in the bookmark list
after `max_retries` retries.
After setting the bookmark it is checked whether the bookmark is
present in the online storage; if it is not, adding it is retried
at most `max_retries` times. A :class:`RuntimeError` is raised if
the bookmark could not be added successfully after `max_retries`
retries.
|
def send_api_request(self, method, url, params=None, valid_parameters=None, needs_api_key=False):
# avoid mutable default arguments, which would be shared (and mutated) across calls
params = {} if params is None else params
valid_parameters = [] if valid_parameters is None else valid_parameters
if needs_api_key:
params.update({'api_key': self.request.consumer_key})
valid_parameters.append('api_key')
files = {}
if 'data' in params:
if isinstance(params['data'], list):
for idx, data in enumerate(params['data']):
files['data['+str(idx)+']'] = open(params['data'][idx], 'rb')
else:
files = {'data': open(params['data'], 'rb')}
del params['data']
validate_params(valid_parameters, params)
if method == "get":
return self.request.get(url, params)
else:
return self.request.post(url, params, files)
|
Sends a request with the given parameters to the requested URL, validating them
to make sure that they are what we expect to have passed to us
:param method: a string, the request method you want to make
:param url: a string, the URL the request is sent to
:param params: a dict, the parameters used for the API request
:param valid_parameters: a list, the list of valid parameters
:param needs_api_key: a boolean, whether or not your request needs an api key injected
:returns: a dict parsed from the JSON response
|
def copy(src, trg, transform=None):
source = open(src[0], src[1])
target = open(trg[0], trg[1], autocommit=1000)
for item in source.get():
item = dict(item)
if '_id' in item:
del item['_id']
if transform:
item = transform(item)
target.put(trg[0](item))
source.close()
target.commit()
target.close()
|
Copy items from source to target with an optional field transformation.
|
def get_ptrm_dec_and_inc(self):
PTRMS = self.PTRMS[1:]
CART_pTRMS_orig = numpy.array([lib_direct.dir2cart(row[1:4]) for row in PTRMS])
tmin, tmax = self.t_Arai[0], self.t_Arai[-1]
ptrms_dec_Free, ptrms_inc_Free, ptrm_best_fit_vector_Free, ptrm_tau_Free, ptrm_v_Free, ptrm_mass_center_Free, ptrm_PCA_sigma_Free = lib_direct.get_dec_and_inc(CART_pTRMS_orig, self.t_Arai, tmin, tmax, anchored=False)
ptrms_angle = lib_direct.get_ptrms_angle(ptrm_best_fit_vector_Free, self.B_lab_cart)
self.pars['ptrms_dec_Free'], self.pars['ptrms_inc_Free'] = ptrms_dec_Free, ptrms_inc_Free
self.pars['ptrms_tau_Free'] = ptrm_tau_Free
self.pars['ptrms_angle_Free'] = ptrms_angle
|
not included in spd.
|
def update(self, friendly_name=values.unset, max_size=values.unset):
return self._proxy.update(friendly_name=friendly_name, max_size=max_size, )
|
Update the QueueInstance
:param unicode friendly_name: A string to describe this resource
:param unicode max_size: The max number of calls allowed in the queue
:returns: Updated QueueInstance
:rtype: twilio.rest.api.v2010.account.queue.QueueInstance
|
def tempfile_writer(target):
tmp = target.parent / ('_%s' % target.name)
try:
with tmp.open('wb') as fd:
yield fd
except:
tmp.unlink()
raise
LOG.debug('rename %s -> %s', tmp, target)
tmp.rename(target)
|
Write cache data to a temporary location. When writing is
complete, rename the file to the actual location. Delete
the temporary file on any error.
|
def DEFINE_integer_list(self, name, default, help, constant=False):
self.AddOption(
type_info.List(
name=name,
default=default,
description=help,
validator=type_info.Integer()),
constant=constant)
|
A helper for defining lists of integer options.
|
def update_db():
logger = get_logger(PROCESS_SCHEDULER)
managed_process_dao = ManagedProcessDao(logger)
managed_process_dao.clear()
for process_name, process_entry in context.process_context.items():
if not isinstance(process_entry, ManagedProcessEntry):
continue
managed_process_dao.update(process_entry)
logger.info('Updated DB with process entry {0} from the context.'.format(process_entry.key))
|
Writes records from context.process_context to the managed_process table.
|
def update():
metadata = _init()
if S3_SYNC_ON_UPDATE:
log.info('Syncing local cache from S3...')
for saltenv, env_meta in six.iteritems(metadata):
for bucket_files in _find_files(env_meta):
for bucket, files in six.iteritems(bucket_files):
for file_path in files:
cached_file_path = _get_cached_file_name(bucket, saltenv, file_path)
log.info('%s - %s : %s', bucket, saltenv, file_path)
_get_file_from_s3(metadata, saltenv, bucket, file_path, cached_file_path)
log.info('Sync local cache from S3 completed.')
|
Update the cache file for the bucket.
|
def infer_data_type(data_container):
assert isinstance(data_container, list) or isinstance(
data_container, tuple
), "data_container should be a list or tuple."
assert (
len(set(data_container)) > 1
), "There should be more than one value in the data container."
assert is_data_homogenous(
data_container
), "Data are not of a homogenous type!"
datum = data_container[0]
if len(set(data_container)) == 2:
return "categorical"
elif isinstance(datum, str):
return "categorical"
elif isinstance(datum, int):
return "ordinal"
elif isinstance(datum, float):
return "continuous"
else:
raise ValueError("Not possible to tell what the data type is.")
|
For a given container of data, infer the type of data as one of
continuous, categorical, or ordinal.
For now, it is a one-to-one mapping as such:
- str: categorical
- int: ordinal
- float: continuous
There may be better ways that are not currently implemented below. For
example, with a list of numbers, we can check whether the number of unique
entries is at most 12 while the container has over 10,000 entries. That
would be a good candidate for treating the floats as categorical.
:param data_container: A generic container of data points.
:type data_container: `iterable`
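A short usage sketch (the containers below are illustrative, not taken from the source):
infer_data_type(['a', 'b', 'c'])        # 'categorical' (strings)
infer_data_type([1, 2, 3, 4])           # 'ordinal' (ints)
infer_data_type([0.1, 0.5, 0.9])        # 'continuous' (floats)
infer_data_type([True, False, True])    # 'categorical' (only two unique values)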
|
def could_be_unfinished_char(seq, encoding):
if decodable(seq, encoding):
return False
if encodings.codecs.getdecoder('utf8') is encodings.codecs.getdecoder(encoding):
return could_be_unfinished_utf8(seq)
elif encodings.codecs.getdecoder('ascii') is encodings.codecs.getdecoder(encoding):
return False
else:
return True
|
Whether seq bytes might create a char in encoding if more bytes were added
|
def _routes_updated(self, ri):
new_routes = ri.router['routes']
old_routes = ri.routes
adds, removes = bc.common_utils.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug("Added route entry is '%s'", route)
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
driver = self.driver_manager.get_driver(ri.id)
driver.routes_updated(ri, 'replace', route)
for route in removes:
LOG.debug("Removed route entry is '%s'", route)
driver = self.driver_manager.get_driver(ri.id)
driver.routes_updated(ri, 'delete', route)
ri.routes = new_routes
|
Update the state of routes in the router.
Compares the current routes with the (configured) existing routes,
detects what was removed or added, and then configures the
logical router in the hosting device accordingly.
:param ri: RouterInfo corresponding to the router.
:return: None
:raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
DriverException if the configuration operation fails.
|
def get_name(self, plugin):
for name, val in self._name2plugin.items():
if plugin == val:
return name
|
Return name for registered plugin or None if not registered.
|
def get_usage(self):
resp = requests.get(FITNESS_URL, timeout=30)
resp.raise_for_status()
soup = BeautifulSoup(resp.text, "html5lib")
eastern = pytz.timezone('US/Eastern')
output = []
for item in soup.findAll("div", {"class": "barChart"}):
data = [x.strip() for x in item.get_text("\n").strip().split("\n")]
data = [x for x in data if x]
name = re.sub(r"\s*(Hours)?\s*-?\s*(CLOSED|OPEN)?$", "", data[0], flags=re.I).strip()
output.append({
"name": name,
"open": "Open" in data[1],
"count": int(data[2].rsplit(" ", 1)[-1]),
"updated": eastern.localize(datetime.datetime.strptime(data[3][8:].strip(), '%m/%d/%Y %I:%M %p')).isoformat(),
"percent": int(data[4][:-1])
})
return output
|
Get fitness locations and their current usage.
|
def __build_completer_map(cls):
ret = {}
for name in dir(cls):
obj = getattr(cls, name)
if iscompleter(obj):
for cmd in obj.__complete_targets__:
if cmd in ret.keys():
raise PyShellError("The command '{}' already has"
" complter"
" method '{}', cannot register a"
" second method '{}'.".format( \
cmd, ret[cmd], obj.__name__))
ret[cmd] = obj.__name__
return ret
|
Build a mapping from command names to completer names.
One command name maps to at most one completer method.
Multiple command names can map to the same completer method.
Only used by __init__() to initialize self._cmd_map. MUST NOT be used
elsewhere.
Raises:
PyShellError: A command maps to multiple completer methods.
|
def _is_bot(user_agent):
bot_list = [
'http://www.baidu.com/search/spider.html',
'python-requests',
'http://ltx71.com/',
'http://drupal.org/',
'www.sogou.com',
'http://search.msn.com/msnbot.htm',
'semantic-visions.com crawler',
]
for bot in bot_list:
if re.search(re.escape(bot), user_agent):
return True
return False
|
Check if user_agent is a known bot.
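A small usage sketch (the user-agent strings are invented for illustration):
_is_bot('python-requests/2.21.0')                      # True, matches 'python-requests'
_is_bot('Mozilla/5.0 (Windows NT 10.0; Win64; x64)')   # False, no known bot marker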
|
def span(self, span: str) -> List[HistoryItem]:
if span.lower() in ('*', '-', 'all'):
span = ':'
results = self.spanpattern.search(span)
if not results:
raise ValueError('History indices must be positive or negative integers, and may not be zero.')
sep = results.group('separator')
start = results.group('start')
if start:
start = self._zero_based_index(start)
end = results.group('end')
if end:
end = int(end)
if end == -1:
end = None
elif end < -1:
end += 1
if start is not None and end is not None:
result = self[start:end]
elif start is not None and sep is not None:
result = self[start:]
elif end is not None and sep is not None:
result = self[:end]
elif start is not None:
result = [self[start]]
else:
result = self[:]
return result
|
Return an index or slice of the History list.
:param span: string containing an index or a slice
:return: a list of HistoryItems
This method can accommodate input in any of these forms:
a
-a
a..b or a:b
a.. or a:
..a or :a
-a.. or -a:
..-a or :-a
Different from native python indexing and slicing of arrays, this method
uses 1-based array numbering. Users who are not programmers can't grok
0 based numbering. Programmers can usually grok either. Which reminds me,
there are only two hard problems in programming:
- naming
- cache invalidation
- off by one errors
|
def number_of_interactions(G, u=None, v=None, t=None):
return G.number_of_interactions(u, v, t)
|
Return the number of edges between two nodes at time t.
Parameters
----------
u, v : nodes, optional (default=all edges)
If u and v are specified, return the number of edges between
u and v. Otherwise return the total number of all edges.
t : snapshot id (default=None)
If None, the number of edges in the flattened graph will be returned.
Returns
-------
nedges : int
The number of edges in the graph. If nodes u and v are specified
return the number of edges between those nodes.
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
>>> dn.number_of_interactions(G, t=0)
|
def mean(data, n=3, **kwargs):
if len(data[-n:]) < n:
forecast = np.nan
else:
forecast = np.mean(data[-n:])
return forecast
|
The mean forecast for the next point is the mean value of the previous ``n`` points in
the series.
Args:
data (np.array): Observed data, presumed to be ordered in time.
n (int): period over which to calculate the mean
Returns:
float: a single-valued forecast for the next value in the series.
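A quick usage sketch (the data values are illustrative):
import numpy as np
data = np.array([1.0, 2.0, 3.0, 4.0])
mean(data, n=2)      # 3.5, the mean of the last two observations
mean(data[:1], n=3)  # np.nan, not enough points for a window of 3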
|
def watch_source(self, source_id):
source_id = int(source_id)
r = yield from self._send_cmd(
"WATCH S[%d] ON" % (source_id, ))
self._watched_source.add(source_id)
return r
|
Add a source to the watchlist.
|
def state(self):
return dict(a=self.a.tolist(), b=self.b.tolist(), c=self.c.tolist(),
pivot=self.pivot.tolist(), position=self.position.tolist())
|
Return the current camera state as a dictionary; it can be
restored with `Camera.restore`.
|
def destroy_volumes(role):
state = dcos_agents_state()
if not state or 'slaves' not in state.keys():
return False
all_success = True
for agent in state['slaves']:
if not destroy_volume(agent, role):
all_success = False
return all_success
|
Destroys all volumes on all the slaves in the cluster for the role.
|
def draw(self):
if self.hidden:
return False
if self.background_color is not None:
render.fillrect(self.surface, self.background_color,
rect=pygame.Rect((0, 0), self.frame.size))
for child in self.children:
if not child.hidden:
child.draw()
topleft = child.frame.topleft
if child.shadowed:
shadow_size = theme.current.shadow_size
shadow_topleft = (topleft[0] - shadow_size // 2,
topleft[1] - shadow_size // 2)
self.surface.blit(child.shadow_image, shadow_topleft)
self.surface.blit(child.surface, topleft)
if child.border_color and child.border_widths is not None:
if (type(child.border_widths) is int and
child.border_widths > 0):
pygame.draw.rect(self.surface, child.border_color,
child.frame, child.border_widths)
else:
tw, lw, bw, rw = child.get_border_widths()
tl = (child.frame.left, child.frame.top)
tr = (child.frame.right - 1, child.frame.top)
bl = (child.frame.left, child.frame.bottom - 1)
br = (child.frame.right - 1, child.frame.bottom - 1)
if tw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, tr, tw)
if lw > 0:
pygame.draw.line(self.surface, child.border_color,
tl, bl, lw)
if bw > 0:
pygame.draw.line(self.surface, child.border_color,
bl, br, bw)
if rw > 0:
pygame.draw.line(self.surface, child.border_color,
tr, br, rw)
return True
|
Do not call directly.
|
def add_data_flow_view_for_model(self, data_flow_m, parent_state_m):
parent_state_v = self.canvas.get_view_for_model(parent_state_m)
hierarchy_level = parent_state_v.hierarchy_level
data_flow_v = DataFlowView(data_flow_m, hierarchy_level)
self.canvas.add(data_flow_v, parent_state_v, index=1)
self._connect_data_flow_to_ports(data_flow_m, data_flow_v, parent_state_m)
|
Creates a `DataFlowView` and adds it to the canvas
The method creates a `DataFlowView` from the given `DataFlowModel` `data_flow_m` and adds it to the canvas.
:param DataFlowModel data_flow_m: The data flow for which a view is to be created
:param ContainerStateModel parent_state_m: The parental `StateModel` of the data flow
|
def batchseeds(args):
from jcvi.formats.pdf import cat
xargs = args[1:]
p = OptionParser(batchseeds.__doc__)
opts, args, iopts = add_seeds_options(p, args)
if len(args) != 1:
sys.exit(not p.print_help())
folder, = args
folder = folder.rstrip('/')
outdir = folder + "-debug"
outfile = folder + "-output.tsv"
assert op.isdir(folder)
images = []
jsonfile = opts.calibrate or op.join(folder, "calibrate.json")
if not op.exists(jsonfile):
jsonfile = None
for im in iglob(folder, "*.jpg,*.JPG,*.png"):
if im.endswith((".resize.jpg", ".main.jpg", ".label.jpg")):
continue
if op.basename(im).startswith("calibrate"):
continue
images.append(im)
fw = must_open(outfile, 'w')
print(Seed.header(calibrate=jsonfile), file=fw)
nseeds = 0
for im in images:
imargs = [im, "--noheader", "--outdir={0}".format(outdir)] + xargs
if jsonfile:
imargs += ["--calibrate={0}".format(jsonfile)]
objects = seeds(imargs)
for o in objects:
print(o, file=fw)
nseeds += len(objects)
fw.close()
logging.debug("Processed {0} images.".format(len(images)))
logging.debug("A total of {0} objects written to `{1}`.".\
format(nseeds, outfile))
pdfs = iglob(outdir, "*.pdf")
outpdf = folder + "-output.pdf"
cat(pdfs + ["--outfile={0}".format(outpdf)])
logging.debug("Debugging information written to `{0}`.".format(outpdf))
return outfile
|
%prog batchseeds folder
Extract seed metrics for each image in a directory.
|
def read(self, size=1024):
b = super(PtyProcessUnicode, self).read(size)
return self.decoder.decode(b, final=False)
|
Read at most ``size`` bytes from the pty, return them as unicode.
Can block if there is nothing to read. Raises :exc:`EOFError` if the
terminal was closed.
The size argument still refers to bytes, not unicode code points.
|
def clipboard_btn_clicked(self, widget, data=None):
_clipboard_text = []
for record in self.debug_logs['logs']:
if self.debugging:
_clipboard_text.append(format_entry(record, show_level=True))
else:
if int(record.levelno) > 10:
if getattr(record, 'event_type', ''):
if not record.event_type.startswith("dep_"):
_clipboard_text.append(format_entry(record))
else:
_clipboard_text.append(format_entry(record))
self.gui_helper.create_clipboard(_clipboard_text)
|
Function copies logs to clipboard.
|
def start_fsweep(self, start=None, stop=None, step=None):
if start:
self.frequency_start = start
if stop:
self.frequency_stop = stop
if step:
self.frequency_step = step
self._write(('SWEEP', Integer), 1)
|
Starts a frequency sweep.
:param start: Sets the start frequency.
:param stop: Sets the target frequency.
:param step: Sets the frequency step.
|
def get_action_fields(self, view, action_name, resource):
serializer = view.get_serializer(resource)
fields = OrderedDict()
if not isinstance(serializer, view.serializer_class) or action_name == 'update':
fields = self.get_fields(serializer.fields)
return fields
|
Get fields exposed by action's serializer
|
def _run_producer_wrapper(self):
try:
yield from self._producer.process()
except Exception as error:
if not isinstance(error, StopIteration):
_logger.debug('Producer died.', exc_info=True)
self.stop()
raise
else:
self.stop()
|
Run the producer; if an exception occurs, stop the engine.
|
def save(params, filename, source):
writer = wave.open(filename, 'wb')
writer.setnchannels(1)
writer.setsampwidth(2)
writer.setframerate(params.sample_rate)
data_out = array.array('h')
for x in source:
data_out.append(int(x * 32766))
writer.writeframes(data_out.tostring())
writer.close()
|
Write a sequence of samples as a WAV file.
Currently writes a 16-bit mono file.
|
def go_to_next_cell(self):
cursor = self.textCursor()
cursor.movePosition(QTextCursor.NextBlock)
cur_pos = prev_pos = cursor.position()
while not self.is_cell_separator(cursor):
cursor.movePosition(QTextCursor.NextBlock)
prev_pos = cur_pos
cur_pos = cursor.position()
if cur_pos == prev_pos:
return
self.setTextCursor(cursor)
|
Go to the next cell of lines
|
def b_operator(self, P):
A, B, Q, R, beta = self.A, self.B, self.Q, self.R, self.beta
S1 = Q + beta * dot(B.T, dot(P, B))
S2 = beta * dot(B.T, dot(P, A))
S3 = beta * dot(A.T, dot(P, A))
F = solve(S1, S2) if not self.pure_forecasting else np.zeros(
(self.k, self.n))
new_P = R - dot(S2.T, F) + S3
return F, new_P
|
r"""
The B operator, mapping P into
.. math::
B(P) := R - \beta^2 A'PB(Q + \beta B'PB)^{-1}B'PA + \beta A'PA
and also returning
.. math::
F := (Q + \beta B'PB)^{-1} \beta B'PA
Parameters
----------
P : array_like(float, ndim=2)
A matrix that should be n x n
Returns
-------
F : array_like(float, ndim=2)
The F matrix as defined above
new_p : array_like(float, ndim=2)
The matrix P after applying the B operator
|
def _check_min_density(self, min_density):
if min_density is None:
self._min_density = -np.Inf
elif (isinstance(min_density, float) and (0.0 <= min_density < 1.0)):
self._min_density = min_density
else:
raise ValueError('min_density must be float and be >=0.0 and < 1.0')
|
Validator to ensure proper usage.
|
def selected(self):
query_results = self.map(lambda el: el.is_selected(), 'selected').results
if query_results:
return all(query_results)
return False
|
Check whether all the matched elements are selected.
Returns:
bool
|
def getSubgraphFieldCount(self, parent_name, graph_name):
graph = self._getSubGraph(parent_name, graph_name, True)
return graph.getFieldCount()
|
Returns number of fields for subgraph with name graph_name and parent
graph with name parent_name.
@param parent_name: Root Graph Name
@param graph_name: Subgraph Name
@return: Number of fields for subgraph.
|
def pretty(self, indent=1, debug=False):
debug_details = ''
if debug:
debug_details += '<isliteral=%r, iscanonical=%r>' % (self.isliteral, self.iscanonical)
if self.isliteral:
pretty_literal = self.args[0].pretty(indent=0, debug=debug)
return (' ' * indent) + '%s(%s%s)' % (self.__class__.__name__, debug_details, pretty_literal)
else:
return super(NOT, self).pretty(indent=indent, debug=debug)
|
Return a pretty formatted representation of self.
Include additional debug details if `debug` is True.
|
def is_distributed(partition_column, lower_bound, upper_bound):
if (
(partition_column is not None)
and (lower_bound is not None)
and (upper_bound is not None)
):
if upper_bound > lower_bound:
return True
else:
raise InvalidArguments("upper_bound must be greater than lower_bound.")
elif (partition_column is None) and (lower_bound is None) and (upper_bound is None):
return False
else:
raise InvalidArguments(
"Invalid combination of partition_column, lower_bound, upper_bound."
"All these arguments should be passed (distributed) or none of them (standard pandas)."
)
|
Check if it is possible to distribute a query given these args.
Args:
partition_column: column used to share the data between the workers
lower_bound: the minimum value to be requested from the partition_column
upper_bound: the maximum value to be requested from the partition_column
Returns:
True for distributed or False if not
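A brief usage sketch (the argument values are illustrative):
is_distributed('id', 0, 10000)     # True: all three arguments supplied with valid bounds
is_distributed(None, None, None)   # False: fall back to standard pandas
is_distributed('id', 10000, 0)     # raises InvalidArguments (upper_bound <= lower_bound)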
|
def startswith_strip(s, startswith='http://', ignorecase=True):
if ignorecase:
if s.lower().startswith(startswith.lower()):
return s[len(startswith):]
else:
if s.startswith(startswith):
return s[len(startswith):]
return s
|
Strip a prefix from the beginning of a string
>>> startswith_strip('HTtp://TotalGood.com', 'HTTP://')
'TotalGood.com'
>>> startswith_strip('HTtp://TotalGood.com', startswith='HTTP://', ignorecase=False)
'HTtp://TotalGood.com'
|
def date(self, date):
self._occurrence_data['date'] = self._utils.format_datetime(
date, date_format='%Y-%m-%dT%H:%M:%SZ'
)
|
Set File Occurrence date.
|
def cpu_count(logical=True):
if logical:
from multiprocessing import cpu_count
ncpu=cpu_count()
else:
import psutil
ncpu=psutil.cpu_count(logical=False)
return ncpu
|
Return system CPU count
|
def ok(self, *args, cb=None):
self.clear_widgets()
if cb:
cb()
self.idx += 1
self.advance_dialog()
|
Clear dialog widgets, call ``cb`` if provided, and advance the dialog queue
|
def FileFinderOSFromClient(args):
stat_cache = filesystem.StatCache()
opts = args.action.stat
for path in GetExpandedPaths(args):
try:
content_conditions = conditions.ContentCondition.Parse(args.conditions)
for content_condition in content_conditions:
with io.open(path, "rb") as fd:
result = list(content_condition.Search(fd))
if not result:
raise _SkipFileException()
stat = stat_cache.Get(path, follow_symlink=bool(opts.resolve_links))
stat_entry = client_utils.StatEntryFromStatPathSpec(
stat, ext_attrs=opts.collect_ext_attrs)
yield stat_entry
except _SkipFileException:
pass
|
This function expands paths from the args and returns related stat entries.
Args:
args: An `rdf_file_finder.FileFinderArgs` object.
Yields:
Stat entries for the files that match the given conditions.
|
def get_subj_alt_name(peer_cert):
if hasattr(peer_cert, "to_cryptography"):
cert = peer_cert.to_cryptography()
else:
cert = _Certificate(openssl_backend, peer_cert._x509)
try:
ext = cert.extensions.get_extension_for_class(
x509.SubjectAlternativeName
).value
except x509.ExtensionNotFound:
return []
except (x509.DuplicateExtension, UnsupportedExtension,
x509.UnsupportedGeneralNameType, UnicodeError) as e:
log.warning(
"A problem was encountered with the certificate that prevented "
"urllib3 from finding the SubjectAlternativeName field. This can "
"affect certificate validation. The error was %s",
e,
)
return []
names = [
('DNS', name) for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName))
if name is not None
]
names.extend(
('IP Address', str(name))
for name in ext.get_values_for_type(x509.IPAddress)
)
return names
|
Given a PyOpenSSL certificate, provides all the subject alternative names.
|
def vectorize_raw(audio: np.ndarray) -> np.ndarray:
if len(audio) == 0:
raise InvalidAudio('Cannot vectorize empty audio!')
return vectorizers[pr.vectorizer](audio)
|
Turns audio into feature vectors, without clipping for length
|
def check_hmc_diagnostics(fit, pars=None, verbose=True, per_chain=False, checks=None):
verbosity = int(verbose)
all_checks = {"n_eff", "Rhat", "divergence", "treedepth", "energy"}
if checks is None:
checks = all_checks
else:
undefined_checks = []
for c in checks:
if c == "rhat":
continue
if c not in all_checks:
undefined_checks.append(c)
if undefined_checks:
ucstr = "[" + ", ".join(undefined_checks) + "]"
msg = "checks: {} are not legal checks: {}".format(ucstr, all_checks)
raise TypeError(msg)
out_dict = {}
if "n_eff" in checks:
try:
out_dict['n_eff'] = check_n_eff(fit, pars, verbose)
except ValueError:
if verbosity > 0:
logger.warning('Skipping check of effective sample size (n_eff)')
if ("Rhat" in checks) or ("rhat" in checks):
try:
out_dict['Rhat'] = check_rhat(fit, pars, verbose)
except ValueError:
if verbosity > 0:
logger.warning('Skipping check of potential scale reduction factors (Rhat)')
if "divergence" in checks:
try:
out_dict['divergence'] = check_div(fit, verbose, per_chain)
except ValueError:
if verbosity > 0:
logger.warning('Skipping check of divergent transitions (divergence)')
if "treedepth" in checks:
try:
out_dict['treedepth'] = check_treedepth(fit, verbose, per_chain)
except ValueError:
if verbosity > 0:
logger.warning('Skipping check of transitions ending prematurely due to maximum tree depth limit (treedepth)')
if "energy" in checks:
try:
out_dict['energy'] = check_energy(fit, verbose)
except ValueError:
if verbosity > 0:
logger.warning('Skipping check of E-BFMI (energy)')
return out_dict
|
Checks all hmc diagnostics
Parameters
----------
fit : StanFit4Model object
verbose : bool or int, optional
If ``verbose`` is ``False`` or a nonpositive integer, no
diagnostic messages are printed, and only the return value of
the function conveys diagnostic information. If it is ``True``
(the default) or an integer greater than zero, then diagnostic
messages are printed only for diagnostic checks that fail. If
``verbose`` is an integer greater than 1, then parameter
(quantile) diagnostics are printed. If ``verbose`` is
greater than 2, then extra diagnostic messages are printed.
per_chain : bool, optional
Where applicable, print diagnostics on a per-chain basis. This
applies mainly to the divergence and treedepth checks.
checks : list, {"n_eff", "Rhat", "divergence", "treedepth", "energy"}, optional
By default run all checks. If ``checks`` is defined, run only
checks given in ``checks``
Returns
-------
out_dict : dict
A dictionary where each key is the name of a diagnostic check,
and the value associated with each key is a Boolean value that
is True if the check passed and False otherwise. Possible
valid keys are 'n_eff', 'Rhat', 'divergence', 'treedepth', and
'energy', though which keys are available will depend upon the
sampling algorithm used.
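A minimal usage sketch, assuming `fit` is a StanFit4Model object obtained from sampling (the particular selection of checks here is just an example):
diagnostics = check_hmc_diagnostics(fit, checks=["n_eff", "Rhat", "divergence"])
if not all(diagnostics.values()):
    print("Some HMC diagnostics failed:", diagnostics)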
|
def wait_for_redis_to_start(redis_ip_address,
redis_port,
password=None,
num_retries=5):
redis_client = redis.StrictRedis(
host=redis_ip_address, port=redis_port, password=password)
counter = 0
while counter < num_retries:
try:
logger.info(
"Waiting for redis server at {}:{} to respond...".format(
redis_ip_address, redis_port))
redis_client.client_list()
except redis.ConnectionError:
time.sleep(1)
logger.info("Failed to connect to the redis server, retrying.")
counter += 1
else:
break
if counter == num_retries:
raise Exception("Unable to connect to Redis. If the Redis instance is "
"on a different machine, check that your firewall is "
"configured properly.")
|
Wait for a Redis server to be available.
This is accomplished by creating a Redis client and sending a random
command to the server until the command gets through.
Args:
redis_ip_address (str): The IP address of the redis server.
redis_port (int): The port of the redis server.
password (str): The password of the redis server.
num_retries (int): The number of times to try connecting with redis.
The client will sleep for one second between attempts.
Raises:
Exception: An exception is raised if we could not connect with Redis.
|
def prune_by_ngram(self, ngrams):
self._logger.info('Pruning results by n-gram')
self._matches = self._matches[
~self._matches[constants.NGRAM_FIELDNAME].isin(ngrams)]
|
Removes results rows whose n-gram is in `ngrams`.
:param ngrams: n-grams to remove
:type ngrams: `list` of `str`
|
def _scons_user_warning(e):
etype, value, tb = sys.exc_info()
filename, lineno, routine, dummy = find_deepest_user_frame(traceback.extract_tb(tb))
sys.stderr.write("\nscons: warning: %s\n" % e)
sys.stderr.write('File "%s", line %d, in %s\n' % (filename, lineno, routine))
|
Handle user warnings. Print out a message and a description of
the warning, along with the line number and routine where it occurred.
The file and line number will be the deepest stack frame that is
not part of SCons itself.
|
def probabilities(self, choosers, alternatives, filter_tables=True):
logger.debug('start: calculate probabilities for LCM model {}'.format(
self.name))
self.assert_fitted()
if filter_tables:
choosers, alternatives = self.apply_predict_filters(
choosers, alternatives)
if self.prediction_sample_size is not None:
sample_size = self.prediction_sample_size
else:
sample_size = len(alternatives)
if self.probability_mode == 'single_chooser':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers.head(1), alternatives, sample_size)
elif self.probability_mode == 'full_product':
_, merged, _ = interaction.mnl_interaction_dataset(
choosers, alternatives, sample_size)
else:
raise ValueError(
'Unrecognized probability_mode option: {}'.format(
self.probability_mode))
merged = util.apply_filter_query(
merged, self.interaction_predict_filters)
model_design = dmatrix(
self.str_model_expression, data=merged, return_type='dataframe')
if len(merged) != model_design.as_matrix().shape[0]:
raise ModelEvaluationError(
'Simulated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
coeffs = [self.fit_parameters['Coefficient'][x]
for x in model_design.columns]
if self.probability_mode == 'single_chooser':
numalts = len(merged)
else:
numalts = sample_size
probabilities = mnl.mnl_simulate(
model_design.as_matrix(),
coeffs,
numalts=numalts, returnprobs=True)
mi = pd.MultiIndex.from_arrays(
[merged['join_index'].values, merged.index.values],
names=('chooser_id', 'alternative_id'))
probabilities = pd.Series(probabilities.flatten(), index=mi)
logger.debug('finish: calculate probabilities for LCM model {}'.format(
self.name))
return probabilities
|
Returns the probabilities for a set of choosers to choose
from among a set of alternatives.
Parameters
----------
choosers : pandas.DataFrame
Table describing the agents making choices, e.g. households.
alternatives : pandas.DataFrame
Table describing the things from which agents are choosing.
filter_tables : bool, optional
If True, filter `choosers` and `alternatives` with prediction
filters before calculating probabilities.
Returns
-------
probabilities : pandas.Series
Probability of selection associated with each chooser
and alternative. Index will be a MultiIndex with alternative
IDs in the inner index and chooser IDs in the outer index.
|
def _get_simpx_plane(self):
on_wulff = [False] * len(self.miller_list)
surface_area = [0.0] * len(self.miller_list)
for simpx in self.wulff_cv_simp:
pts = [self.wulff_pt_list[simpx[i]] for i in range(3)]
center = np.sum(pts, 0) / 3.0
for plane in self.facets:
abs_diff = abs(np.dot(plane.normal, center) - plane.e_surf)
if abs_diff < 1e-5:
on_wulff[plane.index] = True
surface_area[plane.index] += get_tri_area(pts)
plane.points.append(pts)
plane.outer_lines.append([simpx[0], simpx[1]])
plane.outer_lines.append([simpx[1], simpx[2]])
plane.outer_lines.append([simpx[0], simpx[2]])
break
for plane in self.facets:
plane.outer_lines.sort()
plane.outer_lines = [line for line in plane.outer_lines
if plane.outer_lines.count(line) != 2]
return on_wulff, surface_area
|
Locate the plane for each simplex (simpx) of wulff_cv by comparing the center of
the simplex triangle with the plane functions.
|
def get_timeline(source):
timeline_format = ["name", "type", "source", "country", "city", "latitude",
"longitude", "website_url", "twitter_url",
"facebook_page_url", "facebook_group_url",
"whois_start", "whois_end", "wayback_start",
"wayback_end", "twitter_start", "twitter_end",
"facebook_start", "facebook_end"]
timeline = pd.DataFrame(timeline_format)
if source.lower() == "diybio.org":
data = diybio_org.get_labs(format="dict")
elif source.lower() == "fablabs_io":
data = fablabs_io.get_labs(format="dict")
elif source.lower() == "makeinitaly_foundation":
data = makeinitaly_foundation.get_labs(format="dict")
elif source.lower() == "hackaday_io":
data = hackaday_io.get_labs(format="dict")
elif source.lower() == "hackerspaces_org":
data = hackerspaces_org.get_labs(format="dict")
elif source.lower() == "makery_info":
data = makery_info.get_labs(format="dict")
elif source.lower() == "nesta":
data = nesta.get_labs(format="dict")
elif source.lower() == "all":
pass
for lab in labs_data:
for link in lab.links:
print link
if "twitter" in link:
print link
if "facebook" in link:
print link
lab_dataframe_dict = {"name": lab.name,
"type": lab.lab_type,
"source": lab.source,
"country": lab.country,
"city": lab.city,
"latitude": lab.latitude,
"longitude": lab.longitude,
"website_url": lab.url}
timeline.append(lab_dataframe_dict)
["name", "type", "source", "country", "city", "lat", "long",
"website_url", "twitter_url", "facebook_page_url",
"facebook_group_url", "whois_start", "whois_end", "wayback_start",
"wayback_end", "twitter_start", "twitter_end", "facebook_start",
"facebook_end"]
return timeline
|
Rebuild a timeline of the history of makerlabs.
|
def setJobGroup(self, groupId, description, interruptOnCancel=False):
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
|
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = threading.Thread(target=start_job, args=(10,)).start()
>>> suppress = threading.Thread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
|
def verify_is_not(self, first, second, msg=None):
try:
self.assert_is_not(first, second, msg)
except AssertionError, e:
if msg:
m = "%s:\n%s" % (msg, str(e))
else:
m = str(e)
self.verification_erorrs.append(m)
|
Soft assert for whether the parameters do not evaluate to the same object
:param first: the object to compare against
:param second: the object to compare with
:param msg: (Optional) msg explaining the difference
|
def _get_service_bindings(self, service_name):
instance = self.get_instance(service_name)
return self.api.get(instance['service_bindings_url'])
|
Return the service bindings for the service instance.
|
def _FindLargestIdPostfixNumber(self, schedule):
postfix_number_re = re.compile('(\d+)$')
def ExtractPostfixNumber(entity_id):
if entity_id is None:
return 0
match = postfix_number_re.search(entity_id)
if match is not None:
return int(match.group(1))
else:
return 0
id_data_sets = {'agency_id': schedule.GetAgencyList(),
'stop_id': schedule.GetStopList(),
'route_id': schedule.GetRouteList(),
'trip_id': schedule.GetTripList(),
'service_id': schedule.GetServicePeriodList(),
'fare_id': schedule.GetFareAttributeList(),
'shape_id': schedule.GetShapeList()}
max_postfix_number = 0
for id_name, entity_list in id_data_sets.items():
for entity in entity_list:
entity_id = getattr(entity, id_name)
postfix_number = ExtractPostfixNumber(entity_id)
max_postfix_number = max(max_postfix_number, postfix_number)
return max_postfix_number
|
Finds the largest integer used as the ending of an id in the schedule.
Args:
schedule: The schedule to check.
Returns:
The maximum integer used as an ending for an id.
|
def get_item(self, item_type, id):
url = 'data/v1/item-types/%s/items/%s' % (item_type, id)
return self._get(url).get_body()
|
Get the item response for the given item_type and id
:param item_type str: A valid item-type
:param id str: The id of the item
:returns: :py:Class:`planet.api.models.JSON`
:raises planet.api.exceptions.APIException: On API error.
|
def parseCommandLine(self, line):
try:
optlist, args = getopt.getopt(line, self._optlist(), self._longoptl())
except getopt.GetoptError, err:
print str(err)
self.usage()
sys.exit(2)
for opt, arg in optlist:
opt = opt.lstrip("-")
found = False
for option in self.options:
if option.short == opt or option.long == opt:
option.value = option.type(arg)
option.set = True
found = True
if not found:
sys.stderr.write("unknown option: " + opt)
self.usage()
sys.exit(2)
specialOptions = False
for opt in self.options:
if opt.special and opt.isSet():
specialOptions = True
if len(args) < self.minArgs and not specialOptions:
err = (str(len(args)) + " input arguments found, but required at " +
"least " + str(self.minArgs) + " arguments")
sys.stderr.write(err)
sys.stderr.write("\n\n")
self.usage()
sys.exit(2)
else:
self.args = args
for option in self.options:
if option.isRequired() and not option.isSet() and not specialOptions:
err = "required option \'" + str(option) + "\' is missing"
sys.stderr.write(err)
print "\n"
self.usage()
sys.exit(2)
|
Parse the given command line and populate self with the values found.
If the command line doesn't conform to the specification defined
by this CLI object, this function prints a message to stdout indicating
what was wrong, prints the program usage instructions and then exits
the program
:param line: list of tokens from the command line. Should have had program
name removed - generally one would do this: sys.argv[1:]
|
def set_sleep_timer(self, sleep_time_seconds):
try:
if sleep_time_seconds is None:
sleep_time = ''
else:
sleep_time = format(
datetime.timedelta(seconds=int(sleep_time_seconds))
)
self.avTransport.ConfigureSleepTimer([
('InstanceID', 0),
('NewSleepTimerDuration', sleep_time),
])
except SoCoUPnPException as err:
if 'Error 402 received' in str(err):
raise ValueError('invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None')
raise
except ValueError:
raise ValueError('invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None')
|
Sets the sleep timer.
Args:
sleep_time_seconds (int or NoneType): How long to wait before
turning off speaker in seconds, None to cancel a sleep timer.
Maximum value of 86399
Raises:
SoCoException: Upon errors interacting with Sonos controller
ValueError: Argument/Syntax errors
|
def json_to_csv(json_input):
try:
json_input = json.loads(json_input)
except:
pass
headers = set()
for json_row in json_input:
headers.update(json_row.keys())
csv_io = StringIO.StringIO()
csv_out = csv.DictWriter(csv_io,headers)
csv_out.writeheader()
for json_row in json_input:
csv_out.writerow(json_row)
csv_io.seek(0)
return csv_io.read()
|
Convert simple JSON to CSV
Accepts a JSON string or JSON object
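A usage sketch (the snippet above targets Python 2's StringIO module; the input JSON is illustrative):
csv_text = json_to_csv('[{"a": 1, "b": 2}, {"a": 3}]')
# csv_text is a CSV string whose header is the union of the keys ("a", "b");
# rows missing a key get an empty cell, and column order is not guaranteed
# because the header is built from a set.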
|
def store(self):
if msgpack is None:
log.error('Cache cannot be stored on disk: msgpack is missing')
else:
try:
with salt.utils.files.fopen(self._path, 'wb+') as fp_:
cache = {
"CacheDisk_data": self._dict,
"CacheDisk_cachetime": self._key_cache_time
}
msgpack.dump(cache, fp_, use_bin_type=True)
except (IOError, OSError) as err:
log.error('Error storing cache data to the disk: %s', err)
|
Write content of the entire cache to disk
|
def get(sub_array_id):
if not re.match(r'^subarray-0[0-9]|subarray-1[0-5]$', sub_array_id):
response = dict(error='Invalid sub-array ID specified "{}" does not '
'match sub-array ID naming convention '
'(ie. subarray-[00-15]).'.
format(sub_array_id))
return response, HTTPStatus.BAD_REQUEST
if sub_array_id not in DB.get_sub_array_ids():
response = dict(error='Sub-array "{}" does not currently exist. '
'Known sub-arrays = {}'
.format(sub_array_id, DB.get_sub_array_ids()))
return response, HTTPStatus.NOT_FOUND
block_ids = DB.get_sub_array_sbi_ids(sub_array_id)
_blocks = [b for b in DB.get_block_details(block_ids)]
response = dict(scheduling_blocks=[])
_url = get_root_url()
for block in _blocks:
block['links'] = {
'self': '{}/scheduling-block/{}'.format(_url, block['id'])
}
response['scheduling_blocks'].append(block)
response['links'] = {
'self': '{}'.format(request.url),
'list': '{}/sub-arrays'.format(_url),
'home': '{}'.format(_url),
}
return response, HTTPStatus.OK
|
Sub array detail resource.
This method will list scheduling blocks and processing blocks
in the specified sub-array.
|
def set_id(self,i):
if self.type == 'NAF':
self.node.set('id',i)
elif self.type == 'KAF':
self.node.set('mid',i)
|
Sets the identifier for the term
@type i: string
@param i: lemma identifier
|
def moothedata(data, key=None):
if not key:
key = choice(list(data.keys()))
logger.debug("Using randomly chosen key: %s", key)
msg = cow.Moose().milk("{0}: {1}".format(key.capitalize(), data[key]))
return msg
|
Return an amusing picture containing an item from a dict.
Parameters
----------
data: mapping
A mapping, such as a raster dataset's ``meta`` or ``profile``
property.
key:
A key of the ``data`` mapping.
|
def from_set(cls, database, key, data, clear=False):
s = cls(database, key)
if clear:
s.clear()
s.add(*data)
return s
|
Create and populate a Set object from a data set.
|
def _detect_encoding(data=None):
import locale
enc_list = ['utf-8', 'latin-1', 'iso8859-1', 'iso8859-2',
'utf-16', 'cp720']
code = locale.getpreferredencoding(False)
if data is None:
return code
if code.lower() not in enc_list:
enc_list.insert(0, code.lower())
for c in enc_list:
try:
for line in data:
line.decode(c)
except (UnicodeDecodeError, UnicodeError, AttributeError):
continue
return c
print("Encoding not detected. Please pass encoding value manually")
|
Return the default system encoding. If data is passed, try
to decode the data with the default system encoding or from a short
list of encoding types to test.
Args:
data - list of lists
Returns:
enc - system encoding
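A short usage sketch (the byte strings are illustrative; the result of the first call depends on the system locale):
_detect_encoding()                      # e.g. 'utf-8', the system default encoding
rows = [b'col1,col2', b'1,caf\xc3\xa9']
_detect_encoding(rows)                  # the first candidate encoding that decodes every line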
|
def clip(dataset, normal='x', origin=None, invert=True):
if isinstance(normal, str):
normal = NORMALS[normal.lower()]
if origin is None:
origin = dataset.center
plane = _generate_plane(normal, origin)
alg = vtk.vtkClipDataSet()
alg.SetInputDataObject(dataset)
alg.SetClipFunction(plane)
alg.SetInsideOut(invert)
alg.Update()
return _get_output(alg)
|
Clip a dataset by a plane by specifying the origin and normal. If no
parameters are given the clip will occur in the center of that dataset
Parameters
----------
normal : tuple(float) or str
Length 3 tuple for the normal vector direction. Can also be
specified as a string conventional direction such as ``'x'`` for
``(1,0,0)`` or ``'-x'`` for ``(-1,0,0)``, etc.
origin : tuple(float)
The center ``(x,y,z)`` coordinate of the plane on which the clip
occurs
invert : bool
Flag on whether to flip/invert the clip
|
def _magic_parser(stream, magic):
in_doc, fields = 0, None
for line in stream:
line = line.strip()
if line.startswith(magic):
keys = line.split()
fields = OrderedDict((k, []) for k in keys)
if fields is not None:
in_doc += 1
if in_doc == 1:
continue
if not line: break
tokens = list(map(float, line.split()[1:]))
assert len(tokens) == len(keys)
for l, v in zip(fields.values(), tokens):
l.append(v)
if fields:
return OrderedDict([(k, np.array(v)) for k, v in fields.items()])
else:
return None
|
Parse the section with the SCF cycle
Returns:
dict where the keys are the names of the columns and
the values are lists of numbers. None if no section was found.
.. warning::
The parser is very fragile and should be replaced by YAML.
|
def _filter_cache(self, dmap, kdims):
filtered = []
for key, value in dmap.data.items():
if not any(kd.values and v not in kd.values for kd, v in zip(kdims, key)):
filtered.append((key, value))
return filtered
|
Returns a filtered version of the DynamicMap cache leaving only
keys consistent with the newly specified values
|
def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
msg = ("supplying multiple axes to axis is deprecated and "
"will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
result = self.loc(axis=axis)[mask]
if inplace:
self._update_inplace(result)
else:
return result
|
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing value.
.. deprecated:: 0.23.0
Pass tuple or list to drop on multiple axes.
Only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
|
def _get_nblock_regions(in_file, min_n_size, ref_regions):
out_lines = []
called_contigs = set([])
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
contig, start, end, ctype = line.rstrip().split()
called_contigs.add(contig)
if (ctype in ["REF_N", "NO_COVERAGE", "EXCESSIVE_COVERAGE", "LOW_COVERAGE"] and
int(end) - int(start) > min_n_size):
out_lines.append("%s\t%s\t%s\n" % (contig, start, end))
for refr in ref_regions:
if refr.chrom not in called_contigs:
out_lines.append("%s\t%s\t%s\n" % (refr.chrom, 0, refr.stop))
return pybedtools.BedTool("\n".join(out_lines), from_string=True)
|
Retrieve coordinates of regions in reference genome with no mapping.
These are potential breakpoints for parallelizing analysis.
|
def parse_paramvalue(self, tup_tree):
self.check_node(tup_tree, 'PARAMVALUE', ('NAME',),
('TYPE', 'PARAMTYPE', 'EmbeddedObject',
'EMBEDDEDOBJECT'))
child = self.optional_child(tup_tree,
('VALUE', 'VALUE.REFERENCE', 'VALUE.ARRAY',
'VALUE.REFARRAY', 'CLASSNAME',
'INSTANCENAME', 'CLASS', 'INSTANCE',
'VALUE.NAMEDINSTANCE'))
attrl = attrs(tup_tree)
if 'PARAMTYPE' in attrl:
paramtype = attrl['PARAMTYPE']
elif 'TYPE' in attrl:
paramtype = attrl['TYPE']
else:
paramtype = None
if 'EmbeddedObject' in attrl or 'EMBEDDEDOBJECT' in attrl:
child = self.parse_embeddedObject(child)
return attrl['NAME'], paramtype, child
|
Parse PARAMVALUE element.
::
<!ELEMENT PARAMVALUE (VALUE | VALUE.REFERENCE | VALUE.ARRAY |
VALUE.REFARRAY | CLASSNAME | INSTANCENAME |
CLASS | INSTANCE | VALUE.NAMEDINSTANCE)?>
<!ATTLIST PARAMVALUE
%CIMName;
%ParamType; #IMPLIED
%EmbeddedObject;>
|
def from_array(array):
if array is None or not array:
return None
assert_type_or_raise(array, dict, parameter_name="array")
from pytgbot.api_types.receivable.inline import ChosenInlineResult
from pytgbot.api_types.receivable.inline import InlineQuery
from pytgbot.api_types.receivable.payments import PreCheckoutQuery
from pytgbot.api_types.receivable.payments import ShippingQuery
from pytgbot.api_types.receivable.updates import CallbackQuery
from pytgbot.api_types.receivable.updates import Message
data = {}
data['update_id'] = int(array.get('update_id'))
data['message'] = Message.from_array(array.get('message')) if array.get('message') is not None else None
data['edited_message'] = Message.from_array(array.get('edited_message')) if array.get('edited_message') is not None else None
data['channel_post'] = Message.from_array(array.get('channel_post')) if array.get('channel_post') is not None else None
data['edited_channel_post'] = Message.from_array(array.get('edited_channel_post')) if array.get('edited_channel_post') is not None else None
data['inline_query'] = InlineQuery.from_array(array.get('inline_query')) if array.get('inline_query') is not None else None
data['chosen_inline_result'] = ChosenInlineResult.from_array(array.get('chosen_inline_result')) if array.get('chosen_inline_result') is not None else None
data['callback_query'] = CallbackQuery.from_array(array.get('callback_query')) if array.get('callback_query') is not None else None
data['shipping_query'] = ShippingQuery.from_array(array.get('shipping_query')) if array.get('shipping_query') is not None else None
data['pre_checkout_query'] = PreCheckoutQuery.from_array(array.get('pre_checkout_query')) if array.get('pre_checkout_query') is not None else None
data['_raw'] = array
return Update(**data)
|
Deserialize a new Update from a given dictionary.
:return: new Update instance.
:rtype: Update
|
def split_model(model:nn.Module=None, splits:Collection[Union[nn.Module,ModuleList]]=None):
"Split `model` according to the layers in `splits`."
splits = listify(splits)
if isinstance(splits[0], nn.Module):
layers = flatten_model(model)
idxs = [layers.index(first_layer(s)) for s in splits]
return split_model_idx(model, idxs)
return [nn.Sequential(*s) for s in splits]
|
Split `model` according to the layers in `splits`.
|
def _file_has_tag_anchor_keypair(self, anchors, file_key, tag):
return file_key in anchors and tag in anchors[file_key]
|
Is there an AnchorHub tag, 'tag', registered for file 'file_key' in
'anchors'?
:param anchors: Dictionary mapping string file paths to inner
dictionaries. These inner dictionaries map string AnchorHub tags
to string generated anchors
:param file_key: The absolute path to the file that may or may not
have the AnchorHub tag in it. Used as a key to anchors
:param tag: The string being tested
:return: True if tag is a valid AnchorHub tag in the file associated
with 'file_key'
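An illustrative sketch of the expected `anchors` structure (the paths and tags below are invented for the example; the call is made from within the owning class):
anchors = {'/abs/path/doc.md': {'my-tag': 'my-tag-1'}}
self._file_has_tag_anchor_keypair(anchors, '/abs/path/doc.md', 'my-tag')   # True
self._file_has_tag_anchor_keypair(anchors, '/abs/path/doc.md', 'missing')  # False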
|
def to_xdr_amount(value):
if not isinstance(value, str):
raise NotValidParamError("Value of type '{}' must be of type String, but got {}".format(value, type(value)))
try:
amount = int((Decimal(value) * ONE).to_integral_exact(context=Context(traps=[Inexact])))
except decimal.Inexact:
raise NotValidParamError("Value of '{}' must have at most 7 digits after the decimal.".format(value))
except decimal.InvalidOperation:
raise NotValidParamError("Value of '{}' must represent a positive number.".format(value))
return amount
|
Converts an amount to the appropriate value to send over the network
as a part of an XDR object.
Each asset amount is encoded as a signed 64-bit integer in the XDR
structures. An asset amount unit (that which is seen by end users) is
scaled down by a factor of ten million (10,000,000) to arrive at the
native 64-bit integer representation. For example, the integer amount
value 25,123,456 equals 2.5123456 units of the asset. This scaling
allows for seven decimal places of precision in human-friendly amount
units.
This static method correctly multiplies the value by the scaling factor
in order to come to the integer value used in XDR structures.
See `Stellar's documentation on Asset Precision
<https://www.stellar.org/developers/guides/concepts/assets.html#amount-precision-and-representation>`_
for more information.
:param str value: The amount to convert to an integer for XDR
serialization.
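For illustration, a standalone sketch of the same scaling; the helper name here is made up, the real conversion lives in the method above:

from decimal import Context, Decimal, Inexact

ONE = Decimal(10) ** 7  # 10,000,000: seven decimal places of precision

def to_stroops(value: str) -> int:
    # "2.5123456" units -> 25123456; the Inexact trap rejects more than 7 decimals.
    return int((Decimal(value) * ONE).to_integral_exact(context=Context(traps=[Inexact])))

assert to_stroops("2.5123456") == 25123456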
|
def _is_readable(self, obj):
try:
read = getattr(obj, 'read')
except AttributeError:
return False
else:
return is_method(read, max_arity=1)
|
Check if the argument is a readable file-like object.
|
def optimize_svc(data, targets):
def svc_crossval(expC, expGamma):
C = 10 ** expC
gamma = 10 ** expGamma
return svc_cv(C=C, gamma=gamma, data=data, targets=targets)
optimizer = BayesianOptimization(
f=svc_crossval,
pbounds={"expC": (-3, 2), "expGamma": (-4, -1)},
random_state=1234,
verbose=2
)
optimizer.maximize(n_iter=10)
print("Final result:", optimizer.max)
|
Apply Bayesian Optimization to SVC parameters.
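The svc_cv helper it maximizes is not shown; a plausible scikit-learn sketch of it (an assumption, not the original code) would return the cross-validated score for a given C and gamma:

from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC

def svc_cv(C, gamma, data, targets):
    # Mean ROC AUC over a 4-fold cross-validation, used as the objective to maximize.
    estimator = SVC(C=C, gamma=gamma, random_state=2)
    return cross_val_score(estimator, data, targets, scoring="roc_auc", cv=4).mean()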
|
def search(self, query):
logger.info("Catalog query={}".format(query))
catalogs = _.to_list(req.get("catalog", None))
if catalogs:
return senaiteapi.search(query, catalog=catalogs)
return senaiteapi.search(query)
|
Search the catalog.
|
def _setup_tf(self, stream=False):
if self.tf_version < (0, 9, 0):
self._set_remote(stream=stream)
return
self._run_tf('init', stream=stream)
logger.info('Terraform initialized')
|
Setup terraform; either 'remote config' or 'init' depending on version.
|
def construct(self, request, service=None, http_args=None, **kwargs):
if 'client_assertion' in kwargs:
request["client_assertion"] = kwargs['client_assertion']
if 'client_assertion_type' in kwargs:
request[
'client_assertion_type'] = kwargs['client_assertion_type']
else:
request["client_assertion_type"] = JWT_BEARER
elif 'client_assertion' in request:
if 'client_assertion_type' not in request:
request["client_assertion_type"] = JWT_BEARER
else:
algorithm = None
_context = service.service_context
if kwargs['authn_endpoint'] in ['token_endpoint']:
try:
algorithm = _context.behaviour[
'token_endpoint_auth_signing_alg']
except (KeyError, AttributeError):
pass
audience = _context.provider_info['token_endpoint']
else:
audience = _context.provider_info['issuer']
if not algorithm:
algorithm = self.choose_algorithm(**kwargs)
ktype = alg2keytype(algorithm)
try:
if 'kid' in kwargs:
signing_key = [self.get_key_by_kid(kwargs["kid"], algorithm,
_context)]
elif ktype in _context.kid["sig"]:
try:
signing_key = [self.get_key_by_kid(
_context.kid["sig"][ktype], algorithm, _context)]
except KeyError:
signing_key = self.get_signing_key(algorithm, _context)
else:
signing_key = self.get_signing_key(algorithm, _context)
except NoMatchingKey as err:
logger.error("%s" % sanitize(err))
raise
try:
_args = {'lifetime': kwargs['lifetime']}
except KeyError:
_args = {}
request["client_assertion"] = assertion_jwt(
_context.client_id, signing_key, audience,
algorithm, **_args)
request["client_assertion_type"] = JWT_BEARER
try:
del request["client_secret"]
except KeyError:
pass
if not request.c_param["client_id"][VREQUIRED]:
try:
del request["client_id"]
except KeyError:
pass
return {}
|
Constructs a client assertion and signs it with a key.
The request is modified as a side effect.
:param request: The request
:param service: A :py:class:`oidcservice.service.Service` instance
:param http_args: HTTP arguments
:param kwargs: Extra arguments
:return: Constructed HTTP arguments, in this case none
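For context, a rough sketch of the claims such a client assertion carries, signed here with PyJWT and a symmetric key purely for illustration; the method above signs with a key resolved from the service context:

import time
import uuid

import jwt  # PyJWT

client_id = "my-client"
token_endpoint = "https://op.example.com/token"  # audience when authenticating at the token endpoint

claims = {
    "iss": client_id,
    "sub": client_id,
    "aud": token_endpoint,
    "jti": uuid.uuid4().hex,
    "iat": int(time.time()),
    "exp": int(time.time()) + 600,
}
request_args = {
    "client_assertion": jwt.encode(claims, "shared-secret", algorithm="HS256"),
    "client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
}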
|
def notify3_d_event(self, type_p, data):
if not isinstance(type_p, baseinteger):
raise TypeError("type_p can only be an instance of type baseinteger")
if not isinstance(data, list):
raise TypeError("data can only be an instance of type list")
for a in data[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array can only contain objects of type basestring")
self._call("notify3DEvent",
in_p=[type_p, data])
|
Notifies framebuffer about 3D backend event.
in type_p of type int
event type. Currently only VBOX3D_NOTIFY_EVENT_TYPE_VISIBLE_3DDATA is supported.
in data of type str
event-specific data, depends on the supplied event type
|
def _check_device_number(self, devices):
if len(devices) < 2 or len(devices) > 4:
msg = 'The number of devices to cluster is not supported.'
raise ClusterNotSupported(msg)
|
Check that the number of devices to cluster is between 2 and 4.
:param devices: list -- devices to be clustered
:raises ClusterNotSupported: if the number of devices is unsupported
|
def _describe_bitmask(
bits: int, table: Dict[Any, str], default: str = "0"
) -> str:
result = []
for bit, name in table.items():
if bit & bits:
result.append(name)
if not result:
return default
return "|".join(result)
|
Returns a bitmask in human readable form.
This is a private function, used internally.
Args:
bits (int): The bitmask to be represented.
table (Dict[Any,str]): A reverse lookup table.
default (str): A default return value when bits is 0.
Returns: str: A printable version of the bits variable.
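A usage sketch with an illustrative lookup table; the flag names are not from the library:

FLAGS = {0x1: "READ", 0x2: "WRITE", 0x4: "EXEC"}

assert _describe_bitmask(0x5, FLAGS) == "READ|EXEC"
assert _describe_bitmask(0, FLAGS) == "0"  # falls back to the default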
|
async def make_response(self, result: ResponseReturnValue) -> Response:
status_or_headers = None
headers = None
status = None
if isinstance(result, tuple):
value, status_or_headers, headers = result + (None,) * (3 - len(result))
else:
value = result
if value is None:
raise TypeError('The response value returned by the view function cannot be None')
if isinstance(status_or_headers, (dict, list)):
headers = status_or_headers
status = None
elif status_or_headers is not None:
status = status_or_headers
if not isinstance(value, Response):
response = self.response_class(
value, timeout=self.config['RESPONSE_TIMEOUT'],
)
else:
response = value
if status is not None:
response.status_code = status
if headers is not None:
response.headers.update(headers)
return response
|
Make a Response from the result of the route handler.
The result itself can either be:
- A Response object (or subclass).
- A tuple of a ResponseValue and a header dictionary.
- A tuple of a ResponseValue, status code and a header dictionary.
A ResponseValue is either a Response object (or subclass) or a str.
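For illustration, the three accepted return shapes written as hypothetical route handlers (the names and header are made up):

async def plain():
    return "hello"                              # bare ResponseValue

async def created():
    return "created", 201                       # ResponseValue + status

async def traced():
    return "ok", 200, {"X-Trace-Id": "abc123"}  # ResponseValue + status + headers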
|
def get_local_property(elt, key, default=None, ctx=None):
result = default
local_properties = get_local_properties(elt=elt, keys=(key,), ctx=ctx)
if key in local_properties:
result = local_properties[key]
return result
|
Get one local property related to one input key, or a default value if the
key is not found.
:param elt: element to read the local property from. Must not be None.
:param str key: property key to get.
:param default: default value to return if key does not exist in elt
    properties.
:param ctx: element context from which to get properties. Equals elt if
    None. This allows retrieving function properties related to a class or
    instance when the function is defined in a base class.
:return: the property value for key, or default if key is not found.
|
def deploy_gateway(collector):
configuration = collector.configuration
aws_syncr = configuration['aws_syncr']
aws_syncr, amazon, stage, gateway = find_gateway(aws_syncr, configuration)
gateway.deploy(aws_syncr, amazon, stage)
if not configuration['amazon'].changes:
log.info("No changes were made!!")
|
Deploy the apigateway to a particular stage
|
def scan (data, clamconf):
try:
scanner = ClamdScanner(clamconf)
except socket.error:
errmsg = _("Could not connect to ClamAV daemon.")
return ([], [errmsg])
try:
scanner.scan(data)
finally:
scanner.close()
return scanner.infected, scanner.errors
|
Scan data for viruses.
@return (infection msgs, errors)
@rtype ([], [])
|
def _get_super_entities_by_ctype(model_objs_by_ctype, model_ids_to_sync, sync_all):
super_entities_by_ctype = defaultdict(lambda: defaultdict(list))
for ctype, model_objs_for_ctype in model_objs_by_ctype.items():
entity_config = entity_registry.entity_registry.get(ctype.model_class())
super_entities = entity_config.get_super_entities(model_objs_for_ctype, sync_all)
super_entities_by_ctype[ctype] = {
ContentType.objects.get_for_model(model_class, for_concrete_model=False): relationships
for model_class, relationships in super_entities.items()
}
for super_entity_ctype, relationships in super_entities_by_ctype[ctype].items():
for sub_entity_id, super_entity_id in relationships:
model_ids_to_sync[ctype].add(sub_entity_id)
model_ids_to_sync[super_entity_ctype].add(super_entity_id)
return super_entities_by_ctype
|
Given model objects organized by content type and a dictionary of all model IDs that need
to be synced, organize all super entity relationships that need to be synced.
Ensure that the model_ids_to_sync dict is updated with any new super entities
that need to be part of the overall entity sync
|
def csview(self, view=False):
for starfile in fileio.read_files(self.from_path):
chains = starfile.chem_shifts_by_residue(amino_acids=self.amino_acids,
atoms=self.atoms,
amino_acids_and_atoms=self.amino_acids_and_atoms,
nmrstar_version=self.nmrstar_version)
for idx, chemshifts_dict in enumerate(chains):
nodes = []
edges = []
for seq_id in chemshifts_dict:
aaname = "{}_{}".format(chemshifts_dict[seq_id]["AA3Code"], seq_id)
label = '"{{{}|{}}}"'.format(seq_id, chemshifts_dict[seq_id]["AA3Code"])
color = 8
aanode_entry = " {} [label={}, fillcolor={}]".format(aaname, label, color)
nodes.append(aanode_entry)
currnodename = aaname
for atom_type in chemshifts_dict[seq_id]:
if atom_type in ["AA3Code", "Seq_ID"]:
continue
else:
atname = "{}_{}".format(aaname, atom_type)
label = '"{{{}|{}}}"'.format(atom_type, chemshifts_dict[seq_id][atom_type])
if atom_type.startswith("H"):
color = 4
elif atom_type.startswith("C"):
color = 6
elif atom_type.startswith("N"):
color = 10
else:
color = 8
atnode_entry = "{} [label={}, fillcolor={}]".format(atname, label, color)
nextnodename = atname
nodes.append(atnode_entry)
edges.append("{} -> {}".format(currnodename, nextnodename))
currnodename = nextnodename
if self.filename is None:
filename = "{}_{}".format(starfile.id, idx)
else:
filename = "{}_{}".format(self.filename, idx)
src = Source(self.dot_template.format("\n".join(nodes), "\n".join(edges)), format=self.csview_format)
src.render(filename=filename, view=view)
|
View chemical shift values organized by amino acid residue.
:param view: Open in default image viewer or save file in current working directory quietly.
:type view: :py:obj:`True` or :py:obj:`False`
:return: None
:rtype: :py:obj:`None`
|
def update_stress_mode(self):
self.stress_conroller.kill_stress_process()
self.view.clock_view.set_text(ZERO_TIME)
self.stress_start_time = timeit.default_timer()
if self.stress_conroller.get_current_mode() == 'Stress':
stress_cmd = self.view.stress_menu.get_stress_cmd()
self.stress_conroller.start_stress(stress_cmd)
elif self.stress_conroller.get_current_mode() == 'FIRESTARTER':
stress_cmd = [self.firestarter]
self.stress_conroller.start_stress(stress_cmd)
|
Updates stress mode according to radio buttons state
|
def calculate(bam_file, data, sv_bed):
params = {"min": dd.get_coverage_depth_min(data)}
variant_regions = dd.get_variant_regions_merged(data)
if not variant_regions:
variant_regions = _create_genome_regions(data)
callable_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align",
dd.get_sample_name(data))),
"%s-coverage.callable.bed" % (dd.get_sample_name(data)))
if not utils.file_uptodate(callable_file, bam_file):
vr_quantize = ("0:1:%s:" % (params["min"]), ["NO_COVERAGE", "LOW_COVERAGE", "CALLABLE"])
to_calculate = [("variant_regions", variant_regions,
vr_quantize, None, "coverage_perbase" in dd.get_tools_on(data)),
("sv_regions", bedutils.clean_file(sv_bed, data, prefix="svregions-"),
None, None, False),
("coverage", bedutils.clean_file(dd.get_coverage(data), data, prefix="cov-"),
None, DEPTH_THRESHOLDS, False)]
depth_files = {}
for target_name, region_bed, quantize, thresholds, per_base in to_calculate:
if region_bed:
cur_depth = {}
depth_info = run_mosdepth(data, target_name, region_bed, quantize=quantize, thresholds=thresholds,
per_base=per_base)
for attr in ("dist", "regions", "thresholds", "per_base"):
val = getattr(depth_info, attr, None)
if val:
cur_depth[attr] = val
depth_files[target_name] = cur_depth
if target_name == "variant_regions":
callable_file = depth_info.quantize
else:
depth_files = {}
final_callable = _subset_to_variant_regions(callable_file, variant_regions, data)
return final_callable, depth_files
|
Calculate coverage in parallel using mosdepth.
Removes duplicates and secondary reads from the counts:
if ( b->core.flag & (BAM_FUNMAP | BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP) ) continue;
|
def update_floatingip_statuses_cfg(self, context, router_id, fip_statuses):
with context.session.begin(subtransactions=True):
for (floatingip_id, status) in six.iteritems(fip_statuses):
LOG.debug("New status for floating IP %(floatingip_id)s: "
"%(status)s", {'floatingip_id': floatingip_id,
'status': status})
try:
self._l3plugin.update_floatingip_status(
context, floatingip_id, status)
except l3_exceptions.FloatingIPNotFound:
LOG.debug("Floating IP: %s no longer present.",
floatingip_id)
known_router_fips = self._l3plugin.get_floatingips(
context, {'last_known_router_id': [router_id]})
fips_to_disable = (fip['id'] for fip in known_router_fips
if not fip['router_id'])
for fip_id in fips_to_disable:
LOG.debug("update_fip_statuses: disable: %s", fip_id)
self._l3plugin.update_floatingip_status(
context, fip_id, bc.constants.FLOATINGIP_STATUS_DOWN)
|
Update operational status for one or several floating IPs.
This is called by Cisco cfg agent to update the status of one or
several floatingips.
:param context: contains user information
:param router_id: id of router associated with the floatingips
:param fip_statuses: dict with floatingip_id as key and status as value
|
def get_model_from_settings(settings_key):
cls_path = getattr(settings, settings_key, None)
if not cls_path:
raise NotImplementedError()
try:
app_label, model_name = cls_path.split('.')
except ValueError:
raise ImproperlyConfigured("{0} must be of the form "
"'app_label.model_name'".format(settings_key))
model = apps.get_model(app_label, model_name)
if model is None:
raise ImproperlyConfigured("{0} refers to model '%s' that has not "
"been installed".format(settings_key))
return model
|
Return the django model from a settings key.
This is the same pattern used for django's "get_user_model()" method, which
allows the model class to be swapped for a different subclass via settings.
:param settings_key: the settings attribute holding the 'app_label.model_name'
string of the model to return
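A hypothetical usage sketch; the settings key and model name are examples only:

# settings.py
# PROFILE_MODEL = "accounts.Profile"

Profile = get_model_from_settings("PROFILE_MODEL")
profile_count = Profile.objects.count()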
|
def send_xml(self, text, code=200):
self.send_response(code)
if text:
            self.send_header('Content-type', 'text/xml; charset="%s"' % UNICODE_ENCODING)
self.send_header('Content-Length', str(len(text)))
self.end_headers()
if text:
self.wfile.write(text)
self.wfile.flush()
|
Send some XML.
|