Unnamed: 0 (int64, 0 to 389k) | code (string, lengths 26 to 79.6k) | docstring (string, lengths 1 to 46.9k) |
---|---|---|
1,200 | def generate(self):
self._setup()
header_html = self._generate_header()
footer_html = self._generate_footer()
results_topbar_html = self._generate_topbar("results")
summary_topbar_html = self._generate_topbar("summary")
logs_topbar_html = self._generate_topbar("logs")
diff_topbar_html = self._generate_topbar("diff")
summary_body_html = self._generate_summary_body()
diff_body_html = self._generate_diff_body()
summary_html = header_html + summary_topbar_html + summary_body_html + footer_html
diff_html = header_html + diff_topbar_html + diff_body_html+ footer_html
Reporter._make_file(summary_html, self.report_info.home_page)
Reporter._make_file(diff_html,self.report_info.diff_page)
log_body_html = self._generate_log_body()
log_html = header_html + logs_topbar_html + log_body_html+footer_html
Reporter._make_file(log_html, self.report_info.log_page)
for config_name in self.report_info.config_to_test_names_map.keys():
config_dir = os.path.join(self.report_info.resource_dir, config_name)
utils.makedirs(config_dir)
config_body_html = self._generate_config_body(config_name)
config_html = header_html + results_topbar_html + config_body_html + footer_html
config_file = os.path.join(config_dir, config_name + self.report_info.report_file_sfx)
Reporter._make_file(config_html, config_file)
for test_name in self.data_source.get_test_names(config_name):
test_body_html = self._generate_test_body(config_name, test_name)
test_html = header_html + results_topbar_html + test_body_html + footer_html
test_file = os.path.join(config_dir, test_name + self.report_info.report_file_sfx)
Reporter._make_file(test_html, test_file) | Generates the report |
1,201 | def update(self, validate=False):
rs = self.connection.get_all_dbinstances(self.id)
if len(rs) > 0:
for i in rs:
if i.id == self.id:
self.__dict__.update(i.__dict__)
elif validate:
raise ValueError('%s is not a valid Instance ID' % self.id)
return self.status | Update the DB instance's status information by making a call to fetch
the current instance attributes from the service.
:type validate: bool
:param validate: By default, if EC2 returns no data about the
instance the update method returns quietly. If
the validate param is True, however, it will
raise a ValueError exception if no data is
returned from EC2. |
1,202 | def add_account_alias(self, account, alias):
self.request('AddAccountAlias', {
'id': self._get_or_fetch_id(account, self.get_account),
'alias': alias,
}) | :param account: an account object to be used as a selector
:param alias: email alias address
:returns: None (the API itself returns nothing) |
1,203 | def get_api_name(self, func):
words = func.__name__.split('_')
words = [w.capitalize() for w in words]
return ' '.join(words) | e.g. Convert 'do_work' to 'Do Work' |
1,204 | def delete(group_id):
group = Group.query.get_or_404(group_id)
if group.can_edit(current_user):
try:
group.delete()
except Exception as e:
flash(str(e), "error")
return redirect(url_for(".index"))
flash(_("Group %(group_name)s deleted.",
group_name=group.name), "success")
return redirect(url_for(".index"))
flash(
_(
"You are not allowed to delete group %(group_name)s.",
group_name=group.name
),
"error"
)
return redirect(url_for(".index")) | Delete group. |
1,205 | def remote_property(name, get_command, set_command, field_name, doc=None):
def getter(self):
try:
return getattr(self, name)
except AttributeError:
value = getattr(self.sendCommand(get_command()), field_name)
setattr(self, name, value)
return value
def setter(self, value):
setattr(self, name, value)
self.sendCommand(set_command(value))
return property(getter, setter, doc=doc) | Property decorator that facilitates writing properties for values from a remote device.
Arguments:
name: The field name to use on the local object to store the cached property.
get_command: A function that returns the remote value of the property.
set_command: A function that accepts a new value for the property and sets it remotely.
field_name: The name of the field to retrieve from the response message to get operations. |
1,206 | def _get_host_details(self):
status, headers, system = self._rest_get('/rest/v1/Systems/1')
if status < 300:
stype = self._get_type(system)
if stype not in ['ComputerSystem.0', 'ComputerSystem.1']:
msg = "%s is not a valid system type " % stype
raise exception.IloError(msg)
else:
msg = self._get_extended_error(system)
raise exception.IloError(msg)
return system | Get the system details. |
1,207 | def __push_symbol(self, symbol):
self.__send_command("getSymbol", symbol)
while not {'instrument', 'trade', 'quote'} <= set(self.data):
sleep(0.1) | Ask the websocket for a symbol push. Gets instrument, orderBook, quote, and trade |
1,208 | def astype(self, col_dtypes, **kwargs):
dtype_indices = {}
columns = col_dtypes.keys()
numeric_indices = list(self.columns.get_indexer_for(columns))
new_dtypes = self.dtypes.copy()
for i, column in enumerate(columns):
dtype = col_dtypes[column]
if (
not isinstance(dtype, type(self.dtypes[column]))
or dtype != self.dtypes[column]
):
if dtype in dtype_indices.keys():
dtype_indices[dtype].append(numeric_indices[i])
else:
dtype_indices[dtype] = [numeric_indices[i]]
try:
new_dtype = np.dtype(dtype)
except TypeError:
new_dtype = dtype
if dtype != np.int32 and new_dtype == np.int32:
new_dtype = np.dtype("int64")
elif dtype != np.float32 and new_dtype == np.float32:
new_dtype = np.dtype("float64")
new_dtypes[column] = new_dtype
new_data = self.data
for dtype in dtype_indices.keys():
def astype(df, internal_indices=[]):
block_dtypes = {}
for ind in internal_indices:
block_dtypes[df.columns[ind]] = dtype
return df.astype(block_dtypes)
new_data = new_data.apply_func_to_select_indices(
0, astype, dtype_indices[dtype], keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns, new_dtypes) | Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
DataFrame with updated dtypes. |
1,209 | def raise_for_response(self, responses):
exception_messages = [self.client.format_exception_message(response) for response in responses]
if len(exception_messages) == 1:
message = exception_messages[0]
else:
message = "[%s]" % ", ".join(exception_messages)
raise PostmarkerException(message) | Constructs appropriate exception from list of responses and raises it. |
1,210 | def retry(self):
self.stream = None
self.config.loop.call_later(RETRY_TIMER, self.start)
_LOGGER.debug('Reconnecting to %s', self.config.host) | No connection to device, retry connection after 15 seconds. |
1,211 | def _find_binary(binary=None):
found = None
if binary is not None:
if os.path.isabs(binary) and os.path.isfile(binary):
return binary
if not os.path.isabs(binary):
try:
found = _which(binary)
log.debug("Found potential binary paths: %s"
% '\n'.join([path for path in found]))
found = found[0]
except IndexError as ie:
log.info("Could not determine absolute path of binary: %r"
% binary)
elif os.access(binary, os.X_OK):
found = binary
if found is None:
try: found = _which('gpg', abspath_only=True, disallow_symlinks=True)[0]
except IndexError as ie:
log.error("Could not find binary for 'gpg'.")
try: found = _which('gpg2')[0]
except IndexError as ie:
log.error("Could not find binary for 'gpg2'.")
if found is None:
raise RuntimeError("GnuPG is not installed!")
return found | Find the absolute path to the GnuPG binary.
Also run checks that the binary is not a symlink, and check that
our process real uid has exec permissions.
:param str binary: The path to the GnuPG binary.
:raises: :exc:`~exceptions.RuntimeError` if it appears that GnuPG is not
installed.
:rtype: str
:returns: The absolute path to the GnuPG binary to use, if no exceptions
occur. |
1,212 | def open_submission(self):
url = self.get_selected_item().get('permalink')
if url:
self.selected_page = self.open_submission_page(url) | Open the full submission and comment tree for the selected comment. |
1,213 | def add_data(self, minimum_address, maximum_address, data, overwrite):
if minimum_address == self.maximum_address:
self.maximum_address = maximum_address
self.data += data
elif maximum_address == self.minimum_address:
self.minimum_address = minimum_address
self.data = data + self.data
elif (overwrite
and minimum_address < self.maximum_address
and maximum_address > self.minimum_address):
self_data_offset = minimum_address - self.minimum_address
if self_data_offset < 0:
self_data_offset *= -1
self.data = data[:self_data_offset] + self.data
del data[:self_data_offset]
self.minimum_address = minimum_address
self_data_left = len(self.data) - self_data_offset
if len(data) <= self_data_left:
self.data[self_data_offset:self_data_offset + len(data)] = data
data = bytearray()
else:
self.data[self_data_offset:] = data[:self_data_left]
data = data[self_data_left:]
if len(data) > 0:
self.data += data
self.maximum_address = maximum_address
else:
raise AddDataError(
'data added to a segment must be adjacent to or overlapping with the original segment data') | Add given data to this segment. The added data must be adjacent to
the current segment data, otherwise an exception is thrown. |
1,214 | def _Authenticate(self):
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.msg == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.msg == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.msg == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.msg == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.msg == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.msg == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.msg == "ServiceDisabled":
print >>sys.stderr, "The user's access to the service has been disabled."
break
if e.msg == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return | Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin. |
1,215 | def CopyDirectory(source_dir, target_dir, override=False):
_AssertIsLocal(source_dir)
_AssertIsLocal(target_dir)
if override and IsDir(target_dir):
DeleteDirectory(target_dir, skip_on_error=False)
import shutil
shutil.copytree(source_dir, target_dir) | Recursively copy a directory tree.
:param unicode source_dir:
Where files will come from
:param unicode target_dir:
Where files will go to
:param bool override:
If True and target_dir already exists, it will be deleted before copying.
:raises NotImplementedForRemotePathError:
If trying to copy to/from remote directories |
1,216 | def add_predicate(self, pred_obj):
pred_id = pred_obj.get_id()
if not pred_id in self.idx:
pred_node = pred_obj.get_node()
self.node.append(pred_node)
self.idx[pred_id] = pred_node
else:
print() | Adds a predicate object to the layer
@type pred_obj: L{Cpredicate}
@param pred_obj: the predicate object |
1,217 | def plotMultipleInferenceRun(stats,
fields,
basename,
plotDir="plots"):
if not os.path.exists(plotDir):
os.makedirs(plotDir)
plt.figure()
colorList = ['r', 'b', 'g', 'm', 'c', 'k', 'y']
for i, field in enumerate(fields):
fieldKey = field[0] + " C0"
trace = []
for s in stats:
trace += s[fieldKey]
plt.plot(trace, label=field[1], color=colorList[i])
plt.legend(loc="upper right")
plt.xlabel("Input number")
plt.xticks(range(0, len(stats)*stats[0]["numSteps"]+1,5))
plt.ylabel("Number of cells")
plt.ylim(-5, 55)
plt.title("Inferring combined sensorimotor and temporal sequence stream")
relPath = "{}_exp_combined.pdf".format(basename)
path = os.path.join(plotDir, relPath)
plt.savefig(path)
plt.close() | Plots individual inference runs. |
1,218 | def init(celf, *, loop = None, unregister = None, message = None) :
"for consistency with other classes that don’t want caller to instantiate directly."
return \
celf \
(
loop = loop,
unregister = unregister,
message = message,
) | for consistency with other classes that don’t want caller to instantiate directly. |
1,219 | def LE32(value, min_value=None, max_value=None, fuzzable=True, name=None, full_range=False):
return UInt32(value, min_value=min_value, max_value=max_value, encoder=ENC_INT_LE, fuzzable=fuzzable, name=name, full_range=full_range) | 32-bit field, Little endian encoded |
1,220 | def make_client(zhmc, userid=None, password=None):
global USERID, PASSWORD
USERID = userid or USERID or \
six.input('Enter userid for HMC {}: '.format(zhmc))
PASSWORD = password or PASSWORD or \
getpass.getpass('Enter password of user {}: '.format(USERID))
session = zhmcclient.Session(zhmc, USERID, PASSWORD)
session.logon()
client = zhmcclient.Client(session)
print('Established logged-on session with HMC {} using userid {}'.
format(zhmc, USERID))
return client | Create a `Session` object for the specified HMC and log that on. Create a
`Client` object using that `Session` object, and return it.
If no userid and password are specified, and if no previous call to this
method was made, userid and password are interactively inquired.
Userid and password are saved in module-global variables for future calls
to this method. |
1,221 | def _buildTemplates(self):
contents = self._renderTemplate(self.template_name, extraContext=None)
f = self.main_file_name
main_url = self._save2File(contents, f, self.output_path)
return main_url | do all the things necessary to build the viz
should be adapted to work for single-file viz, or multi-files etc.
:param output_path:
:return: |
1,222 | def loadModel(self, model_file):
with open(model_file) as f:
self.q_table = json.load(f) | load q table from model_file |
1,223 | def showGrid( self ):
delegate = self.itemDelegate()
if ( isinstance(delegate, XTreeWidgetDelegate) ):
return delegate.showGrid()
return False | Returns whether or not this delegate should draw its grid lines.
:return <bool> |
1,224 | def copy(self):
kb = KeyBundle()
kb._keys = self._keys[:]
kb.cache_time = self.cache_time
kb.verify_ssl = self.verify_ssl
if self.source:
kb.source = self.source
kb.fileformat = self.fileformat
kb.keytype = self.keytype
kb.keyusage = self.keyusage
kb.remote = self.remote
return kb | Make deep copy of this KeyBundle
:return: The copy |
1,225 | def change_axis(self, axis_num, channel_name):
current_channels = list(self.current_channels)
if len(current_channels) == 1:
if axis_num == 0:
new_channels = channel_name,
else:
new_channels = current_channels[0], channel_name
else:
new_channels = list(current_channels)
new_channels[axis_num] = channel_name
self.set_axes(new_channels, self.ax) | TODO: refactor that and set_axes
what to do with ax?
axis_num: int
axis number
channel_name: str
new channel to plot on that axis |
1,226 | def callback(self, event):
artist = event.artist
ind = artist.ind
limit = 5
browser = True
if len(event.ind) > limit:
print "more than %s genes selected; not spawning browsers" % limit
browser = False
for i in event.ind:
feature = artist.features[ind[i]]
print feature,
if browser:
self.minibrowser.plot(feature) | Callback function to spawn a mini-browser when a feature is clicked. |
1,227 | def fftw_multi_normxcorr(template_array, stream_array, pad_array, seed_ids,
cores_inner, cores_outer):
utilslib = _load_cdll()
utilslib.multi_normxcorr_fftw.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float32,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_long, ctypes.c_long, ctypes.c_long,
np.ctypeslib.ndpointer(dtype=np.float32,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_long,
np.ctypeslib.ndpointer(dtype=np.float32,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_long,
np.ctypeslib.ndpointer(dtype=np.intc,
flags=native_str('C_CONTIGUOUS')),
np.ctypeslib.ndpointer(dtype=np.intc,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_int, ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.intc,
flags=native_str('C_CONTIGUOUS'))]
utilslib.multi_normxcorr_fftw.restype = ctypes.c_int
used_chans = []
template_len = template_array[seed_ids[0]].shape[1]
for seed_id in seed_ids:
used_chans.append(~np.isnan(template_array[seed_id]).any(axis=1))
template_array[seed_id] = (
(template_array[seed_id] -
template_array[seed_id].mean(axis=-1, keepdims=True)) / (
template_array[seed_id].std(axis=-1, keepdims=True) *
template_len))
template_array[seed_id] = np.nan_to_num(template_array[seed_id])
n_channels = len(seed_ids)
n_templates = template_array[seed_ids[0]].shape[0]
image_len = stream_array[seed_ids[0]].shape[0]
fft_len = next_fast_len(template_len + image_len - 1)
template_array = np.ascontiguousarray([template_array[x]
for x in seed_ids],
dtype=np.float32)
for x in seed_ids:
if not np.all(stream_array[x] == 0) and np.var(stream_array[x]) < 1e-8:
stream_array *= 1e8
warnings.warn("Low variance found for {0}, applying gain "
"to stabilise correlations".format(x))
stream_array = np.ascontiguousarray([stream_array[x] for x in seed_ids],
dtype=np.float32)
cccs = np.zeros((n_templates, image_len - template_len + 1),
np.float32)
used_chans_np = np.ascontiguousarray(used_chans, dtype=np.intc)
pad_array_np = np.ascontiguousarray([pad_array[seed_id]
for seed_id in seed_ids],
dtype=np.intc)
variance_warnings = np.ascontiguousarray(
np.zeros(n_channels), dtype=np.intc)
ret = utilslib.multi_normxcorr_fftw(
template_array, n_templates, template_len, n_channels, stream_array,
image_len, cccs, fft_len, used_chans_np, pad_array_np, cores_outer,
cores_inner, variance_warnings)
if ret < 0:
raise MemoryError("Memory allocation failed in correlation C-code")
elif ret not in [0, 999]:
print('Error in C code (possible normalisation error)')
print('Maximum cccs %f at %s' %
(cccs.max(), np.unravel_index(cccs.argmax(), cccs.shape)))
print('Minimum cccs %f at %s' %
(cccs.min(), np.unravel_index(cccs.argmin(), cccs.shape)))
raise CorrelationError("Internal correlation error")
elif ret == 999:
warnings.warn("Some correlations not computed, are there "
"zeros in data? If not, consider increasing gain.")
for i, variance_warning in enumerate(variance_warnings):
if variance_warning and variance_warning > template_len:
warnings.warn("Low variance found in {0} places for {1},"
" check result.".format(variance_warning,
seed_ids[i]))
return cccs, used_chans | Use a C loop rather than a Python loop - in some cases this will be fast.
:type template_array: dict
:param template_array:
:type stream_array: dict
:param stream_array:
:type pad_array: dict
:param pad_array:
:type seed_ids: list
:param seed_ids:
rtype: np.ndarray, list
:return: 3D Array of cross-correlations and list of used channels. |
1,228 | def get_routes(
feed: "Feed", date: Optional[str] = None, time: Optional[str] = None
) -> DataFrame:
if date is None:
return feed.routes.copy()
trips = feed.get_trips(date, time)
R = trips["route_id"].unique()
return feed.routes[feed.routes["route_id"].isin(R)] | Return a subset of ``feed.routes``
Parameters
-----------
feed : Feed
date : string
YYYYMMDD date string restricting routes to only those active on
the date
time : string
HH:MM:SS time string, possibly with HH > 23, restricting routes
to only those active during the time
Returns
-------
DataFrame
A subset of ``feed.routes``
Notes
-----
Assume the following feed attributes are not ``None``:
- ``feed.routes``
- Those used in :func:`.trips.get_trips`. |
1,229 | def path_exists_glob(path):
return True if glob.glob(os.path.expanduser(path)) else False | Tests to see if path after expansion is a valid path (file or directory).
Expansion allows usage of ? * and character ranges []. Tilde expansion
is not supported. Returns True/False.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' file.path_exists_glob /etc/pam*/pass* |
1,230 | def mix(color1, color2, pos=0.5):
opp_pos = 1 - pos
red = color1[0] * pos + color2[0] * opp_pos
green = color1[1] * pos + color2[1] * opp_pos
blue = color1[2] * pos + color2[2] * opp_pos
return int(red), int(green), int(blue) | Return the mix of two colors at a state of :pos:
Returns color1 * pos + color2 * (1 - pos) |
1,231 | def rmdir(self, parents=False):
if parents:
os.removedirs(self.path)
else:
os.rmdir(self.path) | Removes this directory, provided it is empty.
Use :func:`~rpaths.Path.rmtree` if it might still contain files.
:param parents: If set to True, it will also destroy every empty
directory above it until an error is encountered. |
1,232 | def add_collaboration(self, collaboration):
collaborations = normalize_collaboration(collaboration)
for collaboration in collaborations:
self._append_to('collaborations', {
'value': collaboration
}) | Add collaboration.
:param collaboration: collaboration for the current document
:type collaboration: string |
1,233 | def exit_statistics(hostname, start_time, count_sent, count_received, min_time, avg_time, max_time, deviation):
end_time = datetime.datetime.now()
duration = end_time - start_time
duration_sec = float(duration.seconds * 1000)
duration_ms = float(duration.microseconds / 1000)
duration = duration_sec + duration_ms
package_loss = 100 - ((float(count_received) / float(count_sent)) * 100)
print(f'--- {hostname} ping statistics ---')
try:
print(f'{count_sent} packets transmitted, {count_received} received, {package_loss:.1f}% packet loss, time {duration:.0f}ms')
except ZeroDivisionError:
print(f'{count_sent} packets transmitted, {count_received} received, 100% packet loss, time {duration:.0f}ms')
print(
'round-trip min/avg/max/stddev = %.3f/%.3f/%.3f/%.3f ms' % (
min_time.seconds*1000 + float(min_time.microseconds)/1000,
float(avg_time) / 1000,
max_time.seconds*1000 + float(max_time.microseconds)/1000,
float(deviation)
)
) | Print ping exit statistics |
1,234 | def specificity(result, reference):
result = numpy.atleast_1d(result.astype(numpy.bool))
reference = numpy.atleast_1d(reference.astype(numpy.bool))
tn = numpy.count_nonzero(~result & ~reference)
fp = numpy.count_nonzero(result & ~reference)
try:
specificity = tn / float(tn + fp)
except ZeroDivisionError:
specificity = 0.0
return specificity | Specificity.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
specificity : float
The specificity between two binary datasets, here mostly binary objects in images,
which denotes the fraction of correctly returned negatives. The
specificity is not symmetric.
See also
--------
:func:`sensitivity`
Notes
-----
Not symmetric. The complement of the specificity is :func:`sensitivity`.
High recall means that an algorithm returned most of the irrelevant results.
References
----------
.. [1] https://en.wikipedia.org/wiki/Sensitivity_and_specificity
.. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion |
1,235 | def get_model_paths(model_dir):
all_models = gfile.Glob(os.path.join(model_dir, '*.meta'))
model_filenames = [os.path.basename(m) for m in all_models]
model_numbers_names = [
(shipname.detect_model_num(m), shipname.detect_model_name(m))
for m in model_filenames]
model_names = sorted(model_numbers_names)
return [os.path.join(model_dir, name[1]) for name in model_names] | Returns all model paths in the model_dir. |
1,236 | def pop_marker(self, reset):
marker = self.markers.pop()
if reset:
marker.extend(self.look_ahead)
self.look_ahead = marker
elif self.markers:
self.markers[-1].extend(marker)
else:
pass | Pop a marker off of the marker stack. If reset is True then the
iterator will be returned to the state it was in before the
corresponding call to push_marker(). |
1,237 | def extract(self, html_text: str,
extract_title: bool = False,
extract_meta: bool = False,
extract_microdata: bool = False,
microdata_base_url: str = "",
extract_json_ld: bool = False,
extract_rdfa: bool = False,
rdfa_base_url: str = "") \
-> List[Extraction]:
res = list()
soup = BeautifulSoup(html_text, 'html.parser')
if soup.title and extract_title:
title = self._wrap_data("title", soup.title.string.encode().decode())
res.append(title)
if soup.title and extract_meta:
meta_content = self._wrap_meta_content(soup.find_all("meta"))
meta_data = self._wrap_data("meta", meta_content)
res.append(meta_data)
if extract_microdata:
mde = MicrodataExtractor()
mde_data = self._wrap_data("microdata", mde.extract(html_text, microdata_base_url))
res.append(mde_data)
if extract_json_ld:
jslde = JsonLdExtractor()
jslde_data = self._wrap_data("json-ld", jslde.extract(html_text))
res.append(jslde_data)
if extract_rdfa:
rdfae = RDFaExtractor()
rdfae_data = self._wrap_data("rdfa", rdfae.extract(html_text, rdfa_base_url))
res.append(rdfae_data)
return res | Args:
html_text (str): input html string to be extracted
extract_title (bool): True if string of 'title' tag needs to be extracted, return as { "title": "..." }
extract_meta (bool): True if string of 'meta' tags needs to be extracted, return as { "meta": { "author": "...", ...}}
extract_microdata (bool): True if microdata needs to be extracted, returns as { "microdata": [...] }
microdata_base_url (str): base namespace url for microdata, empty string if no base url is specified
extract_json_ld (bool): True if json-ld needs to be extracted, return as { "json-ld": [...] }
extract_rdfa (bool): True if rdfs needs to be extracted, returns as { "rdfa": [...] }
rdfa_base_url (str): base namespace url for rdfa, empty string if no base url is specified
Returns:
List[Extraction]: the list of extraction or the empty list if there are no matches. |
1,238 | def build(port=8000, fixtures=None):
extractor = Extractor()
parser = Parser(extractor.url_details, fixtures)
parser.parse()
url_details = parser.results
_store = get_store(url_details)
store = json.dumps(_store)
variables = str(Variable(, , store))
functions = DATA_FINDER + GET_HANDLER + MODIFY_HANDLER + POST_HANDLER
endpoints = []
endpoint_uris = []
for u in parser.results:
endpoint = Endpoint()
if u[].lower() in [, ]:
method = u[].lower()
else:
method =
response = str(ResponseBody(method))
u[], list_url = clean_url(u[], _store, u[].lower())
if list_url is not None and u[].lower() == :
list_endpoint = Endpoint()
list_endpoint.construct(, list_url, response)
if str(list_endpoint) not in endpoints:
endpoints.append(str(list_endpoint))
if list_endpoint.uri not in endpoint_uris:
endpoint_uris.append(list_endpoint.uri)
if method == :
without_prefix = re.sub(r, , u[])
for k, v in _store.items():
if without_prefix in k:
options = v.get(, )
options = ast.literal_eval(options)
modifiers = []
if options is not None:
modifiers = options.get(, [])
if modifiers:
for mod in modifiers:
if u[].lower() == mod:
mod_endpoint = Endpoint()
uri = without_prefix
if v.get() is not None and v[] == :
uri = re.sub(r, , u[])
mod_endpoint.construct(u[].lower(), uri, response)
if str(mod_endpoint) not in endpoints:
endpoints.append(str(mod_endpoint))
if mod_endpoint.uri not in endpoint_uris:
endpoint_uris.append(mod_endpoint.uri)
else:
endpoint.construct(u[], u[], response)
if str(endpoint) not in endpoints:
endpoints.append(str(endpoint))
if endpoint.uri not in endpoint_uris:
endpoint_uris.append(endpoint.uri)
endpoints = .join(endpoints)
express = ExpressServer()
express.construct(variables, functions, endpoints, port)
return express | Builds a server file.
1. Extract mock response details from all valid docstrings in existing views
2. Parse and generate mock values
3. Create a store of all endpoints and data
4. Construct server file |
1,239 | def get_settings():
s = getattr(settings, , {})
s = {
: s.get(, False),
}
return s | This function returns a dict containing default settings |
1,240 | def encode(B):
B = array(B)
flatten = False
if len(B.shape) == 1:
flatten = True
B = B.reshape(1, -1)
if B.shape[1] != data_size:
raise ValueError('B must be shaped as (4, ) or (-1, 4)')
C = dot(G, B.T).T % 2
if flatten:
C = C.flatten()
return C | Encode data using Hamming(7, 4) code.
E.g.:
encode([0, 0, 1, 1])
encode([[0, 0, 0, 1],
[0, 1, 0, 1]])
:param array B: binary data to encode (must be shaped as (4, ) or (-1, 4)). |
1,241 | def getResponseAction(self, ps, action):
opName = self.getOperationName(ps, action)
if self.wsAction.has_key(opName) is False:
raise WSActionNotSpecified, 'no WS-Action specified for operation %s' % opName
return self.wsAction[opName] | Returns response WS-Action if available
action -- request WS-Action value. |
1,242 | def create_domain(self,
service_id,
version_number,
name,
comment=None):
body = self._formdata({
"name": name,
"comment": comment,
}, FastlyDomain.FIELDS)
content = self._fetch("/service/%s/version/%d/domain" % (service_id, version_number), method="POST", body=body)
return FastlyDomain(self, content) | Create a domain for a particular service and version. |
1,243 | def _sort_shared_logical_disks(logical_disks):
is_shared = (lambda x: True if ('share_physical_disks' in x and
x['share_physical_disks']) else False)
num_of_disks = (lambda x: x['number_of_physical_disks']
if 'number_of_physical_disks' in x else
constants.RAID_LEVEL_MIN_DISKS[x['raid_level']])
logical_disks_shared = []
logical_disks_nonshared = []
for x in logical_disks:
target = (logical_disks_shared if is_shared(x)
else logical_disks_nonshared)
target.append(x)
logical_disks_shared_raid1 = []
logical_disks_shared_excl_raid1 = []
for x in logical_disks_shared:
target = (logical_disks_shared_raid1 if x['raid_level'] == '1'
else logical_disks_shared_excl_raid1)
target.append(x)
logical_disks_shared = sorted(logical_disks_shared_excl_raid1,
reverse=True,
key=num_of_disks)
check = True
for x in logical_disks_shared:
if x[] == "1+0":
x_num = num_of_disks(x)
for y in logical_disks_shared:
if y['raid_level'] != "1+0":
y_num = num_of_disks(y)
if x_num < y_num:
check = (True if y_num % 2 == 0 else False)
if check:
break
if not check:
logical_disks_shared.remove(x)
logical_disks_shared.insert(0, x)
check = True
logical_disks_sorted = (logical_disks_nonshared +
logical_disks_shared_raid1 +
logical_disks_shared)
return logical_disks_sorted | Sort the logical disks based on the following conditions.
When the share_physical_disks is True make sure we create the volume
which needs more disks first. This avoids the situation of insufficient
disks for some logical volume request.
For example,
- two logical disk with number of disks - LD1(3), LD2(4)
- have 4 physical disks
In this case, if we consider LD1 first then LD2 will fail since not
enough disks available to create LD2. So follow a order for allocation
when share_physical_disks is True.
Also RAID1 can share only when there is logical volume with only 2 disks.
So make sure we create RAID 1 first when share_physical_disks is True.
And RAID 1+0 can share only when the logical volume with even number of
disks.
:param logical_disks: 'logical_disks' to be sorted for shared logical
disks.
:returns: the logical disks sorted based the above conditions. |
1,244 | def get_metadata(doi):
url = crossref_url + 'works/' + doi
res = requests.get(url)
if res.status_code != 200:
logger.info('Could not get CrossRef metadata for DOI %s, code %d' %
(doi, res.status_code))
return None
raw_message = res.json()
metadata = raw_message.get('message')
return metadata | Returns the metadata of an article given its DOI from CrossRef
as a JSON dict |
1,245 | def apply_fseries_time_shift(htilde, dt, kmin=0, copy=True):
if htilde.precision != 'single':
raise NotImplementedError("CUDA version of apply_fseries_time_shift only supports single precision")
if copy:
out = htilde.copy()
else:
out = htilde
kmin = numpy.int32(kmin)
kmax = numpy.int32(len(htilde))
nb = int(numpy.ceil(kmax / nt_float))
if nb > 1024:
raise ValueError("More than 1024 blocks not supported yet")
phi = numpy.float32(-2 * numpy.pi * dt * htilde.delta_f)
fseries_ts_fn.prepared_call((nb, 1), (nt, 1, 1), out.data.gpudata, phi, kmin, kmax)
if copy:
htilde = FrequencySeries(out, delta_f=htilde.delta_f, epoch=htilde.epoch,
copy=False)
return htilde | Shifts a frequency domain waveform in time. The waveform is assumed to
be sampled at equal frequency intervals. |
1,246 | def parse(self, path):
expath, template, root = self._load_template(path)
if expath is not None:
values = template.parse(root)
return (values, template) | Extracts a dictionary of values from the XML file at the specified path. |
1,247 | def add_dat_file(filename, settings, container=None, **kwargs):
importers = {
11: _read_general_type,
}
file_type, content = _read_file(filename)
if file_type not in importers:
raise Exception(
'unknown file type {0} - not implemented yet'.format(file_type)
)
header, data = importers[file_type](content, settings)
timestep = settings.get('timestep', 0)
data['timestep'] = timestep
if container is None:
container = ERT(data)
else:
container.data = pd.concat((container.data, data))
return container | Read a RES2DINV-style file produced by the ABEM export program. |
1,248 | def period_break(dates, period):
current = getattr(dates, period)
previous = getattr(dates - 1 * dates.freq, period)
return np.nonzero(current - previous)[0] | Returns the indices where the given period changes.
Parameters
----------
dates : PeriodIndex
Array of intervals to monitor.
period : string
Name of the period to monitor. |
1,249 | def toDict(self):
d = {}
nindices = self.getNumIndices()
for i in range(self.getNumRows()):
row = list(self.getRowByIndex(i))
if nindices > 1:
key = tuple(row[:nindices])
elif nindices == 1:
key = row[0]
else:
key = None
if len(row) - nindices == 0:
d[key] = None
elif len(row) - nindices == 1:
d[key] = row[nindices]
else:
d[key] = tuple(row[nindices:])
return d | Return a dictionary with the DataFrame data. |
1,250 | async def move_to(self, channel: discord.VoiceChannel):
if channel.guild != self.channel.guild:
raise TypeError("Cannot move to a different guild.")
self.channel = channel
await self.connect() | Moves this player to a voice channel.
Parameters
----------
channel : discord.VoiceChannel |
1,251 | def do_login(session, for_what):
username, password = request_username_password(for_what)
try:
session.login(username, password)
except ClientInteractionRequest as cir:
params = request_interaction(cir)
session.continue_login(cir.login_token, **params) | Performs a login handshake with a user on the command-line. This method
will handle all of the follow-up requests (e.g. capcha or two-factor). A
login that requires two-factor looks like this::
>>> import mwapi.cli
>>> import mwapi
>>> mwapi.cli.do_login(mwapi.Session("https://en.wikipedia.org"), "English Wikipedia")
Log into English Wikipedia
Username: Halfak (WMF)
Password:
Please enter verification code from your mobile app
Token(OATHToken): 234567
:Parameters:
session : :class:`mwapi.Session`
A session object to use for login
for_what : `str`
A name to display to the use (for what they are logging into) |
1,252 | def to_xdr_object(self):
tx = self.tx.to_xdr_object()
return Xdr.types.TransactionEnvelope(tx, self.signatures) | Get an XDR object representation of this
:class:`TransactionEnvelope`. |
1,253 | def set_tuning(self, tuning):
if self.instrument:
self.instrument.tuning = tuning
self.tuning = tuning
return self | Set the tuning attribute on both the Track and its instrument (when
available).
Tuning should be a StringTuning or derivative object. |
1,254 | def mysql_timestamp_converter(s):
if s[4] == '-': return DateTime_or_None(s)
s = s + "0"*(14-len(s))
parts = map(int, filter(None, (s[:4],s[4:6],s[6:8],
s[8:10],s[10:12],s[12:14])))
try:
return Timestamp(*parts)
except (SystemExit, KeyboardInterrupt):
raise
except:
return None | Convert a MySQL TIMESTAMP to a Timestamp object. |
1,255 | def fillna(self, value=None, method=None, limit=None):
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError(
'"value" parameter must be a scalar, dict '
'or Series, but you passed a "{0}"'
.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True) | Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled |
1,256 | def export_opml():
with Database("feeds") as feeds:
print('<?xml version="1.0" encoding="UTF-8"?>')
print('<opml version="1.0"><body>')
for name in list(feeds.keys()):
kind = feedparser.parse(feeds[name]).version
if kind[:4] == 'atom':
t = 'atom'
elif kind[:3] == 'rss':
t = 'rss'
print('<outline text="%s" xmlUrl="%s" type="%s"/>' % (name, feeds[name], t))
print('</body>')
print('</opml>') | Export an OPML feed list |
1,257 | def auth(username, password):
def middleware(conn):
conn.login(username, password)
return middleware | Middleware implementing authentication via LOGIN.
Most of the time this middleware needs to be placed
*after* TLS.
:param username: Username to login with.
:param password: Password of the user. |
1,258 | def python_2_unicode_compatible(klass):
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." % klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass | A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class. |
1,259 | def openid_authorization_validator(self, request):
request_info = super(HybridGrant, self).openid_authorization_validator(request)
if not request_info:
return request_info
if request.response_type in ["code id_token", "code id_token token"]:
if not request.nonce:
raise InvalidRequestError(
request=request,
description='Request is missing mandatory nonce parameter.'
)
return request_info | Additional validation when following the Authorization Code flow. |
1,260 | def upload(self, file_obj=None, file_path=None, name=None, data=None):
close = False
if file_obj:
file_name, content = os.path.basename(file_obj.name), file_obj
elif file_path:
file_name, content = os.path.basename(file_path), open(file_path, 'rb')
close = True
elif name and data:
file_name, content = name, data
else:
raise GhostException(
400,
'Either file_obj, file_path or name and data needs to be specified')
try:
content_type, _ = mimetypes.guess_type(file_name)
file_arg = (file_name, content, content_type)
response = self.execute_post(, files={: file_arg})
return response
finally:
if close:
content.close() | Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server |
1,261 | def define_mask_borders(image2d, sought_value, nadditional=0):
naxis2, naxis1 = image2d.shape
mask2d = np.zeros((naxis2, naxis1), dtype=bool)
borders = []
for i in range(naxis2):
jborder_min, jborder_max = find_pix_borders(
image2d[i, :],
sought_value=sought_value
)
borders.append((jborder_min, jborder_max))
if (jborder_min, jborder_max) != (-1, naxis1):
if jborder_min != -1:
j1 = 0
j2 = jborder_min + nadditional + 1
mask2d[i, j1:j2] = True
if jborder_max != naxis1:
j1 = jborder_max - nadditional
j2 = naxis1
mask2d[i, j1:j2] = True
return mask2d, borders | Generate mask avoiding undesired values at the borders.
Set to True image borders with values equal to 'sought_value'
Parameters
----------
image2d : numpy array
Initial 2D image.
sought_value : int, float, bool
Pixel value that indicates missing data in the spectrum.
nadditional : int
Number of additional pixels to be masked at each border.
Returns
-------
mask2d : numpy array
2D mask.
borders : list of tuples
List of tuples (jmin, jmax) with the border limits (in array
coordinates) found by find_pix_borders. |
1,262 | def _GetVisibilityPolicy():
try:
visibility_config = yaml_data_visibility_config_reader.OpenAndRead()
except yaml_data_visibility_config_reader.Error as err:
return error_data_visibility_policy.ErrorDataVisibilityPolicy(
'Could not process data visibility config: %s' % err)
if visibility_config:
return glob_data_visibility_policy.GlobDataVisibilityPolicy(
visibility_config.blacklist_patterns,
visibility_config.whitelist_patterns)
return None | If a debugger configuration is found, create a visibility policy. |
1,263 | def to_binary(s, encoding='utf-8'):
if PY3:
return s if isinstance(s, binary_type) else binary_type(s, encoding=encoding)
return binary_type(s) | Portable cast function.
In python 2 the ``str`` function which is used to coerce objects to bytes does not
accept an encoding argument, whereas python 3's ``bytes`` function requires one.
:param s: object to be converted to binary_type
:return: binary_type instance, representing s. |
1,264 | def output(self):
starters = self.finalize()
try:
yield from self._output(starters)
finally:
self.close() | Produce a classic generator for this cell's final results. |
1,265 | def raw_sensor_strings(self):
try:
with open(self.sensorpath, "r") as f:
data = f.readlines()
except IOError:
raise NoSensorFoundError(self.type_name, self.id)
if data[0].strip()[-3:] != "YES":
raise SensorNotReadyError(self)
return data | Reads the raw strings from the kernel module sysfs interface
:returns: raw strings containing all bytes from the sensor memory
:rtype: str
:raises NoSensorFoundError: if the sensor could not be found
:raises SensorNotReadyError: if the sensor is not ready yet |
1,266 | def _check_key_value_types(obj, key_type, value_type, key_check=isinstance, value_check=isinstance):
if not isinstance(obj, dict):
raise_with_traceback(_type_mismatch_error(obj, dict))
if key_type is str:
key_type = string_types
if value_type is str:
value_type = string_types
for key, value in obj.items():
if key_type and not key_check(key, key_type):
raise_with_traceback(
CheckError(
'Key in dictionary mismatches type. Expected {key_type}. Got {obj_repr}'.format(
key_type=repr(key_type), obj_repr=repr(key)
)
)
)
if value_type and not value_check(value, value_type):
raise_with_traceback(
CheckError(
'Value in dictionary mismatches expected type for key {key}. Expected value '
'of type {vtype}. Got value {value} of type {obj_type}.'.format(
vtype=repr(value_type), obj_type=type(value), key=key, value=value
)
)
)
return obj | Ensures argument obj is a dictionary, and enforces that the keys/values conform to the types
specified by key_type, value_type. |
1,267 | def fit_predict(self, sequences, y=None):
if hasattr(super(MultiSequenceClusterMixin, self), 'fit_predict'):
check_iter_of_sequences(sequences, allow_trajectory=self._allow_trajectory)
labels = super(MultiSequenceClusterMixin, self).fit_predict(sequences)
else:
self.fit(sequences)
labels = self.predict(sequences)
if not isinstance(labels, list):
labels = self._split(labels)
return labels | Performs clustering on X and returns cluster labels.
Parameters
----------
sequences : list of array-like, each of shape [sequence_length, n_features]
A list of multivariate timeseries. Each sequence may have
a different length, but they all must have the same number
of features.
Returns
-------
Y : list of ndarray, each of shape [sequence_length, ]
Cluster labels |
1,268 | def locale_escape(string, errors='replace'):
encoding = locale.getpreferredencoding()
string = string.encode(encoding, errors).decode()
return string | Mangle non-supported characters, for savages with ascii terminals. |
1,269 | def update(self, τ: float = 1.0, update_indicators=True, dampen=False):
for n in self.nodes(data=True):
n[1]["next_state"] = n[1]["update_function"](n)
for n in self.nodes(data=True):
n[1]["rv"].dataset = n[1]["next_state"]
for n in self.nodes(data=True):
for i in range(self.res):
self.s0[i][n[0]] = n[1]["rv"].dataset[i]
if dampen:
self.s0[i][f"∂({n[0]})/∂t"] = self.s0_original[
f"∂({n[0]})/∂t"
] * exp(-τ * self.t)
if update_indicators:
for indicator in n[1]["indicators"].values():
indicator.samples = np.random.normal(
indicator.mean * np.array(n[1]["rv"].dataset),
scale=0.01,
)
self.t += self.Δt | Advance the model by one time step. |
1,270 | def generate_random_string(number_of_random_chars=8, character_set=string.ascii_letters):
return u''.join(random.choice(character_set)
for _ in range(number_of_random_chars)) | Generate a series of random characters.
Kwargs:
number_of_random_chars (int) : Number of characters long
character_set (str): Specify a character set. Default is ASCII |
1,271 | def get_portchannel_info_by_intf_output_lacp_partner_brcd_state(self, **kwargs):
config = ET.Element("config")
get_portchannel_info_by_intf = ET.Element("get_portchannel_info_by_intf")
config = get_portchannel_info_by_intf
output = ET.SubElement(get_portchannel_info_by_intf, "output")
lacp = ET.SubElement(output, "lacp")
partner_brcd_state = ET.SubElement(lacp, "partner-brcd-state")
partner_brcd_state.text = kwargs.pop('partner_brcd_state')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
1,272 | def cache(descriptor=None, *, store: IStore = None):
if descriptor is None:
return functools.partial(cache, store=store)
hasattrs = {
'get': hasattr(descriptor, '__get__'),
'set': hasattr(descriptor, '__set__'),
'delete': hasattr(descriptor, '__delete__')
}
descriptor_name = get_descriptor_name(descriptor)
class CacheDescriptor(ICacheDescriptor):
def __init__(self):
if descriptor_name is not None:
self.__name__ = descriptor_name
cache_descriptor = CacheDescriptor()
if store is None:
store = FieldStore(cache_descriptor)
elif not isinstance(store, IStore):
raise TypeError(f'store must be an IStore instance, got {type(store)!r}')
if hasattrs['get']:
def get(self, obj, objtype):
if obj is None:
return descriptor.__get__(obj, objtype)
value = store.get(self, obj, defval=NOVALUE)
if value is NOVALUE:
value = descriptor.__get__(obj, objtype)
store.set(self, obj, value)
return value
CacheDescriptor.__get__ = get
if hasattrs['set']:
def set(self, obj, value):
store.pop(self, obj)
descriptor.__set__(obj, value)
CacheDescriptor.__set__ = set
if hasattrs['delete']:
def delete(self, obj):
store.pop(self, obj)
descriptor.__delete__(obj)
CacheDescriptor.__delete__ = delete
return cache_descriptor | usage:
``` py
@cache
@property
def name(self): pass
``` |
1,273 | def _unpack(self, data):
current = self
while current is not None:
data = current._parser.unpack(data, current)
last = current
current = getattr(current, , None)
_set(last, , data) | Unpack a struct from bytes. For parser internal use. |
1,274 | def validateDatetime(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None,
formats=(, , , , ,
, , , , ,
, , , , ), excMsg=None):
try:
return _validateToDateTimeFormat(value, formats, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)
except ValidationException:
_raiseValidationException(_() % (_errstr(value)), excMsg) | Raises ValidationException if value is not a datetime formatted in one
of the formats formats. Returns a datetime.datetime object of value.
* value (str): The value being validated as a datetime.
* blank (bool): If True, a blank string will be accepted. Defaults to False.
* strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
* allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
* blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
* formats: A tuple of strings that can be passed to time.strftime, dictating the possible formats for a valid datetime.
* excMsg (str): A custom message to use in the raised ValidationException.
>>> import pysimplevalidate as pysv
>>> pysv.validateDatetime('2018/10/31 12:00:01')
datetime.datetime(2018, 10, 31, 12, 0, 1)
>>> pysv.validateDatetime('10/31/2018 12:00:01')
datetime.datetime(2018, 10, 31, 12, 0, 1)
>>> pysv.validateDatetime('10/31/2018')
Traceback (most recent call last):
...
pysimplevalidate.ValidationException: '10/31/2018' is not a valid date and time. |
1,275 | def is_entailed_by(self, other):
other = ListCell.coerce(other)
if other.size() < self.size():
return False
for i, oval in enumerate(other.value[:self.size()]):
if hasattr(self.value[i], 'is_entailed_by') and \
not self.value[i].is_entailed_by(oval):
return False
elif self.value[i] != oval:
return False
return True | Returns True iff the values in this list can be entailed by the other
list (ie, this list is a prefix of the other) |
1,276 | def formatHeadings(self, text, isMain):
doNumberHeadings = False
showEditLink = True
if text.find(u"__NOEDITSECTION__") != -1:
showEditLink = False
text = text.replace(u"__NOEDITSECTION__", u"")
matches = _headerPat.findall(text)
numMatches = len(matches)
canonized_headline = _tagPat.sub(u, canonized_headline)
tocline = canonized_headline.strip()
if doNumberHeadings and numMatches > 1:
headline = numbering + u + headline
anchor = canonized_headline;
if refcount[headlineCount] > 1:
anchor += u + unicode(refcount[headlineCount])
if enoughToc:
toc.append(u)
toc.append(to_unicode(toclevel))
toc.append(u)
toc.append(anchor)
toc.append(u)
toc.append(numbering)
toc.append(u)
toc.append(tocline)
toc.append(u)
if headlineCount not in head:
head[headlineCount] = []
h = head[headlineCount]
h.append(u)
h.append(to_unicode(level))
h.append(u)
h.append(anchor)
h.append()
h.append(matches[headlineCount][1].strip())
h.append(headline.strip())
h.append(u)
h.append(to_unicode(level))
h.append(u)
headlineCount += 1
if not istemplate:
sectionCount += 1
if enoughToc:
if toclevel < wgMaxTocLevel:
toc.append(u"</li>\n")
toc.append(u"</ul>\n</li>\n" * max(0, toclevel - 1))
toc.insert(0, u)
toc.append(u)
blocks = _headerPat.split(text)
i = 0
len_blocks = len(blocks)
forceTocPosition = text.find(u"<!--MWTOC-->")
full = []
while i < len_blocks:
j = i/4
full.append(blocks[i])
if enoughToc and not i and isMain and forceTocPosition == -1:
full += toc
toc = None
if j in head and head[j]:
full += head[j]
head[j] = None
i += 4
full = u.join(full)
if forceTocPosition != -1:
return full.replace(u"<!--MWTOC-->", u.join(toc), 1)
else:
return full | This function accomplishes several tasks:
1) Auto-number headings if that option is enabled
2) Add an [edit] link to sections for logged in users who have enabled the option
3) Add a Table of contents on the top for users who have enabled the option
4) Auto-anchor headings
It loops through all headlines, collects the necessary data, then splits up the
string and re-inserts the newly formatted headlines. |
1,277 | def list_projects(self):
method = 'GET'
url = '/projects?circle-token={token}'.format(
token=self.client.api_token)
json_data = self.client.request(method, url)
return json_data | Return a list of all followed projects. |
1,278 | def _get_revision(self, revision):
if self._empty:
raise EmptyRepositoryError("There are no changesets yet")
if revision in [-1, 'tip', None]:
revision = 'tip'
try:
revision = hex(self._repo.lookup(revision))
except (IndexError, ValueError, RepoLookupError, TypeError):
raise ChangesetDoesNotExistError("Revision %s does not "
"exist for this repository"
% (revision))
return revision | Get's an ID revision given as str. This will always return a fill
40 char revision number
:param revision: str or int or None |
1,279 | def _remove_lead_trail_false(bool_list):
for i in (0, -1):
while bool_list and not bool_list[i]:
bool_list.pop(i)
return bool_list | Remove leading and trailing false's from a list |
1,280 | def _brzozowski_algebraic_method_init(self):
for state_a in self.mma.states:
if state_a.final:
self.B[state_a.stateid] = self.epsilon
else:
self.B[state_a.stateid] = self.empty
for state_b in self.mma.states:
self.A[state_a.stateid, state_b.stateid] = self.empty
for arc in state_a.arcs:
if arc.nextstate == state_b.stateid:
self.A[state_a.stateid, state_b.stateid] = \
self.mma.isyms.find(arc.ilabel) | Initialize Brzozowski Algebraic Method |
1,281 | def nearest_qmed_catchments(self, subject_catchment, limit=None, dist_limit=500):
dist_sq = Catchment.distance_to(subject_catchment).label('dist_sq')
query = self.db_session.query(Catchment, dist_sq). \
join(Catchment.amax_records). \
join(Catchment.descriptors). \
filter(Catchment.id != subject_catchment.id,
Catchment.is_suitable_for_qmed,
Catchment.country == subject_catchment.country,
dist_sq <= dist_limit ** 2). \
group_by(Catchment,
Descriptors.centroid_ngr_x,
Descriptors.centroid_ngr_y). \
order_by(dist_sq). \
having(func.count(AmaxRecord.catchment_id) >= 10)
if limit:
rows = query[0:limit]
else:
rows = query.all()
catchments = []
for row in rows:
catchment = row[0]
catchment.dist = sqrt(row[1])
catchments.append(catchment)
return catchments | Return a list of catchments sorted by distance to `subject_catchment` **and filtered to only include catchments
suitable for QMED analyses**.
:param subject_catchment: catchment object to measure distances to
:type subject_catchment: :class:`floodestimation.entities.Catchment`
:param limit: maximum number of catchments to return. Default: `None` (returns all available catchments).
:type limit: int
:param dist_limit: maximum distance in km. between subject and donor catchment. Default: 500 km. Increasing the
maximum distance will increase computation time!
:type dist_limit: float or int
:return: list of catchments sorted by distance
:rtype: list of :class:`floodestimation.entities.Catchment` |
1,282 | def data_array_from_data_iterable(data_iterable):
try:
data_array = np.concatenate([item[0] for item in data_iterable])
except ValueError:
data_array = np.empty(0, dtype=np.uint32)
return data_array | Convert data iterable to raw data numpy array.
Parameters
----------
data_iterable : iterable
Iterable where each element is a tuple with following content: (raw data, timestamp_start, timestamp_stop, status).
Returns
-------
data_array : numpy.array
concatenated data array |
1,283 | def init_dataset_prepare_args(self, parser):
prepare
parser.add_argument(, , dest=, default=DEFAULT_USER_CONFIG_PATH,
help=)
parser.add_argument(, , help=)
parser.add_argument(, , help=) | Only invoked conditionally if subcommand is 'prepare' |
1,284 | def check_specs(specs, renamings, types):
from pythran.types.tog import unify, clone, tr
from pythran.types.tog import Function, TypeVariable, InferenceError
functions = {renamings.get(k, k): v for k, v in specs.functions.items()}
for fname, signatures in functions.items():
ftype = types[fname]
for signature in signatures:
sig_type = Function([tr(p) for p in signature], TypeVariable())
try:
unify(clone(sig_type), clone(ftype))
except InferenceError:
raise PythranSyntaxError(
"Specification for `{}` does not match inferred type:\n"
"expected `{}`\n"
"got `Callable[[{}], ...]`".format(
fname,
ftype,
", ".join(map(str, sig_type.types[:-1])))
) | Does nothing but raising PythranSyntaxError if specs
are incompatible with the actual code |
1,285 | def send_to_device(self, event_type, messages, txn_id=None):
txn_id = txn_id if txn_id else self._make_txn_id()
return self._send(
"PUT",
"/sendToDevice/{}/{}".format(event_type, txn_id),
content={"messages": messages}
) | Sends send-to-device events to a set of client devices.
Args:
event_type (str): The type of event to send.
messages (dict): The messages to send. Format should be
<user_id>: {<device_id>: <event_content>}.
The device ID may also be '*', meaning all known devices for the user.
txn_id (str): Optional. The transaction ID for this event, will be generated
automatically otherwise. |
1,286 | def render_code(self):
tmp_dir = os.environ.get(,)
view_code = os.path.join(tmp_dir,)
if os.path.exists(view_code):
try:
with open(view_code) as f:
return f.read()
except:
pass
return DEFAULT_CODE | Try to load the previous code (if we had a crash or something)
I should allow saving. |
1,287 | def isnap(self):
if self._isnap is UNDETERMINED:
istep = None
isnap = -1
while (istep is None or istep < self.istep) and isnap < 99999:
isnap += 1
istep = self.sdat.snaps[isnap].istep
self.sdat.snaps.bind(isnap, istep)
if istep != self.istep:
self._isnap = None
return self._isnap | Snapshot index corresponding to time step.
It is set to None if no snapshot exists for the time step. |
1,288 | def _exception_free_callback(self, callback, *args, **kwargs):
try:
return callback(*args, **kwargs)
except Exception:
self._logger.exception("An exception occurred while calling a hook! ",exc_info=True)
return None | A wrapper that remove all exceptions raised from hooks |
1,289 | def backup(self):
output_path = self.base + PyFunceble.OUTPUTS["parent_directory"]
result = {PyFunceble.OUTPUTS["parent_directory"]: {}}
for root, _, files in PyFunceble.walk(output_path):
directories = Directory(root.split(output_path)[1]).fix_path()
local_result = result[PyFunceble.OUTPUTS["parent_directory"]]
for file in files:
file_path = root + PyFunceble.directory_separator + file
file_hash = Hash(file_path, "sha512", True).get()
lines_in_list = [line.rstrip("\n") for line in open(file_path)]
formatted_content = "@@@".join(lines_in_list)
local_result = local_result.setdefault(
directories,
{file: {"sha512": file_hash, "content": formatted_content}},
)
Dict(result).to_json(self.base + "dir_structure_production.json") | Backup the developer state of `output/` in order to make it restorable
and portable for user. |
1,290 | def service_post_save(instance, *args, **kwargs):
if instance.is_monitored and settings.REGISTRY_SKIP_CELERY:
check_service(instance.id)
elif instance.is_monitored:
check_service.delay(instance.id) | Used to do a service full check when saving it. |
1,291 | def uuid3(namespace, name):
import md5
hash = md5.md5(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=3) | Generate a UUID from the MD5 hash of a namespace UUID and a name. |
1,292 | def getUTCDatetimeDOY(days=0, hours=0, minutes=0, seconds=0):
return (datetime.datetime.utcnow() +
datetime.timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)).strftime(DOY_Format) | getUTCDatetimeDOY -> datetime
Returns the UTC current datetime with the input timedelta arguments (days, hours, minutes, seconds)
added to current date. Returns ISO-8601 datetime format for day of year:
YYYY-DDDTHH:mm:ssZ |
1,293 | def start(self):
self.bot_start_time = datetime.now()
self.webserver = Webserver(self.config[][], self.config[][])
self.plugins.load()
self.plugins.load_state()
self._find_event_handlers()
self.sc = ThreadedSlackClient(self.config[])
self.always_send_dm = []
if in self.config:
self.always_send_dm.extend(map(lambda x: + x, self.config[]))
logging.getLogger().setLevel(logging.INFO)
self.is_setup = True
if self.test_mode:
self.metrics[] = (datetime.now() - self.bot_start_time).total_seconds() * 1000.0 | Initializes the bot, plugins, and everything. |
1,294 | def nsx_controller_connection_addr_method(self, **kwargs):
config = ET.Element("config")
nsx_controller = ET.SubElement(config, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(nsx_controller, "name")
name_key.text = kwargs.pop('name')
connection_addr = ET.SubElement(nsx_controller, "connection-addr")
method = ET.SubElement(connection_addr, "method")
method.text = kwargs.pop('method')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
1,295 | def status(config, group, accounts=(), region=None):
config = validate.callback(config)
destination = config.get()
client = boto3.Session().client()
for account in config.get(, ()):
if accounts and account[] not in accounts:
continue
session = get_session(account[], region)
account_id = session.client().get_caller_identity()[]
prefix = destination.get(, ).rstrip() + % account_id
prefix = "%s/flow-log" % prefix
role = account.pop()
if isinstance(role, six.string_types):
account[] = role.split()[4]
else:
account[] = role[-1].split()[4]
account.pop()
try:
tag_set = client.get_object_tagging(
Bucket=destination[], Key=prefix).get(, [])
except ClientError:
account[] =
continue
tags = {t[]: t[] for t in tag_set}
if not in tags:
account[] =
else:
last_export = parse(tags[])
account[] = last_export.strftime()
accounts = [a for a in config.get() if a in accounts or not accounts]
accounts.sort(key=operator.itemgetter(), reverse=True)
print(tabulate(accounts, headers=)) | report current export state status |
1,296 | def cast(self, value):
if self.type is None:
return value
if self.type in (str, int, float):
try:
return self.type(value)
except Exception as e:
raise errors.BisonError(
'Failed to cast {} to {}'.format(value, self.type)
) from e
| Cast a value to the type required by the option, if one is set.
This is used to cast the string values gathered from environment
variable into their required type.
Args:
value: The value to cast.
Returns:
The value casted to the expected type for the option. |
1,297 | def declare(self, name, memory_type='BIT', memory_size=1, shared_region=None, offsets=None):
self.inst(Declare(name=name, memory_type=memory_type, memory_size=memory_size,
shared_region=shared_region, offsets=offsets))
return MemoryReference(name=name, declared_size=memory_size) | DECLARE a quil variable
This adds the declaration to the current program and returns a MemoryReference to the
base (offset = 0) of the declared memory.
.. note::
This function returns a MemoryReference and cannot be chained like some
of the other Program methods. Consider using ``inst(DECLARE(...))`` if you
would like to chain methods, but please be aware that you must create your
own MemoryReferences later on.
:param name: Name of the declared variable
:param memory_type: Type of the declared memory: 'BIT', 'REAL', 'OCTET' or 'INTEGER'
:param memory_size: Number of array elements in the declared memory.
:param shared_region: You can declare a variable that shares its underlying memory
with another region. This allows aliasing. For example, you can interpret an array
of measured bits as an integer.
:param offsets: If you are using ``shared_region``, this allows you to share only
a part of the parent region. The offset is given by an array type and the number
of elements of that type. For example,
``DECLARE target-bit BIT SHARING real-region OFFSET 1 REAL 4 BIT`` will let you use
target-bit to poke into the fourth bit of the second real from the leading edge of
real-region.
:return: a MemoryReference to the start of the declared memory region, ie a memory
reference to ``name[0]``. |
1,298 | def get_log_entries_by_search(self, log_entry_query, log_entry_search):
if not self._can('search'):
raise PermissionDenied()
return self._provider_session.get_log_entries_by_search(log_entry_query, log_entry_search) | Pass through to provider LogEntrySearchSession.get_log_entries_by_search |
1,299 | def create_hdf_file(self):
mode = 'w'
if not self._overwrite and os.path.exists(self._fname):
mode = 'a'
self._hdf_file = h5py.File(self._fname, mode)
if self._hdf_basepath == '/':
self._group = self._hdf_file['/']
else:
self._group = self._hdf_file.create_group(self._hdf_basepath) | :return: h5py DataSet |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.