Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k)
---|---|---|
4,000 | def as_string(self, chars, current_linkable=False, class_current="active_link"):
return self.__do_menu("as_string", current_linkable, class_current, chars) | Returns the menu as a string. |
4,001 | def full_name(self):
if self.requested_version is not None:
return '%s-%s' % (self.name, self.requested_version)
return self.name | Return full package/distribution name, w/version |
4,002 | def get_dash(self):
dashes = ffi.new('double[]', cairo.cairo_get_dash_count(self._pointer))
offset = ffi.new('double *')
cairo.cairo_get_dash(self._pointer, dashes, offset)
self._check_status()
return list(dashes), offset[0] | Return the current dash pattern.
:returns:
A ``(dashes, offset)`` tuple of a list and a float.
:obj:`dashes` is a list of floats,
empty if no dashing is in effect. |
4,003 | def get_available_references(self, datas):
names = []
for k, v in datas.items():
if k.startswith(RULE_REFERENCE):
names.append(k[len(RULE_REFERENCE)+1:])
return names | Get available manifest reference names.
Every rule starting with the prefix from ``nomenclature.RULE_REFERENCE``
is an available reference.
Only name validation is performed on these references.
Arguments:
datas (dict): Data where to search for reference declarations.
Returns:
list: List of all available reference names, i.e. the real
names without the prefix. |
4,004 | def hkdf(self, chaining_key, input_key_material, dhlen=64):
if len(chaining_key) != self.HASHLEN:
raise HashError("Incorrect chaining key length")
if len(input_key_material) not in (0, 32, dhlen):
raise HashError("Incorrect input key material length")
temp_key = self.hmac_hash(chaining_key, input_key_material)
output1 = self.hmac_hash(temp_key, b'\x01')
output2 = self.hmac_hash(temp_key, output1 + b'\x02')
return output1, output2 | Hash-based key derivation function
Takes a ``chaining_key`` byte sequence of length HASHLEN, and an
``input_key_material`` byte sequence with length of either zero
bytes, 32 bytes or dhlen bytes.
Returns two byte sequences of length HASHLEN. |
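The derivation above follows the two-output HKDF pattern used by the Noise protocol framework: a temporary key is derived from the chaining key and input key material, then two output blocks are produced. A minimal standalone sketch, assuming SHA-256 as the hash (so HASHLEN = 32) and the standard-library `hmac`/`hashlib` modules in place of the class's `hmac_hash` method:

```python
import hashlib
import hmac

def hkdf_two_outputs(chaining_key: bytes, input_key_material: bytes):
    # temp_key = HMAC-HASH(chaining_key, input_key_material)
    temp_key = hmac.new(chaining_key, input_key_material, hashlib.sha256).digest()
    # output1 = HMAC-HASH(temp_key, 0x01)
    output1 = hmac.new(temp_key, b"\x01", hashlib.sha256).digest()
    # output2 = HMAC-HASH(temp_key, output1 || 0x02)
    output2 = hmac.new(temp_key, output1 + b"\x02", hashlib.sha256).digest()
    return output1, output2

out1, out2 = hkdf_two_outputs(b"\x00" * 32, b"\x11" * 32)
assert len(out1) == len(out2) == 32
```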
4,005 | def _read_hdf_columns(path_or_buf, columns, num_splits, kwargs):
df = pandas.read_hdf(path_or_buf, columns=columns, **kwargs)
return _split_result_for_readers(0, num_splits, df) + [len(df.index)] | Use a Ray task to read columns from HDF5 into a Pandas DataFrame.
Note: Ray functions are not detected by codecov (thus pragma: no cover)
Args:
path_or_buf: The path of the HDF5 file.
columns: The list of column names to read.
num_splits: The number of partitions to split the column into.
Returns:
A list containing the split Pandas DataFrames and the Index as the last
element. If there is no `index_col` set, then we just return the length.
This is used to determine the total length of the DataFrame to build a
default Index. |
4,006 | def create_weekmatrices(user, split_interval=60):
if not float(24 * 60 / split_interval).is_integer():
raise ValueError(
"The minute interval set for the week-matrix structure does not evenly divide the day!")
contacts_in = partial(bc.individual.number_of_contacts,
direction=, interaction=, summary=None)
contacts_out = partial(bc.individual.number_of_contacts,
direction=, interaction=, summary=None)
calls_in = partial(bc.individual.number_of_interactions,
direction=, interaction=, summary=None)
calls_out = partial(bc.individual.number_of_interactions,
direction=, interaction=, summary=None)
texts_in = partial(bc.individual.number_of_interactions,
direction=, interaction=, summary=None)
texts_out = partial(bc.individual.number_of_interactions,
direction=, interaction=, summary=None)
time_spent_in = partial(bc.individual.call_duration,
direction=, interaction=, summary=None)
time_spent_out = partial(bc.individual.call_duration,
direction=, interaction=, summary=None)
core_func = [
(contacts_in, "scalar"),
(contacts_out, "scalar"),
(calls_in, "scalar"),
(calls_out, "scalar"),
(texts_in, "scalar"),
(texts_out, "scalar")
]
time_func = [
(time_spent_in, "summarystats"),
(time_spent_out, "summarystats")
]
wm = []
sections = [
(i + 1) * split_interval for i in range(7 * 24 * 60 // split_interval)]
temp_user = _extract_user_info(user)
for grouped_records in group_records(user.records, groupby=):
week_records = list(grouped_records)
time_spent_rec = _transform_to_time_spent(
week_records, split_interval, sections)
wm.extend(_calculate_channels(
week_records, sections, split_interval, core_func, temp_user))
wm.extend(_calculate_channels(
time_spent_rec, sections, split_interval, time_func, temp_user, len(core_func)))
return wm | Computes raw indicators (e.g. number of outgoing calls) for intervals of ~1
hour across each week of user data. These "week-matrices" are returned in a
nested list with each sublist containing [user.name, channel, weekday,
section, value].
Parameters
----------
user : object
The user to create week-matrices for.
split_interval : int
The interval in minutes for which each indicator is computed. Defaults to 60.
Must evenly divide a day (24*60 minutes). |
4,007 | def check_install():
if platform.system() == 'Darwin' and sys.executable != '/usr/bin/python':
print("*" * 79)
print(textwrap.fill(
"WARNING: You are not using the version of Python included with "
"macOS. If you intend to use Voltron with the LLDB included "
"with Xcode, or GDB installed with Homebrew, it will not work "
"unless it is installed using the systemLinux':
try:
output = check_output([
"gdb", "-batch", "-q", "--nx", "-ex",
"pi print(sys.version_info.major)"
]).decode("utf-8")
gdb_python = int(output)
if gdb_python != sys.version_info.major:
print("*" * 79)
print(textwrap.fill(
"WARNING: You are installing Voltron using Python {0}.x "
"and GDB is linked with Python {1}.x. GDB will not be "
"able to load Voltron. Please install using Python {1} "
"if you intend to use Voltron with the copy of GDB that "
"is installed. See the following documentation for more "
"detailed installation instructions: "
"https://github.com/snare/voltron/wiki/Installation"
.format(sys.version_info.major, gdb_python), 79))
print("*" * 79)
except:
pass | Try to detect the two most common installation errors:
1. Installing on macOS using a Homebrew version of Python
2. Installing on Linux using Python 2 when GDB is linked with Python 3 |
4,008 | def mv(source, target):
if os.path.isfile(target) and len(source) == 1:
if click.confirm("Are you sure you want to overwrite %s?" % target):
err_msg = cli_syncthing_adapter.mv_edge_case(source, target)
if err_msg:
click.echo(err_msg)
return
if len(source) > 1 and not os.path.isdir(target):
click.echo(click.get_current_context().get_help())
return
else:
err_msg, err = cli_syncthing_adapter.mv(source, target)
if err_msg:
click.echo(err_msg, err) | Move synchronized directory. |
4,009 | def waitForCreation(self, timeout=10, notification=):
callback = AXCallbacks.returnElemCallback
retelem = None
args = (retelem,)
return self.waitFor(timeout, notification, callback=callback,
args=args) | Convenience method to wait for creation of some UI element.
Returns: The element created |
4,010 | def properties(self):
if self._property_manager is None:
self._property_manager = PropertyManager(session=self._session)
return self._property_manager | Property for accessing :class:`PropertyManager` instance, which is used to manage properties of the jobs.
:rtype: yagocd.resources.property.PropertyManager |
4,011 | def _make_jsmin(python_only=False):
if not python_only:
try:
import _rjsmin
except ImportError:
pass
else:
return _rjsmin.jsmin
try:
xrange
except NameError:
xrange = range
space_chars = r
line_comment = r
space_comment = r
space_comment_nobang = r
bang_comment = r
string1 = \
r
string2 = r
strings = r % (string1, string2)
charclass = r
nospecial = r
regex = r % (
nospecial, charclass, nospecial
)
space = r % (space_chars, space_comment)
space_nobang = r % (space_chars, space_comment_nobang)
newline = r % line_comment
def fix_charclass(result):
pos = result.find()
if pos >= 0:
result = r % (result[:pos], result[pos + 1:])
def sequentize(string):
first, last, result = None, None, []
for char in map(ord, string):
if last is None:
first = last = char
elif last + 1 == char:
last = char
else:
result.append((first, last))
first = last = char
if last is not None:
result.append((first, last))
return .join([ % (
chr(first),
last > first + 1 and or ,
last != first and chr(last) or
) for first, last in result])
return _re.sub(
r,
lambda m: % ord(m.group(1)), (
sequentize(result)
.replace(, )
.replace(, )
.replace(, )
)
)
def id_literal_(what):
match = _re.compile(what).match
result = .join([
chr(c) for c in xrange(127) if not match(chr(c))
])
return % fix_charclass(result)
def not_id_literal_(keep):
match = _re.compile(id_literal_(keep)).match
result = .join([
chr(c) for c in xrange(127) if not match(chr(c))
])
return r % fix_charclass(result)
not_id_literal = not_id_literal_(r)
preregex1 = r
preregex2 = r % locals()
id_literal = id_literal_(r)
id_literal_open = id_literal_(r)
id_literal_close = id_literal_(r)
dull = r
space_sub_simple = _re.compile((
r
r
r
r
r
r
r
r
r
r
r
r
r
r
r
r
) % locals()).sub
def space_subber_simple(match):
groups = match.groups()
if groups[0]:
return groups[0]
elif groups[1]:
return groups[1]
elif groups[2]:
return groups[2]
elif groups[3]:
return groups[3]
elif groups[4]:
return
elif groups[5] or groups[6] or groups[7]:
return
else:
return
space_sub_banged = _re.compile((
r
r
r
r
r
r
r
r
r
r
r
r
r
r
r
r
r
) % dict(locals(), space=space_nobang)).sub
def space_subber_banged(match):
groups = match.groups()
if groups[0]:
return groups[0]
elif groups[1]:
return groups[1]
elif groups[2]:
return groups[2]
elif groups[3]:
return groups[3]
elif groups[4]:
return groups[4]
elif groups[5]:
return
elif groups[6] or groups[7] or groups[8]:
return
else:
return
def jsmin(script, keep_bang_comments=False):
r
if keep_bang_comments:
return space_sub_banged(
space_subber_banged, % script
).strip()
else:
return space_sub_simple(
space_subber_simple, % script
).strip()
return jsmin | Generate JS minifier based on `jsmin.c by Douglas Crockford`_
.. _jsmin.c by Douglas Crockford:
http://www.crockford.com/javascript/jsmin.c
:Parameters:
`python_only` : ``bool``
Use only the Python variant. If true, loading the C extension
is not even attempted.
:Return: Minifier
:Rtype: ``callable`` |
4,012 | def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
print(percent)
if iteration == total:
print() | Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str) |
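A typical driving loop for a helper with this signature (a sketch; the sleep is a stand-in for real work):

```python
import time

items = list(range(20))
total = len(items)
print_progress_bar(0, total, prefix='Progress:', suffix='Complete')
for i, _ in enumerate(items, start=1):
    time.sleep(0.05)  # stand-in for real work
    print_progress_bar(i, total, prefix='Progress:', suffix='Complete')
```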
4,013 | def QWidget_factory(ui_file=None, *args, **kwargs):
file = ui_file or DEFAULT_UI_FILE
if not foundations.common.path_exists(file):
raise foundations.exceptions.FileExistsError("{0} | ui file doesn{0}(){1}{1}' attribute is not deletable!".format(
self.__class__.__name__, "ui_file"))
def show(self, setGeometry=True):
if not setGeometry:
super(QWidget, self).show()
return
wasHidden = not self.isVisible()
if self.__geometry is None and wasHidden:
center_widget_on_screen(self)
super(QWidget, self).show()
if self.__geometry is not None and wasHidden:
self.restoreGeometry(self.__geometry)
def closeEvent(self, event):
self.__geometry = self.saveGeometry()
event.accept()
return QWidget | Defines a class factory creating `QWidget <http://doc.qt.nokia.com/qwidget.html>`_ classes
using given ui file.
:param ui_file: Ui file.
:type ui_file: unicode
:param \*args: Arguments.
:type \*args: \*
:param \*\*kwargs: Keywords arguments.
:type \*\*kwargs: \*\*
:return: QWidget class.
:rtype: QWidget |
4,014 | def remove(self, fieldspec):
pattern = r
match = re.match(pattern, fieldspec)
if not match:
return None
grp = match.groupdict()
for field in self.get_fields(grp[]):
if grp[]:
updated = []
for code, value in pairwise(field.subfields):
if not code == grp[]:
updated += [code, value]
if not updated:
self.remove_field(field)
else:
field.subfields = updated
else:
self.remove_field(field) | Removes fields or subfields according to `fieldspec`.
If a non-control field subfield removal leaves no other subfields,
delete the field entirely. |
4,015 | def lockToColumn(self, index):
self._lockColumn = index
if index is None:
self.__destroyLockedView()
return
else:
if not self._lockedView:
view = QtGui.QTreeView(self.parent())
view.setModel(self.model())
view.setSelectionModel(self.selectionModel())
view.setItemDelegate(self.itemDelegate())
view.setFrameShape(view.NoFrame)
view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
view.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
view.setRootIsDecorated(self.rootIsDecorated())
view.setUniformRowHeights(True)
view.setFocusProxy(self)
view.header().setFocusProxy(self.header())
view.setStyleSheet(self.styleSheet())
view.setAutoScroll(False)
view.setSortingEnabled(self.isSortingEnabled())
view.setPalette(self.palette())
view.move(self.x(), self.y())
self.setAutoScroll(False)
self.setUniformRowHeights(True)
view.collapsed.connect(self.collapse)
view.expanded.connect(self.expand)
view.expanded.connect(self.__updateLockedView)
view.collapsed.connect(self.__updateLockedView)
view_head = view.header()
for i in range(self.columnCount()):
view_head.setResizeMode(i, self.header().resizeMode(i))
view.header().sectionResized.connect(self.__updateStandardSection)
self.header().sectionResized.connect(self.__updateLockedSection)
vbar = view.verticalScrollBar()
self.verticalScrollBar().valueChanged.connect(vbar.setValue)
self._lockedView = view
self.__updateLockedView() | Sets the column that the tree view will lock to. If None is supplied,
then locking will be removed.
:param index | <int> || None |
4,016 | def server(request):
return direct_to_template(
request,
,
{: getViewURL(request, idPage),
: getViewURL(request, idpXrds),
}) | Respond to requests for the server's primary web page. |
4,017 | def discover(language):
debug(%s\ % (language,))
global scrapers, discovered
for language in scrapers.iterkeys():
discovered[language] = {}
for scraper in scrapers[language]:
blacklist = [, , ]
methods = [method for method in dir(scraper) if method not in blacklist and not method.startswith() and callable(getattr(scraper, method))]
for method in methods:
if discovered[language].has_key(method):
discovered[language][method].append(scraper)
else:
discovered[language][method] = [scraper]
debug(%s\ % (len(scrapers[language]), len(discovered[language].keys()), language)) | Discovers all registered scrapers to be used for the generic scraping interface. |
4,018 | def check_bidi(data):
has_l = False
has_ral = False
for char in data:
if stringprep.in_table_d1(char):
has_ral = True
elif stringprep.in_table_d2(char):
has_l = True
if has_l and has_ral:
raise StringprepError("Both RandALCat and LCat characters present")
if has_ral and (not stringprep.in_table_d1(data[0])
or not stringprep.in_table_d1(data[-1])):
raise StringprepError("The first and the last character must"
" be RandALCat")
return data | Checks if the string is valid for bidirectional printing. |
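For reference, a small sketch of the two `stringprep` tables the check relies on (example characters chosen for illustration only):

```python
import stringprep

# Table D.1 holds RandALCat characters (right-to-left, e.g. Hebrew alef);
# table D.2 holds LCat characters (left-to-right, e.g. Latin letters).
assert stringprep.in_table_d1('\u05d0')   # HEBREW LETTER ALEF -> RandALCat
assert stringprep.in_table_d2('a')        # LATIN SMALL LETTER A -> LCat

# Mixing both categories is exactly what check_bidi() rejects.
mixed = 'a\u05d0'
has_ral = any(stringprep.in_table_d1(c) for c in mixed)
has_l = any(stringprep.in_table_d2(c) for c in mixed)
print(has_ral and has_l)  # True -> check_bidi(mixed) would raise StringprepError
```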
4,019 | def handle_battery_level(msg):
if not msg.gateway.is_sensor(msg.node_id):
return None
msg.gateway.sensors[msg.node_id].battery_level = msg.payload
msg.gateway.alert(msg)
return None | Process an internal battery level message. |
4,020 | def getScans(self, modifications=True, fdr=True):
if not self.scans:
for i in self:
yield i
else:
for i in self.scans.values():
yield i
yield None | get a random scan |
4,021 | def _vote_disagreement(self, votes):
ret = []
for candidate in votes:
ret.append(0.0)
lab_count = {}
for lab in candidate:
lab_count[lab] = lab_count.setdefault(lab, 0) + 1
for lab in lab_count.keys():
ret[-1] -= lab_count[lab] / self.n_students * \
math.log(float(lab_count[lab]) / self.n_students)
return ret | Return the disagreement measurement of the given number of votes.
It uses vote entropy to measure the disagreement.
Parameters
----------
votes : list of int, shape==(n_samples, n_students)
The predictions that each student gives to each sample.
Returns
-------
disagreement : list of float, shape=(n_samples)
The vote entropy of the given votes. |
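The loop above computes, per sample, the vote entropy H = -sum_l (count_l / n_students) * log(count_l / n_students). A compact standalone sketch of the same measure using `collections.Counter` (function and argument names are illustrative):

```python
import math
from collections import Counter

def vote_entropy(votes, n_students):
    """votes: list of per-sample label lists, each of length n_students."""
    result = []
    for candidate in votes:
        counts = Counter(candidate)
        entropy = -sum((c / n_students) * math.log(c / n_students)
                       for c in counts.values())
        result.append(entropy)
    return result

# Full agreement on sample 0 gives entropy 0; a 50/50 split gives log(2) ~ 0.693.
print(vote_entropy([[1, 1], [0, 1]], n_students=2))
```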
4,022 | def describe(self, **kwargs):
api_method = dxpy.api.container_describe
if isinstance(self, DXProject):
api_method = dxpy.api.project_describe
self._desc = api_method(self._dxid, **kwargs)
return self._desc | :returns: A hash containing attributes of the project or container.
:rtype: dict
Returns a hash with key-value pairs as specified by the API
specification for the `/project-xxxx/describe
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2Fdescribe>`_
method. This will usually include keys such as "id", "name",
"class", "billTo", "created", "modified", and "dataUsage". |
4,023 | async def stop(self):
for task in self.__tracks.values():
if task is not None:
task.cancel()
self.__tracks = {} | Stop discarding media. |
4,024 | def ko_model(model, field_names=None, data=None):
try:
if isinstance(model, str):
modelName = model
else:
modelName = model.__class__.__name__
if field_names:
fields = field_names
else:
fields = get_fields(model)
if hasattr(model, "comparator"):
comparator = str(model.comparator())
else:
comparator =
modelViewString = render_to_string(
"knockout_modeler/model.js",
{: modelName, : fields, : data, : comparator}
)
return modelViewString
except Exception as e:
logger.exception(e)
return | Given a model, returns the Knockout Model and the Knockout ViewModel.
Takes optional field names and data. |
4,025 | def _get_cpu_info_from_registry():
try:
if not DataSource.is_windows:
return {}
processor_brand = DataSource.winreg_processor_brand().strip()
vendor_id = DataSource.winreg_vendor_id_raw()
arch_string_raw = DataSource.winreg_arch_string_raw()
arch, bits = _parse_arch(arch_string_raw)
hz_actual = DataSource.winreg_hz_actual()
hz_actual = _to_decimal_string(hz_actual)
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
if hz_advertised == :
scale = 6
hz_advertised = _to_decimal_string(hz_actual)
feature_bits = DataSource.winreg_feature_bits()
def is_set(bit):
mask = 0x80000000 >> bit
retval = mask & feature_bits > 0
return retval
flags = {
: is_set(0),
: is_set(1),
: is_set(2),
: is_set(3),
: is_set(4),
: is_set(5),
: is_set(6),
: is_set(7),
: is_set(8),
: is_set(9),
: is_set(10),
: is_set(11),
: is_set(12),
: is_set(13),
: is_set(14),
: is_set(15),
: is_set(16),
: is_set(17),
: is_set(18),
: is_set(19),
: is_set(21),
: is_set(22),
: is_set(23),
: is_set(24),
: is_set(25),
: is_set(26),
: is_set(27),
: is_set(29),
: is_set(30),
: is_set(31)
}
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
: vendor_id,
: processor_brand,
: _hz_short_to_friendly(hz_advertised, scale),
: _hz_short_to_friendly(hz_actual, 6),
: _hz_short_to_full(hz_advertised, scale),
: _hz_short_to_full(hz_actual, 6),
: flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {} | FIXME: Is missing many of the newer CPU flags like sse3
Returns the CPU info gathered from the Windows Registry.
Returns {} if not on Windows. |
4,026 | def index():
if current_app.config[] is not None:
override = current_app.config[]
results = (models.TaskResult.query
.join(models.Task)
.filter(models.Task.playbook_id.in_(override)))
else:
results = models.TaskResult.query.all()
return render_template(, results=results) | This is not served anywhere in the web application.
It is used explicitly in the context of generating static files since
flask-frozen requires url_for's to crawl content.
url_for's are not used with result.show_result directly and are instead
dynamically generated through javascript for performance purposes. |
4,027 | def list_resource_commands(self):
resource_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.pardir,
))
answer = set([])
for _, name, _ in pkgutil.iter_modules([resource_path]):
res = tower_cli.get_resource(name)
if not getattr(res, , False):
answer.add(name)
return sorted(answer) | Returns a list of multi-commands for each resource type. |
4,028 | def patch_namespaced_horizontal_pod_autoscaler_status(self, name, namespace, body, **kwargs):
kwargs[] = True
if kwargs.get():
return self.patch_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs)
return data | partially update status of the specified HorizontalPodAutoscaler
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_horizontal_pod_autoscaler_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the HorizontalPodAutoscaler (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V2beta1HorizontalPodAutoscaler
If the method is called asynchronously,
returns the request thread. |
4,029 | def command_max_delay(self, event=None):
try:
max_delay = self.max_delay_var.get()
except ValueError:
max_delay = self.runtime_cfg.max_delay
if max_delay < 0:
max_delay = self.runtime_cfg.max_delay
if max_delay > 0.1:
max_delay = self.runtime_cfg.max_delay
self.runtime_cfg.max_delay = max_delay
self.max_delay_var.set(self.runtime_cfg.max_delay) | CPU burst max running time - self.runtime_cfg.max_delay |
4,030 | def _read_to_buffer(self) -> Optional[int]:
try:
while True:
try:
if self._user_read_buffer:
buf = memoryview(self._read_buffer)[
self._read_buffer_size :
]
else:
buf = bytearray(self.read_chunk_size)
bytes_read = self.read_from_fd(buf)
except (socket.error, IOError, OSError) as e:
if errno_from_exception(e) == errno.EINTR:
continue
if self._is_connreset(e):
self.close(exc_info=e)
return None
self.close(exc_info=e)
raise
break
if bytes_read is None:
return 0
elif bytes_read == 0:
self.close()
return 0
if not self._user_read_buffer:
self._read_buffer += memoryview(buf)[:bytes_read]
self._read_buffer_size += bytes_read
finally:
del buf
if self._read_buffer_size > self.max_buffer_size:
gen_log.error("Reached maximum read buffer size")
self.close()
raise StreamBufferFullError("Reached maximum read buffer size")
return bytes_read | Reads from the socket and appends the result to the read buffer.
Returns the number of bytes read. Returns 0 if there is nothing
to read (i.e. the read returns EWOULDBLOCK or equivalent). On
error closes the socket and raises an exception. |
4,031 | def upload(ctx, product, git_ref, dirname, aws_id, aws_secret, ci_env,
on_travis_push, on_travis_pr, on_travis_api, on_travis_cron,
skip_upload):
logger = logging.getLogger(__name__)
if skip_upload:
click.echo()
sys.exit(0)
logger.debug(, ci_env)
logger.debug(
,
on_travis_push, on_travis_pr, on_travis_api, on_travis_cron)
if ci_env == and \
_should_skip_travis_event(
on_travis_push, on_travis_pr, on_travis_api, on_travis_cron):
sys.exit(0)
ensure_login(ctx)
git_refs = _get_git_refs(ci_env, git_ref)
build_resource = register_build(
ctx.obj[],
ctx.obj[],
product,
git_refs
)
logger.debug(, build_resource)
| Upload a new site build to LSST the Docs. |
4,032 | def serial_ports():
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException) as e:
hfoslog(, port, e, type(e),
exc=True, lvl=warn)
return result | Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
Courtesy: Thomas ( http://stackoverflow.com/questions/12090503
/listing-available-com-ports-with-python ) |
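A minimal usage sketch (requires the third-party `pyserial` package that provides the `serial` module used above):

```python
ports = serial_ports()
print("Available serial ports:", ports)
if ports:
    chosen = ports[0]  # e.g. 'COM3' on Windows or '/dev/ttyUSB0' on Linux
    print("First available port:", chosen)
```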
4,033 | def build_asset_array(assets_by_site, tagnames=(), time_event=None):
for assets in assets_by_site:
if len(assets):
first_asset = assets[0]
break
else:
raise ValueError()
loss_types = []
occupancy_periods = []
for name in sorted(first_asset.values):
if name.startswith():
period = name.split(, 1)[1]
if period != :
occupancy_periods.append(period)
loss_types.append(name)
else:
loss_types.append( + name)
deductible_d = first_asset.deductibles or {}
limit_d = first_asset.insurance_limits or {}
if deductible_d or limit_d:
logging.warning(
)
retro = [] if first_asset._retrofitted else []
float_fields = loss_types + retro
int_fields = [(str(name), U16) for name in tagnames]
tagi = {str(name): i for i, name in enumerate(tagnames)}
asset_dt = numpy.dtype(
[(, U32), (, F32), (, F32), (, U32),
(, F32), (, F32)] + [
(str(name), float) for name in float_fields] + int_fields)
num_assets = sum(len(assets) for assets in assets_by_site)
assetcol = numpy.zeros(num_assets, asset_dt)
asset_ordinal = 0
fields = set(asset_dt.fields)
for sid, assets_ in enumerate(assets_by_site):
for asset in assets_:
asset.ordinal = asset_ordinal
record = assetcol[asset_ordinal]
asset_ordinal += 1
for field in fields:
if field == :
value = asset.ordinal
elif field == :
value = asset.number
elif field == :
value = asset.area
elif field == :
value = sid
elif field == :
value = asset.location[0]
elif field == :
value = asset.location[1]
elif field.startswith():
value = asset.values[field]
elif field == :
value = asset.retrofitted()
elif field in tagnames:
value = asset.tagidxs[tagi[field]]
else:
name, lt = field.split()
value = asset.value(lt, time_event)
record[field] = value
return assetcol, .join(occupancy_periods) | :param assets_by_site: a list of lists of assets
:param tagnames: a list of tag names
:returns: an array `assetcol` |
4,034 | def is_successful(self, retry=False):
if not self.is_terminated(retry=retry):
return False
retry_num = options.retry_times
while retry_num > 0:
try:
statuses = self.get_task_statuses()
return all(task.status == Instance.Task.TaskStatus.SUCCESS
for task in statuses.values())
except (errors.InternalServerError, errors.RequestTimeTooSkewed):
retry_num -= 1
if not retry or retry_num <= 0:
raise | If the instance runs successfully.
:return: True if successful else False
:rtype: bool |
4,035 | def add_report(self, specification_name, report):
self._reports[specification_name] = report
self._total = self._total + report.testsRun
self._failures = self._failures + len(report.failures)
self._errors = self._errors + len(report.errors)
self._success = self._total - self._failures - self._errors | Adds a given report with the given specification_name as key
to the reports dict and computes the number of successes, failures
and errors.
Args:
specification_name: string representing the specification (with ".spec")
report: The |
4,036 | def parse(self):
parser = self.subparser.add_parser(
"show",
help="Show workspace details",
description="Show workspace details.")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(, action=, help="All workspaces")
group.add_argument(, type=str, help="Workspace name", nargs=) | Parse show subcommand. |
4,037 | def squeeze(attrs, inputs, proto_obj):
new_attrs = translation_utils._fix_attribute_names(attrs,
{ : })
return , new_attrs, inputs | Remove single-dimensional entries from the shape of a tensor. |
4,038 | def get_chart(self, id, **kwargs):
resp = self._get_object_by_name(self._CHART_ENDPOINT_SUFFIX, id,
**kwargs)
return resp | Retrieve a (v2) chart by id. |
4,039 | def p_multiplicative_expr(self, p):
if len(p) == 2:
p[0] = p[1]
else:
p[0] = ast.BinOp(op=p[2], left=p[1], right=p[3]) | multiplicative_expr : unary_expr
| multiplicative_expr MULT unary_expr
| multiplicative_expr DIV unary_expr
| multiplicative_expr MOD unary_expr |
4,040 | def get_text(self):
return u.join(u.format(b) for b in self.text) | Get the text in its current state. |
4,041 | def just_find_proxy(pacfile, url, host=None):
if not os.path.isfile(pacfile):
raise IOError(.format(pacfile))
init()
parse_pac(pacfile)
proxy = find_proxy(url,host)
cleanup()
return proxy | This function is a wrapper around init, parse_pac, find_proxy
and cleanup. This is the function to call if you want to find
proxy just for one url. |
4,042 | def _runcog(options, files, uncog=False):
options.order(, , add_rest=True)
c = Cog()
if uncog:
c.options.bNoGenerate = True
c.options.bReplace = True
c.options.bDeleteCode = options.get("delete_code", False)
includedir = options.get(, None)
if includedir:
include = Includer(includedir, cog=c,
include_markers=options.get("include_markers"))
basedir = path(basedir)
if not files:
pattern = options.get("pattern", "*.rst")
if pattern:
files = basedir.walkfiles(pattern)
else:
files = basedir.walkfiles()
for f in sorted(files):
dry("cog %s" % f, c.processOneFile, f) | Common function for the cog and runcog tasks. |
4,043 | def print_object_results(obj_result):
print_results_header(obj_result.object_id, obj_result.is_valid)
if obj_result.warnings:
print_warning_results(obj_result, 1)
if obj_result.errors:
print_schema_results(obj_result, 1) | Print the results of validating an object.
Args:
obj_result: An ObjectValidationResults instance. |
4,044 | def parse_summary(self, contents):
lines = contents.strip().split()
data = {}
for row in lines[1:]:
split = row.strip().split()
sample = split[0]
data[sample] = {
: int(split[1]),
: int(split[2]),
: int(split[3])
}
return data | Parses summary file into a dictionary of counts. |
4,045 | def _onError(self, message):
self.isOK = False
if message.strip() != "":
self.errors.append(message) | Memorizes a parser error message. |
4,046 | def _import_lua_dependencies(lua, lua_globals):
if sys.platform not in (, ):
import ctypes
ctypes.CDLL(, mode=ctypes.RTLD_GLOBAL)
try:
lua_globals.cjson = lua.eval()
except RuntimeError:
raise RuntimeError("cjson not installed") | Imports lua dependencies that are supported by redis lua scripts.
The current implementation is fragile to the target platform and lua version
and may be disabled if these imports are not needed.
Included:
- cjson lib.
Pending:
- base lib.
- table lib.
- string lib.
- math lib.
- debug lib.
- cmsgpack lib. |
4,047 | def transpose(self, rows):
res = OrderedDict()
for row, cols in rows.items():
for col, cell in cols.items():
if col not in res:
res[col] = OrderedDict()
res[col][row] = cell
return res | Transposes the grid so rows become columns (keyed by column, then row). |
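For example, a 2x2 grid keyed row then column becomes keyed column then row; a sketch of the same transposition with plain ordered dicts:

```python
from collections import OrderedDict

rows = OrderedDict([
    ("r1", OrderedDict([("a", 1), ("b", 2)])),
    ("r2", OrderedDict([("a", 3), ("b", 4)])),
])

transposed = OrderedDict()
for row, cols in rows.items():
    for col, cell in cols.items():
        transposed.setdefault(col, OrderedDict())[row] = cell

# transposed == {'a': {'r1': 1, 'r2': 3}, 'b': {'r1': 2, 'r2': 4}}
print(transposed)
```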
4,048 | def _example_from_allof(self, prop_spec):
example_dict = {}
for definition in prop_spec[]:
update = self.get_example_from_prop_spec(definition, True)
example_dict.update(update)
return example_dict | Get the examples from an allOf section.
Args:
prop_spec: property specification you want an example of.
Returns:
An example dict |
4,049 | def patchURL(self, url, headers, body):
return self._load_resource("PATCH", url, headers, body) | Request a URL using the HTTP method PATCH. |
4,050 | def _get_position(self):
portfolio_code = self.account_config["portfolio_code"]
portfolio_info = self._get_portfolio_info(portfolio_code)
position = portfolio_info["view_rebalancing"]
stocks = position["holdings"]
return stocks | Get the Xueqiu (雪球) portfolio holdings.
:return: |
4,051 | def serialize_filesec(self):
if (
self.type.lower() not in ("item", "archival information package")
or self.use is None
):
return None
el = etree.Element(utils.lxmlns("mets") + "file", ID=self.file_id())
if self.group_id():
el.attrib["GROUPID"] = self.group_id()
if self.admids:
el.set("ADMID", " ".join(self.admids))
if self.checksum and self.checksumtype:
el.attrib["CHECKSUM"] = self.checksum
el.attrib["CHECKSUMTYPE"] = self.checksumtype
if self.path:
flocat = etree.SubElement(el, utils.lxmlns("mets") + "FLocat")
try:
flocat.set(utils.lxmlns("xlink") + "href", utils.urlencode(self.path))
except ValueError:
raise exceptions.SerializeError(
" URL.".format(self.path)
)
flocat.set("LOCTYPE", "OTHER")
flocat.set("OTHERLOCTYPE", "SYSTEM")
for transform_file in self.transform_files:
transform_file_el = etree.SubElement(
el, utils.lxmlns("mets") + "transformFile"
)
for key, val in transform_file.items():
attribute = "transform{}".format(key).upper()
transform_file_el.attrib[attribute] = str(val)
return el | Return the file Element for this file, appropriate for use in a fileSec.
If this is not an Item or has no use, return None.
:return: fileSec element for this FSEntry |
4,052 | def gradient_factory(name):
if name == :
def gradient(self):
return cos(self.domain)
elif name == :
def gradient(self):
return -sin(self.domain)
elif name == :
def gradient(self):
return 1 + square(self.domain) * self
elif name == :
def gradient(self):
return FunctionalQuotient(ConstantFunctional(self.domain, 0.5),
self)
elif name == :
def gradient(self):
return ScalingFunctional(self.domain, 2.0)
elif name == :
def gradient(self):
return reciprocal(self.domain)
elif name == :
def gradient(self):
return self
elif name == :
def gradient(self):
return FunctionalQuotient(ConstantFunctional(self.domain, -1.0),
square(self.domain))
elif name == :
def gradient(self):
return cosh(self.domain)
elif name == :
def gradient(self):
return sinh(self.domain)
else:
gradient = Functional.gradient
return gradient | Create gradient `Functional` for some ufuncs. |
4,053 | def register_hooks(app):
@app.before_request
def before_request():
g.user = get_current_user()
if g.user and g.user.is_admin:
g._before_request_time = time.time()
@app.after_request
def after_request(response):
if hasattr(g, ):
delta = time.time() - g._before_request_time
response.headers[] = delta * 1000
return response | Register hooks. |
4,054 | def issue_add_comment(self, issue_key, comment, visibility=None):
url = 'rest/api/2/issue/{issueIdOrKey}/comment'.format(issueIdOrKey=issue_key)
data = {'body': comment}
if visibility:
data['visibility'] = visibility
return self.post(url, data=data) | Add comment into Jira issue
:param issue_key:
:param comment:
:param visibility: OPTIONAL
:return: |
4,055 | def on_change(self, path, event_type):
for result in results:
collection_id = result[0]
sql =
cursor = self._execute(sql, (collection_id,))
self._load_keywords(collection_id, path=path) | Respond to changes in the file system
This method will be given the path to a file that
has changed on disk. We need to reload the keywords
from that file |
4,056 | def get_full_durable_object(arn, event_time, durable_model):
LOG.debug(f)
item = list(durable_model.query(arn, durable_model.eventTime == event_time))
if not item:
LOG.error(f
f)
raise DurableItemIsMissingException({"item_arn": arn, "event_time": event_time})
return item[0] | Utility method to fetch items from the Durable table if they are too big for SNS/SQS.
:param record:
:param durable_model:
:return: |
4,057 | def tz_localize(self, tz, ambiguous='raise', nonexistent='raise',
errors=None):
if errors is not None:
warnings.warn("The errors argument is deprecated and will be "
"removed in a future release. Use "
"nonexistent='NaT' or nonexistent='raise' "
"instead.", FutureWarning)
if errors == 'coerce':
nonexistent = 'NaT'
elif errors == 'raise':
nonexistent = 'raise'
else:
raise ValueError("The errors argument must be either 'coerce' "
"or 'raise'.")
nonexistent_options = ('raise', 'NaT', 'shift_forward',
'shift_backward')
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta):
raise ValueError("The nonexistent argument must be one of 'raise',"
" 'NaT', 'shift_forward', 'shift_backward' or"
" a timedelta object")
if self.tz is not None:
if tz is None:
new_dates = tzconversion.tz_convert(self.asi8, timezones.UTC,
self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = timezones.maybe_get_tz(tz)
new_dates = conversion.tz_localize_to_utc(
self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent,
)
new_dates = new_dates.view(_NS_DTYPE)
dtype = tz_to_dtype(tz)
return self._simple_new(new_dates, dtype=dtype, freq=self.freq) | Localize tz-naive Datetime Array/Index to tz-aware
Datetime Array/Index.
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
time zone.
Time zone localization helps to switch from time zone aware to time
zone unaware objects.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone to convert timestamps to. Passing ``None`` will
remove the time zone information preserving local time.
ambiguous : 'infer', 'NaT', bool array, default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times
.. versionadded:: 0.24.0
errors : {'raise', 'coerce'}, default None
- 'raise' will raise a NonExistentTimeError if a timestamp is not
valid in the specified time zone (e.g. due to a transition from
or to DST time). Use ``nonexistent='raise'`` instead.
- 'coerce' will return NaT if the timestamp can not be converted
to the specified time zone. Use ``nonexistent='NaT'`` instead.
.. deprecated:: 0.24.0
Returns
-------
Same type as self
Array/Index converted to the specified time zone.
Raises
------
TypeError
If the Datetime Array/Index is tz-aware and tz is not None.
See Also
--------
DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
one time zone to another.
Examples
--------
>>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
>>> tz_naive
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Localize DatetimeIndex in US/Eastern time zone:
>>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
>>> tz_aware
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]', freq='D')
With the ``tz=None``, we can remove the time zone information
while keeping the local time (not converted to UTC):
>>> tz_aware.tz_localize(None)
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Be careful with DST changes. When there is sequential data, pandas can
infer the DST time:
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.dt.tz_localize('CET', ambiguous='infer')
0 2018-10-28 01:30:00+02:00
1 2018-10-28 02:00:00+02:00
2 2018-10-28 02:30:00+02:00
3 2018-10-28 02:00:00+01:00
4 2018-10-28 02:30:00+01:00
5 2018-10-28 03:00:00+01:00
6 2018-10-28 03:30:00+01:00
dtype: datetime64[ns, CET]
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
0 2018-10-28 01:20:00+02:00
1 2018-10-28 02:36:00+02:00
2 2018-10-28 03:46:00+01:00
dtype: datetime64[ns, CET]
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
or `'shift_backwards'`.
>>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
0 2015-03-29 03:00:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, 'Europe/Warsaw']
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
0 2015-03-29 01:59:59.999999999+01:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, 'Europe/Warsaw']
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
0 2015-03-29 03:30:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, 'Europe/Warsaw'] |
4,058 | def to_dict(self):
rv = {: self.code}
if not self.is_native():
rv[] = self.issuer
rv[] = self.type
else:
rv[] =
return rv | Generate a dict for this object's attributes.
:return: A dict representing an :class:`Asset` |
4,059 | def apply_reactions(self, user):
if self.global_limit:
self._log.info(.format(num=self.global_limit))
self.agentml.set_limit(self, (time() + self.global_limit), self.glimit_blocking)
if self.user_limit:
self._log.info(.format(num=self.user_limit))
user.set_limit(self, (time() + self.user_limit), self.ulimit_blocking)
for var in self.vars:
var_type, var_name, var_value = var
var_name = .join(map(str, var_name)) if isinstance(var_name, Iterable) else var_name
var_value = .join(map(str, var_value)) if isinstance(var_value, Iterable) else var_value
if var_type == :
self.user.set_var(var_name, var_value)
if var_type == :
self.agentml.set_var(var_name, var_value) | Set active topics and limits after a response has been triggered
:param user: The user triggering the response
:type user: agentml.User |
4,060 | def js_extractor(response):
matches = rscript.findall(response)
for match in matches:
match = match[2].replace("JS file', match)
bad_scripts.add(match) | Extract js files from the response body |
4,061 | def unpack_value(format_string, stream):
message_bytes = stream.read(struct.calcsize(format_string))
return struct.unpack(format_string, message_bytes) | Helper function to unpack struct data from a stream and update the signature verifier.
:param str format_string: Struct format string
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Unpacked values
:rtype: tuple |
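For reference, a minimal round-trip with `struct` and `io.BytesIO` using the helper defined above:

```python
import io
import struct

stream = io.BytesIO(struct.pack(">I2s", 42, b"ok"))  # big-endian uint32 + 2 bytes
values = unpack_value(">I2s", stream)                # reads struct.calcsize(">I2s") == 6 bytes
print(values)  # (42, b'ok')
```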
4,062 | def prepare_files(self, finder):
from pip.index import Link
unnamed = list(self.unnamed_requirements)
reqs = list(self.requirements.values())
while reqs or unnamed:
if unnamed:
req_to_install = unnamed.pop(0)
else:
req_to_install = reqs.pop(0)
install = True
best_installed = False
not_found = None
if not self.ignore_installed and not req_to_install.editable:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade:
if not self.force_reinstall and not req_to_install.url:
try:
url = finder.find_requirement(
req_to_install, self.upgrade)
except BestVersionAlreadyInstalled:
best_installed = True
install = False
except DistributionNotFound as exc:
not_found = exc
else:
req_to_install.url = url.url
if not best_installed:
if os.path.exists(os.path.join(location, )):
raise PreviousBuildDirError(
"pip can%st upgrade when therepip wheel`
download_dir = self.wheel_download_dir
do_download = True
else:
download_dir = self.download_dir
do_download = self.is_download
unpack_url(
url, location, download_dir,
do_download, session=self.session,
)
except requests.HTTPError as exc:
logger.critical(
,
req_to_install,
exc,
)
raise InstallationError(
%
(req_to_install, exc, url)
)
else:
unpack = False
if unpack:
is_wheel = url and url.filename.endswith(wheel_ext)
if self.is_download:
req_to_install.source_dir = location
if not is_wheel:
req_to_install.run_egg_info()
if url and url.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
elif is_wheel:
req_to_install.source_dir = location
req_to_install.url = url.url
else:
req_to_install.source_dir = location
req_to_install.run_egg_info()
req_to_install.assert_source_matches_version()
if not self.ignore_installed:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
if dist.has_metadata():
finder.add_dependency_links(
dist.get_metadata_lines()
)
if not self.ignore_dependencies:
for subreq in dist.requires(
req_to_install.extras):
if self.has_requirement(
subreq.project_name):
continue
subreq = InstallRequirement(
str(subreq),
req_to_install,
isolated=self.isolated,
)
reqs.append(subreq)
self.add_requirement(subreq)
if not self.has_requirement(req_to_install.name):
self.add_requirement(req_to_install)
if (self.is_download or
req_to_install._temp_build_dir is not None):
self.reqs_to_cleanup.append(req_to_install)
if install:
self.successfully_downloaded.append(req_to_install) | Prepare process. Create temp directories, download and/or unpack files. |
4,063 | def make_serializable(data, mutable=True, key_stringifier=lambda x:x, simplify_midnight_datetime=True):
r
if isinstance(data, (datetime.datetime, datetime.date, datetime.time)):
if isinstance(data, datetime.datetime):
if not any((data.hour, data.minute, data.second)):
return datetime.date(data.year, data.month, data.day)
elif data.year == data.month == data.seconds == 1:
return datetime.time(data.hour, data.minute, data.second)
return data
elif isinstance(data, Model):
if isinstance(data, datetime.datetime):
if not any((data.hour, data.minute, data.second)):
return datetime.date(data.year, data.month, data.day)
elif data.year == data.month == data.seconds == 1:
return datetime.time(data.hour, data.minute, data.second)
return data
elif isinstance(data, Mapping):
mapping = tuple((make_serializable(k, mutable=False, key_stringifier=key_stringifier), make_serializable(v, mutable=mutable)) for (k, v) in data.iteritems())
if mutable:
return dict(mapping)
return mapping
elif hasattr(data, ):
if mutable:
return list(make_serializable(v, mutable=mutable) for v in data)
else:
return key_stringifier(tuple(make_serializable(v, mutable=mutable) for v in data))
elif isinstance(data, (float, Decimal)):
return float(data)
elif isinstance(data, basestring):
data = db.clean_utf8(data)
try:
return int(data)
except:
try:
return float(data)
except:
try:
return make_serializable(dateutil.parse(unicode(data)))
except:
try:
return make_serializable(data.__dict__)
except:
return unicode(data) | r"""Make sure the data structure is json serializable (json.dumps-able), all they way down to scalars in nested structures.
If mutable=False then return tuples for all iterables, except basestrings (strs),
so that they can be used as keys in a Mapping (dict).
>>> from collections import OrderedDict
>>> from decimal import Decimal
>>> data = {'x': Decimal('01.234567891113151719'), 'X': [{('y', 'z'): {'q': 'A\xFFB'}}, 'ender'] }
>>> make_serializable(OrderedDict(data)) == {'X': [{('y', 'z'): {'q': 'A\xc3\xbfB'}}, 'ender'], 'x': 1.2345678911131517}
True
>>> make_serializable({'ABCs': list('abc'), datetime.datetime(2014,10,31): datetime.datetime(2014,10,31,23,59,59)}
... ) == {'ABCs': ['2014-10-16 00:00:00', 'b', 'c'], '2014-10-31 00:00:00': '2014-10-31 23:59:59'}
True |
4,064 | def build_from_token_counts(self, token_counts, min_count, num_iterations=4):
self._init_alphabet_from_tokens(six.iterkeys(token_counts))
self._init_subtokens_from_list(list(self._alphabet))
if min_count < 1:
min_count = 1
for i in xrange(num_iterations):
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
escaped_token = _escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
start = 0
for subtoken in subtokens:
for end in xrange(start + 1, len(escaped_token) + 1):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
new_subtoken_strings = []
for lsub in xrange(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
if subtoken_string not in self._alphabet:
new_subtoken_strings.append((count, subtoken_string))
for l in xrange(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
new_subtoken_strings.extend((subtoken_counts.get(a, 0), a) for a in self._alphabet)
new_subtoken_strings.sort(reverse=True)
self._init_subtokens_from_list([subtoken for _, subtoken in new_subtoken_strings]) | Train a SubwordTextTokenizer based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer; how many iterations of refinement. |
4,065 | def get(self, buffer_type, offset):
if buffer_type == u'streaming':
chosen_buffer = self.streaming_data
else:
chosen_buffer = self.storage_data
if offset >= len(chosen_buffer):
raise StreamEmptyError("Invalid index given in get command", requested=offset, stored=len(chosen_buffer), buffer=buffer_type)
return chosen_buffer[offset] | Get a reading from the buffer at offset.
Offset is specified relative to the start of the data buffer.
This means that if the buffer rolls over, the offset for a given
item will appear to change. Anyone holding an offset outside of this
engine object will need to be notified when rollovers happen (i.e.
popn is called so that they can update their offset indices)
Args:
buffer_type (str): The buffer to pop from (either u"storage" or u"streaming")
offset (int): The offset of the reading to get |
4,066 | def update(self, report):
self.tp.extend(pack_boxes(report.tp, self.title))
self.fp.extend(pack_boxes(report.fp, self.title))
self.fn.extend(pack_boxes(report.fn, self.title)) | Add the items from the given report. |
4,067 | def plot_hurst_hist():
import matplotlib.pyplot as plt
hs = [nolds.hurst_rs(np.random.random(size=10000), corrected=True) for _ in range(100)]
plt.hist(hs, bins=20)
plt.xlabel("esimated value of hurst exponent")
plt.ylabel("number of experiments")
plt.show() | Plots a histogram of values obtained for the hurst exponent of uniformly
distributed white noise.
This function requires the package ``matplotlib``. |
4,068 | def enable_vxlan_feature(self, nexus_host, nve_int_num, src_intf):
starttime = time.time()
self.send_edit_string(nexus_host, snipp.PATH_VXLAN_STATE,
(snipp.BODY_VXLAN_STATE % "enabled"))
self.send_edit_string(nexus_host, snipp.PATH_VNSEG_STATE,
(snipp.BODY_VNSEG_STATE % "enabled"))
self.send_edit_string(
nexus_host,
(snipp.PATH_NVE_CREATE % nve_int_num),
(snipp.BODY_NVE_CREATE % nve_int_num))
self.send_edit_string(
nexus_host,
(snipp.PATH_NVE_CREATE % nve_int_num),
(snipp.BODY_NVE_ADD_LOOPBACK % ("enabled", src_intf)))
self.capture_and_print_timeshot(
starttime, "enable_vxlan",
switch=nexus_host) | Enable VXLAN on the switch. |
4,069 | def delete_repository(self, repository, params=None):
if repository in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument .")
return self.transport.perform_request(,
_make_path(, repository), params=params) | Removes a shared file system repository.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html>`_
:arg repository: A comma-separated list of repository names
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg timeout: Explicit operation timeout |
4,070 | def example_describe_configs(a, args):
resources = [ConfigResource(restype, resname) for
restype, resname in zip(args[0::2], args[1::2])]
fs = a.describe_configs(resources)
for res, f in fs.items():
try:
configs = f.result()
for config in iter(configs.values()):
print_config(config, 1)
except KafkaException as e:
print("Failed to describe {}: {}".format(res, e))
except Exception:
raise | describe configs |
4,071 | def get_version(self, paths=None, default="unknown"):
if self.attribute is None:
try:
f, p, i = find_module(self.module, paths)
if f:
f.close()
return default
except ImportError:
return None
v = get_module_constant(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v | Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'. |
4,072 | def calc_stress_tf(self, lin, lout, damped):
tf = self.calc_strain_tf(lin, lout)
if damped:
tf *= lout.layer.comp_shear_mod
else:
tf *= lout.layer.shear_mod
return tf | Compute the stress transfer function.
Parameters
----------
lin : :class:`~site.Location`
Location of input
lout : :class:`~site.Location`
Location of output. Note that this would typically be midheight
of the layer. |
4,073 | def _intertext_score(full_text):
sentences = sentence_tokenizer(full_text)
norm = _normalize(sentences)
similarity_matrix = pairwise_kernels(norm, metric=)
scores = _textrank(similarity_matrix)
scored_sentences = []
for i, s in enumerate(sentences):
scored_sentences.append((scores[i],i,s))
top_scorers = sorted(scored_sentences,
key=lambda tup: tup[0],
reverse=True)
return top_scorers | returns tuple of scored sentences
in order of appearance
Note: Doing an A/B test to
compare results, reverting to
original algorithm. |
4,074 | def constrains(self):
params = []
for constraint in self.in_constraints:
for var in constraint._vars:
param = var.get_parameter()
if param.component == constraint.component and param.qualifier == constraint.qualifier:
if param not in params and param.uniqueid != self.uniqueid:
params.append(param)
return params | returns a list of parameters that are constrained by this parameter |
4,075 | def check_ns_run_threads(run):
assert run[].dtype == int
uniq_th = np.unique(run[])
assert np.array_equal(
np.asarray(range(run[].shape[0])), uniq_th), \
str(uniq_th)
assert np.any(run[][:, 0] == -np.inf), (
+
)
for th_lab in uniq_th:
inds = np.where(run[] == th_lab)[0]
th_info = .format(
th_lab, run[][inds[0]], run[][th_lab, :])
assert run[][th_lab, 0] <= run[][inds[0]], (
+
th_info + .format(
run[][inds[0]] - run[][th_lab, 0]))
assert run[][th_lab, 1] == run[][inds[-1]], (
+ th_info) | Check thread labels and thread_min_max have expected properties.
Parameters
----------
run: dict
Nested sampling run to check.
Raises
------
AssertionError
If run does not have expected properties. |
4,076 | def get_appium_sessionId(self):
self._info("Appium Session ID: " + self._current_application().session_id)
return self._current_application().session_id | Returns the current session ID as a reference |
4,077 | def git_pull(repo_dir, remote="origin", ref=None, update_head_ok=False):
command = [, ]
if update_head_ok:
command.append()
command.append(pipes.quote(remote))
if ref:
command.append(ref)
return execute_git_command(command, repo_dir=repo_dir) | Do a git pull of `ref` from `remote`. |
4,078 | def encode(locations):
encoded = (
(_encode_value(lat, prev_lat), _encode_value(lon, prev_lon))
for (prev_lat, prev_lon), (lat, lon)
in _iterate_with_previous(locations, first=(0, 0))
)
encoded = chain.from_iterable(encoded)
return ''.join(c for r in encoded for c in r) | :param locations: locations list containing (lat, lon) two-tuples
:return: encoded polyline string |
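A self-contained sketch of the Google polyline value encoding that the helpers above rely on (`_encode_value` and `_iterate_with_previous` are not shown in this row); the 1e5 precision and 5-bit chunking follow the usual convention and should be treated as assumptions.

def _encode_value(curr, prev, precision=1e5):
    # Delta against the previous coordinate, scaled to integers.
    delta = int(round(curr * precision)) - int(round(prev * precision))
    delta <<= 1
    if delta < 0:
        delta = ~delta
    chunks = []
    while delta >= 0x20:                       # emit 5-bit chunks, least significant first
        chunks.append(chr((0x20 | (delta & 0x1F)) + 63))
        delta >>= 5
    chunks.append(chr(delta + 63))
    return ''.join(chunks)

def encode(locations):
    prev = (0.0, 0.0)
    out = []
    for lat, lon in locations:
        out.append(_encode_value(lat, prev[0]))
        out.append(_encode_value(lon, prev[1]))
        prev = (lat, lon)
    return ''.join(out)

# Classic reference example from the polyline spec:
assert encode([(38.5, -120.2), (40.7, -120.95), (43.252, -126.453)]) == '_p~iF~ps|U_ulLnnqC_mqNvxq`@'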
4,079 | def confd_state_internal_callpoints_typepoint_registration_type_range_range_daemon_error(self, **kwargs):
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
internal = ET.SubElement(confd_state, "internal")
callpoints = ET.SubElement(internal, "callpoints")
typepoint = ET.SubElement(callpoints, "typepoint")
id_key = ET.SubElement(typepoint, "id")
id_key.text = kwargs.pop('id')
registration_type = ET.SubElement(typepoint, "registration-type")
range = ET.SubElement(registration_type, "range")
range = ET.SubElement(range, "range")
daemon = ET.SubElement(range, "daemon")
error = ET.SubElement(daemon, "error")
error.text = kwargs.pop('error')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
4,080 | def _get_file_by_alias(part, files):
if _is_output(part):
return Output.from_string(part.pop())
else:
inputs = [[]]
if part.magic_or:
and_or = 'or'
else:
and_or = 'and'
for cut in part.asList():
if cut == OR_TOKEN:
inputs.append([])
continue
if cut == AND_TOKEN:
continue
input = Input(cut, filename=cut, and_or=and_or)
for file in files:
if file.alias == cut:
input.filename = file.filename
inputs[-1].append(input)
break
else:
inputs[-1].append(input)
return [input for input in inputs if input] | Given a command part, find the file it represents. If not found,
then returns a new token representing that file.
:throws ValueError: if the value is not a command file alias. |
4,081 | def averageConvergencePoint(self, prefix, minOverlap, maxOverlap,
settlingTime=1, firstStat=0, lastStat=None):
convergenceSum = 0.0
numCorrect = 0.0
inferenceLength = 1000000
for stats in self.statistics[firstStat:lastStat]:
convergencePoint = 0.0
for key in stats.iterkeys():
if prefix in key:
inferenceLength = len(stats[key])
columnConvergence = L4L2Experiment._locateConvergencePoint(
stats[key], minOverlap, maxOverlap)
convergencePoint = max(convergencePoint, columnConvergence)
convergenceSum += ceil(float(convergencePoint) / settlingTime)
if ceil(float(convergencePoint) / settlingTime) <= inferenceLength:
numCorrect += 1
if len(self.statistics[firstStat:lastStat]) == 0:
return 10000.0, 0.0
return (convergenceSum / len(self.statistics[firstStat:lastStat]),
numCorrect / len(self.statistics[firstStat:lastStat]) ) | For each object, compute the convergence time - the first point when all
L2 columns have converged.
Return the average convergence time and accuracy across all objects.
Using inference statistics for a bunch of runs, locate all traces with the
given prefix. For each trace locate the iteration where it finally settles
on targetValue. Return the average settling iteration and accuracy across
all runs.
:param prefix: Use this prefix to filter relevant stats.
:param minOverlap: Min target overlap
:param maxOverlap: Max target overlap
:param settlingTime: Settling time between iterations. Default 1
:return: Average settling iteration and accuracy across all runs |
4,082 | def get_orm_column_names(cls: Type, sort: bool = False) -> List[str]:
colnames = [col.name for col in get_orm_columns(cls)]
return sorted(colnames) if sort else colnames | Gets column names (that is, database column names) from an SQLAlchemy
ORM class. |
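A hedged usage sketch: with a declarative SQLAlchemy model, the database column names can be read from the mapped table, which is what a helper like the one above exposes; the model below is illustrative.

from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Person(Base):
    __tablename__ = 'person'
    id = Column(Integer, primary_key=True)
    surname = Column('family_name', String)  # attribute name and column name can differ

# Database column names, optionally sorted:
names = [col.name for col in Person.__table__.columns]
assert sorted(names) == ['family_name', 'id']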
4,083 | def expr(args):
from jcvi.graphics.base import red_purple as default_cm
p = OptionParser(expr.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x5")
if len(args) != 4:
sys.exit(not p.print_help())
block, exp, layout, napusbed = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
s = Synteny(fig, root, block, napusbed, layout)
fp = open(exp)
data = {}
for row in fp:
gid, lf, rt = row.split()
lf, rt = float(lf), float(rt)
data[gid] = (lf, rt)
rA, rB = s.rr
gA = [x.accn for x in rA.genes]
gC = [x.accn for x in rB.genes]
A = [data.get(x, (0, 0)) for x in gA]
C = [data.get(x, (0, 0)) for x in gC]
A = np.array(A)
C = np.array(C)
A = np.transpose(A)
C = np.transpose(C)
d, h = .01, .1
lsg = "lightslategrey"
coords = s.gg
axes = []
for j, (y, gg) in enumerate(((.79, gA), (.24, gC))):
r = s.rr[j]
x = r.xstart
w = r.xend - r.xstart
ax = fig.add_axes([x, y, w, h])
axes.append(ax)
root.add_patch(Rectangle((x - h, y - d), w + h + d, h + 2 * d, fill=False,
ec=lsg, lw=1))
root.text(x - d, y + 3 * h / 4, "root", ha="right", va="center")
root.text(x - d, y + h / 4, "leaf", ha="right", va="center")
ty = y - 2 * d if y > .5 else y + h + 2 * d
nrows = len(gg)
for i, g in enumerate(gg):
start, end = coords[(j, g)]
sx, sy = start
ex, ey = end
assert sy == ey
sy = sy + 2 * d if sy > .5 else sy - 2 * d
root.plot(((sx + ex) / 2, x + w * (i + .5) / nrows), (sy, ty),
lw=1, ls=":", color="k", alpha=.2)
axA, axC = axes
p = axA.pcolormesh(A, cmap=default_cm)
p = axC.pcolormesh(C, cmap=default_cm)
axA.set_xlim(0, len(gA))
axC.set_xlim(0, len(gC))
x, y, w, h = .35, .1, .3, .05
ax_colorbar = fig.add_axes([x, y, w, h])
fig.colorbar(p, cax=ax_colorbar, orientation='horizontal')  # orientation assumed from the wide, short colorbar axes
root.text(x - d, y + h / 2, "RPKM", ha="right", va="center")
root.set_xlim(0, 1)
root.set_ylim(0, 1)
for x in (axA, axC, root):
x.set_axis_off()
image_name = "napusf4b." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | %prog expr block exp layout napus.bed
Plot a composite figure showing synteny and the expression level between
homeologs in two tissues - total 4 lists of values. block file contains the
gene pairs between AN and CN. |
4,084 | def area(self):
r
if self._dimension != 2:
raise NotImplementedError(
"2D is the only supported dimension",
"Current dimension",
self._dimension,
)
edge1, edge2, edge3 = self._get_edges()
return _surface_helpers.compute_area(
(edge1._nodes, edge2._nodes, edge3._nodes)
) | r"""The area of the current surface.
For surfaces in :math:`\mathbf{R}^2`, this computes the area via
Green's theorem. Using the vector field :math:`\mathbf{F} =
\left[-y, x\right]^T`, since :math:`\partial_x(x) - \partial_y(-y) = 2`
Green's theorem says twice the area is equal to
.. math::
\int_{B\left(\mathcal{U}\right)} 2 \, d\mathbf{x} =
\int_{\partial B\left(\mathcal{U}\right)} -y \, dx + x \, dy.
This relies on the assumption that the current surface is valid, which
implies that the image of the unit triangle under the Bézier
map --- :math:`B\left(\mathcal{U}\right)` --- has the edges of the
surface as its boundary.
Note that for a given edge :math:`C(r)` with control points
:math:`x_j, y_j`, the integral can be simplified:
.. math::
\int_C -y \, dx + x \, dy = \int_0^1 (x y' - y x') \, dr
= \sum_{i < j} (x_i y_j - y_i x_j) \int_0^1 b_{i, d}
b'_{j, d} \, dr
where :math:`b_{i, d}, b_{j, d}` are Bernstein basis polynomials.
Returns:
float: The area of the current surface.
Raises:
NotImplementedError: If the current surface isn't in
:math:`\mathbf{R}^2`. |
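A worked check of the boundary-integral idea: for straight (degree-1) edges the integral reduces to the shoelace formula, sketched below with the unit triangle as a sanity check.

import numpy as np

def polygon_area(xs, ys):
    # Shoelace formula: 0.5 * |sum(x_i*y_{i+1} - y_i*x_{i+1})|, the degree-1
    # case of the Green's-theorem edge integral described above.
    xs, ys = np.asarray(xs, dtype=float), np.asarray(ys, dtype=float)
    return 0.5 * abs(np.dot(xs, np.roll(ys, -1)) - np.dot(ys, np.roll(xs, -1)))

# Unit triangle with vertices (0, 0), (1, 0), (0, 1):
assert abs(polygon_area([0, 1, 0], [0, 0, 1]) - 0.5) < 1e-12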
4,085 | def crypto_box_seal_open(ciphertext, pk, sk):
ensure(isinstance(ciphertext, bytes),
"input ciphertext must be bytes",
raising=TypeError)
ensure(isinstance(pk, bytes),
"public key must be bytes",
raising=TypeError)
ensure(isinstance(sk, bytes),
"secret key must be bytes",
raising=TypeError)
if len(pk) != crypto_box_PUBLICKEYBYTES:
raise exc.ValueError("Invalid public key")
if len(sk) != crypto_box_SECRETKEYBYTES:
raise exc.ValueError("Invalid secret key")
_clen = len(ciphertext)
ensure(_clen >= crypto_box_SEALBYTES,
("Input cyphertext must be "
"at least {} long").format(crypto_box_SEALBYTES),
raising=exc.TypeError)
_mlen = _clen - crypto_box_SEALBYTES
plaintext = ffi.new("unsigned char[]", max(1, _mlen))
res = lib.crypto_box_seal_open(plaintext, ciphertext, _clen, pk, sk)
ensure(res == 0, "An error occurred trying to decrypt the message",
raising=exc.CryptoError)
return ffi.buffer(plaintext, _mlen)[:] | Decrypts and returns an encrypted message ``ciphertext``, using the
recipient's secret key ``sk`` and the sender's ephemeral public key
embedded in the sealed box. The box construct nonce is derived from
the recipient's public key ``pk`` and the sender's public key.
:param ciphertext: bytes
:param pk: bytes
:param sk: bytes
:rtype: bytes
.. versionadded:: 1.2 |
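For comparison, a hedged usage sketch with PyNaCl's high-level SealedBox wrapper, which drives the low-level binding above; it assumes the pynacl package is installed.

from nacl.public import PrivateKey, SealedBox

recipient_key = PrivateKey.generate()
sealed = SealedBox(recipient_key.public_key).encrypt(b"attack at dawn")

# Only the holder of the recipient's secret key can open the sealed box;
# the sender's ephemeral public key travels inside the ciphertext.
plaintext = SealedBox(recipient_key).decrypt(sealed)
assert plaintext == b"attack at dawn"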
4,086 | def _get_query(self, order=None, filters=None):
order = self._get_order(order)
return self.posts.all().order_by(order) | This method exists to avoid code duplication in count() and get_content, since they do basically the same thing. |
4,087 | def _value_iterator(self, task_name, param_name):
cp_parser = CmdlineParser.get_instance()
if cp_parser:
dest = self._parser_global_dest(param_name, task_name)
found = getattr(cp_parser.known_args, dest, None)
yield (self._parse_or_no_value(found), None)
yield (self._get_value_from_config(task_name, param_name), None)
if self._config_path:
yield (self._get_value_from_config(self._config_path['section'], self._config_path['name']),  # config_path keys assumed
'The use of the configuration [{}] {} is deprecated. Please use [{}] {}'.format(
self._config_path['section'], self._config_path['name'], task_name, param_name))
yield (self._default, None) | Yield the parameter values, with optional deprecation warning as second tuple value.
The parameter value will be whatever non-_no_value that is yielded first. |
4,088 | def enumerate_global_imports(tokens):
imported_modules = []
import_line = False
from_import = False
parent_module = ""
function_count = 0
indentation = 0
for index, tok in enumerate(tokens):
token_type = tok[0]
token_string = tok[1]
if token_type == tokenize.INDENT:
indentation += 1
elif token_type == tokenize.DEDENT:
indentation -= 1
elif token_type == tokenize.NEWLINE:
import_line = False
from_import = False
elif token_type == tokenize.NAME:
if token_string in ["def", "class"]:
function_count += 1
if indentation == function_count - 1:
function_count -= 1
elif function_count >= indentation:
if token_string == "import":
import_line = True
elif token_string == "from":
from_import = True
elif import_line:
if token_type == tokenize.NAME \
and tokens[index+1][1] != 'as':  # comparison token assumed
if not from_import \
and token_string not in reserved_words:
if token_string not in imported_modules:
if tokens[index+1][1] == '.':
parent_module = token_string + '.'
else:
if parent_module:
module_string = (
parent_module + token_string)
imported_modules.append(module_string)
parent_module = ""
else:
imported_modules.append(token_string)
return imported_modules | Returns a list of all globally imported modules (skips modules imported
inside of classes, methods, or functions). Example::
>>> enumerate_global_imports(tokens)
['sys', 'os', 'tokenize', 're']
.. note::
Does not enumerate imports using the 'from' or 'as' keywords. |
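A hedged usage sketch: the token list such a helper expects can be produced with the standard tokenize module; the result shown in the comment assumes the function above is importable as enumerate_global_imports.

import io
import tokenize

source = (
    "import os\n"
    "import sys\n"
    "\n"
    "def helper():\n"
    "    import json  # local import, should be skipped\n"
)
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
# enumerate_global_imports(tokens)  ->  ['os', 'sys']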
4,089 | def chdir(self,
path,
timeout=shutit_global.shutit_global_object.default_timeout,
note=None,
loglevel=logging.DEBUG):
shutit = self.shutit
shutit.handle_note(note, 'Changing to path: ' + path)  # string literals in this block are assumed
shutit.log('Changing directory to ' + path, level=logging.DEBUG)
if shutit.build['delivery'] in ('bash', 'dockerfile'):
self.send(ShutItSendSpec(self,
send=' command cd ' + path,
timeout=timeout,
echo=False,
loglevel=loglevel))
elif shutit.build['delivery'] in ('docker', 'ssh'):
os.chdir(path)
else:
shutit.fail('chdir not supported for delivery method: ' + str(shutit.build['delivery']))
shutit.handle_note_after(note=note)
return True | How to change directory will depend on whether we are in delivery mode bash or docker.
@param path: Path to send file to.
@param timeout: Timeout on response
@param note: See send() |
4,090 | def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
anneal_deg=60., anneal_step=0.9, extended=False, n_subgauss=1,
kurt_size=6000, ext_blocks=1, max_iter=200,
random_state=None, verbose=None):
rng = check_random_state(random_state)
max_weight = 1e8
restart_fac = 0.9
min_l_rate = 1e-10
blowup = 1e4
blowup_fac = 0.5
n_small_angle = 20
degconst = 180.0 / np.pi
extmomentum = 0.5
signsbias = 0.02
signcount_threshold = 25
signcount_step = 2
if ext_blocks > 0:
n_subgauss = 1
n_samples, n_features = data.shape
n_features_square = n_features ** 2
if l_rate is None:
l_rate = 0.01 / math.log(n_features ** 2.0)
if block is None:
block = int(math.floor(math.sqrt(n_samples / 3.0)))
logger.info('Computing%sInfomax ICA' % (' Extended ' if extended is True else ' '))  # message text assumed
nblock = n_samples // block
lastt = (nblock - 1) * block + 1
if weights is None:
weights = np.identity(n_features, dtype=np.float64)
BI = block * np.identity(n_features, dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
onesrow = np.ones((1, block), dtype=np.float64)
startweights = weights.copy()
oldweights = startweights.copy()
step = 0
count_small_angle = 0
wts_blowup = False
blockno = 0
signcount = 0
if extended is True:
signs = np.identity(n_features)
signs.flat[slice(0, n_features * n_subgauss, n_features)]
kurt_size = min(kurt_size, n_samples)
old_kurt = np.zeros(n_features, dtype=np.float64)
oldsigns = np.zeros((n_features, n_features))
olddelta, oldchange = 1., 0.
while step < max_iter:
permute = list(range(n_samples))
rng.shuffle(permute)
for t in range(0, lastt, block):
u = np.dot(data[permute[t:t + block], :], weights)
u += np.dot(bias, onesrow).T
if extended is True:
y = np.tanh(u)
weights += l_rate * np.dot(weights,
BI - np.dot(np.dot(u.T, y), signs) -
np.dot(u.T, u))
bias += l_rate * np.reshape(np.sum(y, axis=0,
dtype=np.float64) * -2.0,
(n_features, 1))
else:
y = 1.0 / (1.0 + np.exp(-u))
weights += l_rate * np.dot(weights,
BI + np.dot(u.T, (1.0 - 2.0 * y)))
bias += l_rate * np.reshape(np.sum((1.0 - 2.0 * y), axis=0,
dtype=np.float64), (n_features, 1))
max_weight_val = np.max(np.abs(weights))
if max_weight_val > max_weight:
wts_blowup = True
blockno += 1
if wts_blowup:
break
if extended is True:
n = np.fix(blockno / ext_blocks)
if np.abs(n) * ext_blocks == blockno:
if kurt_size < n_samples:
rp = np.floor(rng.uniform(0, 1, kurt_size) *
(n_samples - 1))
tpartact = np.dot(data[rp.astype(int), :], weights).T
else:
tpartact = np.dot(data, weights).T
kurt = kurtosis(tpartact, axis=1, fisher=True)
if extmomentum != 0:
kurt = (extmomentum * old_kurt +
(1.0 - extmomentum) * kurt)
old_kurt = kurt
signs.flat[::n_features + 1] = ((kurt + signsbias) /
np.abs(kurt + signsbias))
ndiff = ((signs.flat[::n_features + 1] -
oldsigns.flat[::n_features + 1]) != 0).sum()
if ndiff == 0:
signcount += 1
else:
signcount = 0
oldsigns = signs
if signcount >= signcount_threshold:
ext_blocks = np.fix(ext_blocks * signcount_step)
signcount = 0
if not wts_blowup:
oldwtchange = weights - oldweights
step += 1
angledelta = 0.0
delta = oldwtchange.reshape(1, n_features_square)
change = np.sum(delta * delta, dtype=np.float64)
if step > 1:
angledelta = math.acos(np.sum(delta * olddelta) /
math.sqrt(change * oldchange))
angledelta *= degconst
oldweights = weights.copy()
if angledelta > anneal_deg:
l_rate *= anneal_step
olddelta = delta
oldchange = change
count_small_angle = 0
else:
if step == 1:
olddelta = delta
oldchange = change
count_small_angle += 1
if count_small_angle > n_small_angle:
max_iter = step
if step > 2 and change < w_change:
step = max_iter
elif change > blowup:
l_rate *= blowup_fac
else:
step = 0
wts_blowup = 0
blockno = 1
l_rate *= restart_fac
weights = startweights.copy()
oldweights = startweights.copy()
olddelta = np.zeros((1, n_features_square), dtype=np.float64)
bias = np.zeros((n_features, 1), dtype=np.float64)
if extended:
signs = np.identity(n_features)
signs.flat[slice(0, n_features * n_subgauss, n_features)]
oldsigns = np.zeros((n_features, n_features))
if l_rate > min_l_rate:
if verbose:
logger.info('... lowering learning rate to %g\n... re-starting...' % l_rate)  # message text assumed
else:
raise ValueError(
)
return weights.T | Run the (extended) Infomax ICA decomposition on raw data
based on the publications of Bell & Sejnowski 1995 (Infomax)
and Lee, Girolami & Sejnowski, 1999 (extended Infomax)
Parameters
----------
data : np.ndarray, shape (n_samples, n_features)
The data to unmix.
weights : np.ndarray, shape (n_features, n_features)
The initialized unmixing matrix. Defaults to None. If None, the
identity matrix is used.
l_rate : float
This quantity indicates the relative size of the change in weights.
Note: smaller learning rates will slow down the procedure.
Defaults to ``0.01 / log(n_features ** 2.0)``.
block : int
The block size of randomly chosen data segment.
Defaults to ``floor(sqrt(n_samples / 3.0))``.
w_change : float
The change at which to stop iteration. Defaults to 1e-12.
anneal_deg : float
The angle at which (in degree) the learning rate will be reduced.
Defaults to 60.0
anneal_step : float
The factor by which the learning rate will be reduced once
``anneal_deg`` is exceeded:
l_rate *= anneal_step
Defaults to 0.9
extended : bool
Whether to use the extended Infomax algorithm or not. Defaults to
False.
n_subgauss : int
The number of subgaussian components. Only considered for extended
Infomax.
kurt_size : int
The window size for kurtosis estimation. Only considered for extended
Infomax.
ext_blocks : int
The number of blocks after which to recompute Kurtosis.
Only considered for extended Infomax.
max_iter : int
The maximum number of iterations. Defaults to 200.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
unmixing_matrix : np.ndarray of float, shape (n_features, n_features)
The linear unmixing operator. |
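A hedged usage sketch for the routine above: mix two synthetic sources and recover an unmixing matrix. The actual call is commented out because the import path of infomax is not shown here.

import numpy as np

rng = np.random.RandomState(42)
n_samples = 2000
sources = np.column_stack([
    np.sin(np.linspace(0, 8 * np.pi, n_samples)),   # smooth source
    rng.laplace(size=n_samples),                     # heavy-tailed, super-Gaussian source
])
mixing = np.array([[1.0, 0.5],
                   [0.4, 1.0]])
mixed = sources @ mixing.T                           # shape (n_samples, n_features)

# unmixing = infomax(mixed, extended=True, random_state=42)
# recovered = mixed @ unmixing.T                     # estimated sources (up to scale/permutation)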
4,091 | def from_iter(cls, data, name=None):
if not name:
name = 'table'
if isinstance(data, (list, tuple)):
data = {x: y for x, y in enumerate(data)}
values = [{'idx': k, 'col': 'data', 'val': v}  # key names assumed from the Vega data-table layout
for k, v in sorted(data.items())]
return cls(name, values=values) | Convenience method for loading data from an iterable.
Defaults to numerical indexing for x-axis.
Parameters
----------
data: iterable
An iterable of data (list, tuple, dict of key/val pairs)
name: string, default None
Name of the data set. If None (default), the name will be set to
``'table'``. |
4,092 | def astype(self, dtype, undefined_on_failure=False):
if (dtype == _Image) and (self.dtype == array.array):
raise TypeError("Cannot cast from image type to array with sarray.astype(). Please use sarray.pixel_array_to_image() instead.")
with cython_context():
return SArray(_proxy=self.__proxy__.astype(dtype, undefined_on_failure)) | Create a new SArray with all values cast to the given type. Throws an
exception if the types are not castable to the given type.
Parameters
----------
dtype : {int, float, str, list, array.array, dict, datetime.datetime}
The type to cast the elements to in SArray
undefined_on_failure: bool, optional
If set to True, runtime cast failures will be emitted as missing
values rather than failing.
Returns
-------
out : SArray [dtype]
The SArray converted to the type ``dtype``.
Notes
-----
- The string parsing techniques used to handle conversion to dictionary
and list types are quite generic and permit a variety of interesting
formats to be interpreted. For instance, a JSON string can usually be
interpreted as a list or a dictionary type. See the examples below.
- For datetime-to-string and string-to-datetime conversions,
use sa.datetime_to_str() and sa.str_to_datetime() functions.
- For array.array to turicreate.Image conversions, use sa.pixel_array_to_image()
Examples
--------
>>> sa = turicreate.SArray(['1','2','3','4'])
>>> sa.astype(int)
dtype: int
Rows: 4
[1, 2, 3, 4]
Given an SArray of strings that look like dicts, convert to a dictionary
type:
>>> sa = turicreate.SArray(['{1:2 3:4}', '{a:b c:d}'])
>>> sa.astype(dict)
dtype: dict
Rows: 2
[{1: 2, 3: 4}, {'a': 'b', 'c': 'd'}] |
4,093 | def convert_svhn(which_format, directory, output_directory,
output_filename=None):
if which_format not in (1, 2):
raise ValueError("SVHN format needs to be either 1 or 2.")
if not output_filename:
output_filename = 'svhn_format_{}.hdf5'.format(which_format)
if which_format == 1:
return convert_svhn_format_1(
directory, output_directory, output_filename)
else:
return convert_svhn_format_2(
directory, output_directory, output_filename) | Converts the SVHN dataset to HDF5.
Converts the SVHN dataset [SVHN] to an HDF5 dataset compatible
with :class:`fuel.datasets.SVHN`. The converted dataset is
saved as 'svhn_format_1.hdf5' or 'svhn_format_2.hdf5', depending
on the `which_format` argument.
.. [SVHN] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco,
Bo Wu, Andrew Y. Ng. *Reading Digits in Natural Images with
Unsupervised Feature Learning*, NIPS Workshop on Deep Learning
and Unsupervised Feature Learning, 2011.
Parameters
----------
which_format : int
Either 1 or 2. Determines which format (format 1: full numbers
or format 2: cropped digits) to convert.
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'svhn_format_1.hdf5' or
'svhn_format_2.hdf5', depending on `which_format`.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset. |
4,094 | def flesh_out(X, W, embed_dim, CC_labels, dist_mult=2.0, angle_thresh=0.2,
min_shortcircuit=4, max_degree=5, verbose=False):
W = W.astype(bool)
assert np.all(W == W.T), 'adjacency matrix W must be symmetric'  # message text assumed
D = pairwise_distances(X, metric='sqeuclidean')  # metric assumed: squared distances (np.sqrt(D) is taken at the end)
avg_edge_length = np.empty(X.shape[0])
for i,nbr_mask in enumerate(W):
avg_edge_length[i] = D[i,nbr_mask].mean()
dist_thresh = dist_mult * avg_edge_length
dist_mask = (D < dist_thresh) | (D < dist_thresh[:,None])
hops_mask = np.isinf(dijkstra(W, unweighted=True, limit=min_shortcircuit-1))
CC_mask = CC_labels != CC_labels[:,None]
candidate_edges = ~W & dist_mask & hops_mask & CC_mask
if verbose:
print('flesh_out:', candidate_edges.sum(), 'candidate edges')  # message text assumed
subspaces, _ = cluster_subspaces(X, embed_dim, CC_labels.max()+1, CC_labels)
ii,jj = np.where(np.triu(candidate_edges))
edge_dirs = X[ii] - X[jj]
ssi = subspaces[CC_labels[ii]]
ssj = subspaces[CC_labels[jj]]
F = edge_cluster_angle(edge_dirs, ssi, ssj)
mask = F < angle_thresh
edge_ii = ii[mask]
edge_jj = jj[mask]
edge_order = np.argsort(F[mask])
if verbose:
print('flesh_out:', len(edge_ii), 'edges pass the angle threshold')  # message text assumed
degree = W.sum(axis=0)
sorted_edges = np.column_stack((edge_ii, edge_jj))[edge_order]
for e in sorted_edges:
if degree[e].max() < max_degree:
W[e[0],e[1]] = True
W[e[1],e[0]] = True
degree[e] += 1
return Graph.from_adj_matrix(np.where(W, np.sqrt(D), 0)) | Given a connected graph adj matrix (W), add edges to flesh it out. |
4,095 | def compare(orderby_item1, orderby_item2):
type1_ord = _OrderByHelper.getTypeOrd(orderby_item1)
type2_ord = _OrderByHelper.getTypeOrd(orderby_item2)
type_ord_diff = type1_ord - type2_ord
if type_ord_diff:
return type_ord_diff
if type1_ord == 0:
return 0
return _compare_helper(orderby_item1['item'], orderby_item2['item'])  # key name assumed
:param dict orderby_item1:
:param dict orderby_item2:
:return:
Integer comparison result.
The comparator acts such that
- if the types are different we get:
Undefined value < Null < booleans < Numbers < Strings
- if both arguments are of the same type:
it simply compares the values.
:rtype: int |
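A small sketch of the cross-type ordering described above (undefined < null < booleans < numbers < strings); the ranking function below is illustrative, not the SDK's internal helper, and the 'item' key is assumed.

def type_rank(item):
    # Rank values so that: missing/undefined < None < bool < number < str.
    if 'item' not in item:          # 'item' key used here only for illustration
        return 0
    value = item['item']
    if value is None:
        return 1
    if isinstance(value, bool):     # bool must be checked before int
        return 2
    if isinstance(value, (int, float)):
        return 3
    if isinstance(value, str):
        return 4
    raise TypeError('unsupported order-by type: %r' % type(value))

assert type_rank({}) < type_rank({'item': None}) < type_rank({'item': True}) \
    < type_rank({'item': 3.5}) < type_rank({'item': 'abc'})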
4,096 | def local_fehdist(feh):
fehdist= 0.8/0.15*np.exp(-0.5*(feh-0.016)**2./0.15**2.)\
+0.2/0.22*np.exp(-0.5*(feh+0.15)**2./0.22**2.)
return fehdist | feh PDF based on local SDSS distribution
From Jo Bovy:
https://github.com/jobovy/apogee/blob/master/apogee/util/__init__.py#L3
2D gaussian fit based on Casagrande (2011) |
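A quick numerical check of the two-component Gaussian mixture above: with weights 0.8 and 0.2 and the 1/sigma prefactors, the density integrates to sqrt(2*pi) rather than 1, so it needs renormalizing if a proper PDF is required (this observation follows from the formula itself, not from the original source).

import numpy as np

feh = np.linspace(-1.5, 1.0, 5001)
pdf = (0.8 / 0.15 * np.exp(-0.5 * (feh - 0.016) ** 2 / 0.15 ** 2)
       + 0.2 / 0.22 * np.exp(-0.5 * (feh + 0.15) ** 2 / 0.22 ** 2))

# Trapezoidal integral over the grid, expected to be ~ sqrt(2*pi) ≈ 2.5066.
norm = np.sum(0.5 * (pdf[1:] + pdf[:-1]) * np.diff(feh))
assert abs(norm - np.sqrt(2 * np.pi)) < 1e-3
pdf_normalized = pdf / norm   # integrates to ~1 on this grid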
4,097 | def set_status(self, action, target):
try:
target = unquote(target)
except (AttributeError, TypeError):
pass
status = "%s (%s) %s" % (self.domain, action, target)
status = status.strip().replace('\n', ' ')  # replacement characters assumed
if len(status) >= self.MAXWIDTH:
tail = '...'
extent = self.MAXWIDTH - (len(tail) + self.RPAD)
self.status = status[:extent] + tail
else:
self.status = status | Sets query status with format: "{domain} ({action}) {target}" |
def connect_to_ec2(region='us-east-1', access_key=None, secret_key=None):  # default region assumed
if access_key:
logger.info('Connecting to AWS EC2 region {}'.format(region))  # message text assumed
connection = ec2.connect_to_region(
region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
else:
metadata = get_instance_metadata(timeout=1, num_retries=1)
if metadata:
try:
region = metadata['placement']['availability-zone'][:-1]  # standard EC2 instance-metadata keys
except KeyError:
pass
logger.info('Connecting to AWS EC2 region {}'.format(region))  # message text assumed
connection = ec2.connect_to_region(region)
if not connection:
logger.error()
sys.exit(1)
return connection | Connect to AWS ec2
:type region: str
:param region: AWS region to connect to
:type access_key: str
:param access_key: AWS access key id
:type secret_key: str
:param secret_key: AWS secret access key
:returns: boto.ec2.connection.EC2Connection -- EC2 connection |
4,099 | def raise_205(instance):
instance.response.status = 205
instance.response.body = ''
instance.response.body_raw = None
raise ResponseException(instance.response) | Abort the current request with a 205 (Reset Content) response code.
Clears out the body of the response.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 205 |