repo
stringlengths 1
29
| path
stringlengths 24
332
| code
stringlengths 39
579k
|
---|---|---|
cve-bin-tool-1.0
|
cve-bin-tool-1.0//cve_bin_tool/checkers/python.pyfile:/cve_bin_tool/checkers/python.py:function:guess_contains_python/guess_contains_python
|
def guess_contains_python(lines):
    """Try to determine whether a file contains a Python interpreter.

    :param lines: iterable of decoded text lines (e.g. strings extracted
        from a binary) to scan for CPython marker strings.
    :return: 1 if any known marker string is found, 0 otherwise.
    """
    # Marker strings emitted by the CPython runtime; collected in one tuple
    # so adding a new signature is a one-line change.
    markers = (
        'Fatal Python error: unable to decode the command line argument',
        'CPython',
        'Internal error in the Python interpreter',
    )
    for line in lines:
        if any(marker in line for marker in markers):
            return 1
    return 0
|
opentuner-0.8.2
|
opentuner-0.8.2//opentuner/search/technique.pyclass:SearchTechnique/get_hyper_parameters
|
@classmethod
def get_hyper_parameters(cls):
    """Return the list of hyper-parameter names for this technique.

    Each name must match both the attribute that stores the hyper-parameter
    value on technique instances and the keyword argument accepted when the
    instance is initialized.  E.g. a hyper-parameter "mutation_rate" implies
    an ``__init__`` keyword ``mutation_rate`` and the assignment
    ``self.mutation_rate = mutation_rate``.  Hyper-parameters should only
    take literal values.
    """
    # Base implementation: no hyper-parameters.
    return []
|
aggregation_builder-0.0.4
|
aggregation_builder-0.0.4//aggregation_builder/operators/group.pyfile:/aggregation_builder/operators/group.py:function:FIRST/FIRST
|
def FIRST(expression):
    """Build the MongoDB ``$first`` aggregation operator.

    Yields the value of applying *expression* to the first document in each
    group sharing the same group-by key; only meaningful when documents are
    in a defined order.
    See https://docs.mongodb.com/manual/reference/operator/aggregation/first/
    for more details.

    :param expression: expression or variables
    :return: Aggregation operator
    """
    operator = {'$first': expression}
    return operator
|
iSearch
|
iSearch//isearch.pyfile:/isearch.py:function:normal_print/normal_print
|
def normal_print(raw):
    """Print each non-empty line of *raw*, each followed by a blank line.

    Plain (non-colorful) text output.
    """
    for chunk in raw.split('\n'):
        if not chunk:
            continue
        print(chunk + '\n')
|
sqt-0.8.0
|
sqt-0.8.0//sqt/dna.pyfile:/sqt/dna.py:function:n_intervals/n_intervals
|
def n_intervals(sequence, N='N'):
    """Yield (start, stop) tuples for every maximal run of N characters.

    The sequence is upper-cased first, so lowercase 'n' also counts.
    If the sequence is a bytes/bytearray object, pass ``N=ord(b'N')``.

    >>> list(n_intervals('ACGTnNAC'))
    [(4, 6)]
    >>> list(n_intervals(b'ACGTnNAC', N=ord(b'N')))
    [(4, 6)]
    """
    seq = sequence.upper()
    length = len(seq)
    begin = seq.find(N)
    while begin >= 0:
        # Walk forward over the run of N characters.
        end = begin
        while end < length and seq[end] == N:
            end += 1
        yield begin, end
        begin = seq.find(N, end)
|
dse-driver-2.11.1
|
dse-driver-2.11.1//dse/cqltypes.pyclass:BoundKind/to_int
|
@classmethod
def to_int(cls, bound_str):
    """Encode a bound-kind string as an int for serialization.

    The lookup is case-insensitive (the key is upper-cased first).
    :raises KeyError: if *bound_str* is not a known bound kind.
    """
    key = bound_str.upper()
    return cls._bound_str_to_int_map[key]
|
yahoo_panoptes-1.3.2
|
yahoo_panoptes-1.3.2//yahoo_panoptes/plugins/enrichment/generic/snmp/cisco/ios/plugin_enrichment_cisco_ios_device_metrics.pyclass:CiscoIOSPluginEnrichmentMetrics/_entity_sensor_scale_to_exponent
|
@staticmethod
def _entity_sensor_scale_to_exponent(sensor_scale):
    """Map an entSensorScale index to the exponent applied to entSensorValue.

    sensor_scale is a 1-based integer index from the Cisco
    CISCO-ENTITY-SENSOR-MIB SensorDataScale definitions:
    1 -> yocto (-24) up to 17 -> yotta (24), stepping by 3.

    Args:
        sensor_scale(int): entSensorScale value
    Returns:
        int: signed integer exponent to be applied to entSensorValue to normalize
    """
    # Ints stored directly instead of the original unicode strings, which
    # were converted with int() on every call.  The table (rather than the
    # arithmetic 3 * (scale - 9)) is kept so out-of-range indices behave
    # exactly as before (Python's negative-index wrap / IndexError).
    sensor_exponents = (-24, -21, -18, -15, -12, -9, -6, -3, 0,
                        3, 6, 9, 12, 15, 18, 21, 24)
    return sensor_exponents[sensor_scale - 1]
|
cosmo_utils-0.1.64
|
cosmo_utils-0.1.64//astropy_helpers/astropy_helpers/commands/build_ext.pyfile:/astropy_helpers/astropy_helpers/commands/build_ext.py:function:should_build_with_cython/should_build_with_cython
|
def should_build_with_cython(package, release=None):
    """Decide whether extension modules should be built from .pyx files.

    Returns the Cython version recorded by the previous build (or the string
    'unknown' when there was no previous build) if Cython should be used,
    otherwise False.  If ``release`` is not given, an attempt is made to read
    it from the package's generated ``cython_version`` module.
    """
    try:
        version_module = __import__(
            package + '.cython_version',
            fromlist=['release', 'cython_version'])
    except ImportError:
        version_module = None

    # Fill in the release flag from the version module when not supplied.
    if release is None and version_module is not None:
        try:
            release = version_module.release
        except AttributeError:
            pass

    # Previously-used Cython version; AttributeError also covers the case
    # where version_module is None.
    try:
        cython_version = version_module.cython_version
    except AttributeError:
        cython_version = 'unknown'

    try:
        import Cython  # noqa: F401 -- availability probe only
    except ImportError:
        have_cython = False
    else:
        have_cython = True

    if have_cython and (not release or cython_version == 'unknown'):
        return cython_version
    return False
|
eclcli-1.3.5
|
eclcli-1.3.5//eclcli/storage/storageclient/utils.pyfile:/eclcli/storage/storageclient/utils.py:function:add_arg/add_arg
|
def add_arg(f, *args, **kwargs):
    """Bind CLI arguments to a shell.py `do_foo` function.

    Each (args, kwargs) pair is prepended to ``f.arguments`` once;
    duplicates are ignored.
    """
    arguments = getattr(f, 'arguments', None)
    if arguments is None:
        arguments = f.arguments = []
    entry = (args, kwargs)
    if entry not in arguments:
        arguments.insert(0, entry)
|
flowws-analysis-0.5.1
|
flowws-analysis-0.5.1//flowws_analysis/GTAR.pyfile:/flowws_analysis/GTAR.py:function:index_sort_key/index_sort_key
|
def index_sort_key(x):
    """Sort key for getar frame indices: by length first, then value."""
    return (len(x), x)
|
onegov
|
onegov//election_day/utils/ballot.pyfile:/election_day/utils/ballot.py:function:get_ballot_data_by_district/get_ballot_data_by_district
|
def get_ballot_data_by_district(ballot):
    """ Returns the yeas/nays percentage grouped and keyed by district. """
    data = {}
    for result in ballot.results_by_district:
        entry = {
            'counted': result.counted,
            'entities': result.entity_ids,
        }
        # Percentage is only meaningful once the district is counted.
        if result.counted:
            entry['percentage'] = result.yeas_percentage
        data[result.name] = entry
    return data
|
fake-bpy-module-2.78-20200428
|
fake-bpy-module-2.78-20200428//bpy/ops/ui.pyfile:/bpy/ops/ui.py:function:unset_property_button/unset_property_button
|
def unset_property_button():
    """Clear the property and use default or generated value in operators.

    Stub from the fake bpy API module; calling it is a no-op.
    """
    pass
|
interval3-2.0.0
|
interval3-2.0.0//interval3.pyclass:Interval/between
|
@classmethod
def between(cls, a, b, closed=True):
    """Return an interval spanning the values *a* and *b*.

    The endpoints are included when ``closed`` is True and excluded
    otherwise, e.g. ``Interval.between(2, 4)`` gives ``[2..4]`` while
    ``Interval.between(2, 4, False)`` gives ``(2..4)``.
    """
    return cls(a, b, closed=closed)
|
represent-representatives-0.2
|
represent-representatives-0.2//representatives/utils.pyfile:/representatives/utils.py:function:split_name/split_name
|
def split_name(n):
    """Given a name, returns (first_name, last_name).

    The last space-separated token is the last name; everything before it
    (possibly empty) is joined back as the first name.
    """
    *first_parts, last = n.split(' ')
    return ' '.join(first_parts), last
|
palantiri-0.2.0
|
palantiri-0.2.0//palantiri/ClassificationPlotHandlers.pyclass:ClassifierPlotHandler/from_pandas_dataframe
|
@classmethod
def from_pandas_dataframe(cls, dataframe, trained_classifier, **params):
    """Construct the handler from a pandas dataframe.

    :param dataframe: the dataframe from which the handler is constructed;
        it must contain a 'target' column.
    :param trained_classifier: sklearn classifier (trained / fitted).
    :param params: other params.
    :return: the classifier plot handler object.
    """
    assert 'target' in dataframe.columns.values, 'target values not in dataframe'
    # Split off the feature columns once and reuse for data + names.
    features = dataframe.drop('target', axis=1)
    dataset = {
        'data': features.values,
        'target': dataframe['target'].values,
        'feature_names': features.columns.values,
    }
    return cls(dataset, trained_classifier, **params)
|
moviepy_patch-1.0.1
|
moviepy_patch-1.0.1//moviepy/config.pyfile:/moviepy/config.py:function:change_settings/change_settings
|
def change_settings(new_settings=None, filename=None):
    """Change the values of configuration variables.

    :param new_settings: dict mapping configuration names to new values;
        copied into this module's globals.
    :param filename: optional path to a Python file; it is executed and
        every name it defines is also copied into this module's globals.
    """
    new_settings = new_settings or {}
    gl = globals()
    if filename:
        with open(filename) as in_file:
            # BUG FIX: exec() requires source text (str/bytes/code object);
            # the previous code passed the open file object itself, which
            # raises TypeError on Python 3.
            # SECURITY: this executes arbitrary code from `filename`; only
            # point it at trusted configuration files.
            exec(in_file.read())
        gl.update(locals())
    gl.update(new_settings)
|
tocka-Django-1.7.8
|
tocka-Django-1.7.8//django/contrib/admindocs/views.pyfile:/django/contrib/admindocs/views.py:function:get_return_data_type/get_return_data_type
|
def get_return_data_type(func_name):
    """Return a somewhat-helpful data type given a function name"""
    # Only getter-style names carry a type hint in their suffix.
    if not func_name.startswith('get_'):
        return ''
    if func_name.endswith('_list'):
        return 'List'
    if func_name.endswith('_count'):
        return 'Integer'
    return ''
|
pyctest-0.0.12
|
pyctest-0.0.12//source/kitware-cmake/Source/cmConvertMSBuildXMLToJSON.pyfile:/source/kitware-cmake/Source/cmConvertMSBuildXMLToJSON.py:function:__append_list/__append_list
|
def __append_list(append_to, value):
    """Append *value* to the list, extending when value is itself a list.

    A value of None is ignored.
    """
    if value is None:
        return
    if isinstance(value, list):
        append_to.extend(value)
    else:
        append_to.append(value)
|
caatinga-1.1.1
|
caatinga-1.1.1//caatinga/core/functions.pyfile:/caatinga/core/functions.py:function:_getGlob/_getGlob
|
def _getGlob(args):
    """
    Extract the glob value from the args. Items equal to 'from', or beginning
    with a dash are ignored, in which case the default glob '*' is returned.
    """
    if not args:
        return '*'
    candidate = args[0]
    if candidate == 'from' or candidate.startswith('-'):
        return '*'
    return candidate
|
ml_metrics-0.1.4
|
ml_metrics-0.1.4//ml_metrics/average_precision.pyfile:/ml_metrics/average_precision.py:function:apk/apk
|
def apk(actual, predicted, k=10):
    """
    Compute the average precision at k between two lists of items.

    Parameters
    ----------
    actual : list
        Elements that are to be predicted (order doesn't matter).
    predicted : list
        Predicted elements (order does matter).
    k : int, optional
        The maximum number of predicted elements considered.

    Returns
    -------
    score : double
        The average precision at k over the input lists.
    """
    predicted = predicted[:k]
    hits = 0.0
    precision_sum = 0.0
    for rank, p in enumerate(predicted, start=1):
        # Count a hit only the first time a relevant item is predicted.
        if p in actual and p not in predicted[:rank - 1]:
            hits += 1.0
            precision_sum += hits / rank
    if not actual:
        return 1.0
    return precision_sum / min(len(actual), k)
|
boatswain
|
boatswain//util.pyfile:/util.py:function:find_dependencies/find_dependencies
|
def find_dependencies(name, images):
    """
    Find the dependencies of *name* in the images dictionary and return a
    list of all keys in images that should be built.

    :param name: The name of the image to build
    :type name: string
    :param images: The dictionary of images
    :type images: dict(string: image_definition)
    """
    chain = []
    current = name
    # Follow the 'from' links until an image without a base is reached.
    while 'from' in images[current]:
        chain.append(current)
        current = images[current]['from']
    chain.append(current)
    return chain
|
Theano-1.0.4
|
Theano-1.0.4//theano/gof/link.pyfile:/theano/gof/link.py:function:gc_helper/gc_helper
|
def gc_helper(node_list):
    """
    Return the set of Variable instances which are computed by node_list.

    Parameters
    ----------
    node_list
        List of Apply instances in program execution order.

    Returns
    -------
    2-tuple
        FIRST, the set of Variable instances which are computed by node_list,
        and SECOND a dictionary that maps each Variable instance to the last
        node to use it as an input.

    Used to allow garbage collection within graphs.  view_map and
    destroy_map are deliberately ignored: Python reference counting handles
    those, and honouring them here would only delay collection.
    """
    last_user = {}
    computed = set()
    for node in node_list:
        # Later nodes overwrite earlier entries, leaving the *last* user.
        last_user.update((inp, node) for inp in node.inputs)
        computed.update(node.outputs)
    return computed, last_user
|
wrangle-0.6.7
|
wrangle-0.6.7//wrangle/utils/multi_input_support.pyfile:/wrangle/utils/multi_input_support.py:function:multi_input_support/multi_input_support
|
def multi_input_support(X, data):
    """Select columns from ``data`` according to a flexible ``X`` spec.

    WHAT: Automatically interprets flexible input methods for the x variable.
    HOW: x_transform('text', df)

    INPUT: a pandas dataframe ``data`` and ``X`` as one of:
      - list of two ints          -> positional column slice [X[0]:X[1])
      - list of more than 2 ints  -> those column positions
      - list of strings           -> those column labels
      - single int                -> that column position
      - single string             -> that column label
    OUTPUT: a dataframe (or Series for a single column) with the selected
    columns.

    :raises UnboundLocalError: if ``X`` matches none of the supported forms
        (preserved from the original behaviour).
    """
    if isinstance(X, list):
        if len(X) == 2 and isinstance(X[0], int):
            # BUG FIX: DataFrame.ix was removed in pandas 1.0; iloc gives the
            # intended positional column slicing.
            x = data.iloc[:, X[0]:X[1]]
        elif len(X) > 2 and isinstance(X[0], int):
            x = data.iloc[:, X]
        elif isinstance(X[0], str):
            x = data.loc[:, X]
    elif isinstance(X, int):
        x = data.iloc[:, X]
    elif isinstance(X, str):
        x = data[X]
    return x
|
djmodels
|
djmodels//contrib/gis/db/backends/postgis/pgraster.pyfile:/contrib/gis/db/backends/postgis/pgraster.py:function:chunk/chunk
|
def chunk(data, index):
    """
    Split a string into two parts at the input index.
    """
    head = data[:index]
    tail = data[index:]
    return head, tail
|
superset-erik-0.26.0
|
superset-erik-0.26.0//superset/utils.pyfile:/superset/utils.py:function:error_msg_from_exception/error_msg_from_exception
|
def error_msg_from_exception(e):
    """Translate an exception into a human-readable error message.

    Databases attach errors differently: some exceptions carry a ``message``
    attribute that is a dict (with its own 'message' key), some carry a
    plain value, some carry nothing.  Fall back to ``str(e)`` when nothing
    usable is found.
    """
    msg = ''
    message = getattr(e, 'message', None)
    if isinstance(message, dict):
        msg = message.get('message')
    elif message:
        msg = '{}'.format(message)
    return msg or '{}'.format(e)
|
dropbox-10.1.2
|
dropbox-10.1.2//dropbox/team_log.pyclass:EventDetails/paper_enabled_users_group_addition_details
|
@classmethod
def paper_enabled_users_group_addition_details(cls, val):
    """
    Create an instance of this class set to the
    ``paper_enabled_users_group_addition_details`` tag with value ``val``.

    :param PaperEnabledUsersGroupAdditionDetails val:
    :rtype: EventDetails
    """
    tag = 'paper_enabled_users_group_addition_details'
    return cls(tag, val)
|
cx_Freeze-6.1
|
cx_Freeze-6.1//cx_Freeze/hooks.pyfile:/cx_Freeze/hooks.py:function:load_hashlib/load_hashlib
|
def load_hashlib(finder, module):
    """hashlib's fallback modules don't exist if the equivalent OpenSSL
    algorithms are loaded from _hashlib, so we can ignore the error."""
    for missing in ('_md5', '_sha', '_sha256', '_sha512'):
        module.IgnoreName(missing)
|
problem_builder
|
problem_builder//plot.pyfile:/plot.py:function:_normalize_id/_normalize_id
|
def _normalize_id(key):
    """
    Normalize a key to avoid issues where some keys have version/branch and
    others don't, e.g. self.scope_ids.usage_id !=
    self.runtime.get_block(self.scope_ids.usage_id).scope_ids.usage_id
    """
    # Strip branch first, then version, re-checking on the updated key.
    for stripper in ('for_branch', 'for_version'):
        if hasattr(key, stripper):
            key = getattr(key, stripper)(None)
    return key
|
GitHub-Flask-3.2.0
|
GitHub-Flask-3.2.0//flask_github.pyfile:/flask_github.py:function:is_json_response/is_json_response
|
def is_json_response(response):
    """Returns ``True`` if response ``Content-Type`` is JSON.

    :param response: :class:~`requests.Response` object to check
    :type response: :class:~`requests.Response`
    :returns: ``True`` if ``response`` is JSON, ``False`` otherwise
    :rtype bool:
    """
    ctype = response.headers.get('Content-Type', '')
    if ctype == 'application/json':
        return True
    # Allow parameters after the media type, e.g. "; charset=utf-8".
    return ctype.startswith('application/json;')
|
eric6
|
eric6//DebugClients/Python/coverage/debug.pyfile:/DebugClients/Python/coverage/debug.py:function:info_header/info_header
|
def info_header(label):
    """Make a nice header string: '-- label ----...' padded to 62 chars."""
    decorated = ' ' + label + ' '
    # Equivalent to the format spec '{:-<60s}': left-justify, pad with '-'.
    return '--' + decorated.ljust(60, '-')
|
dropbox-10.1.2
|
dropbox-10.1.2//dropbox/team_log.pyclass:EventType/shared_content_change_link_expiry
|
@classmethod
def shared_content_change_link_expiry(cls, val):
    """
    Create an instance of this class set to the
    ``shared_content_change_link_expiry`` tag with value ``val``.

    :param SharedContentChangeLinkExpiryType val:
    :rtype: EventType
    """
    tag = 'shared_content_change_link_expiry'
    return cls(tag, val)
|
mrg_core
|
mrg_core//util/database.pyclass:DatabaseConnection/_collect_records
|
@staticmethod
def _collect_records(stmt, cur, single, records):
    """ Collect the records to return in the :func:`execute` method.

    :param str stmt: the SQL statement executed in the cursor object.
    :param Cursor cur: the database connection cursor object.
    :param bool single: True if the first record is to be returned,
        otherwise all records will be retrieved from the cursor.
    :param list records: the bucket list to fill. This is the
        output object.
    """
    command = stmt.upper()
    if command.startswith('DELETE'):
        # For deletes, record the affected-row count instead of rows.
        records.append(cur.rowcount)
    if command.startswith(('SELECT', 'SHOW')):
        if single:
            first = cur.fetchone()
            rows = [first] if first else []
        else:
            rows = cur.fetchall()
        records.extend(dict(row) for row in rows)
|
python-server-cantrips-0.0.4
|
python-server-cantrips-0.0.4//cantrips/protocol/messaging/formats.pyclass:MsgPackFeature/_import_error_message
|
@classmethod
def _import_error_message(cls):
    """
    Error message shown when msgpack is not installed.
    """
    message = (
        'You need to install msgpack for this to work (pip install msgpack-python>=0.4.6)'
    )
    return message
|
whoswho
|
whoswho//utils.pyfile:/utils.py:function:equate_prefix/equate_prefix
|
def equate_prefix(name1, name2):
    """
    Evaluate whether the names match, or one name prefixes the other.

    Empty names never match.
    """
    if not name1 or not name2:
        return False
    return name1.startswith(name2) or name2.startswith(name1)
|
breezy-3.0.2
|
breezy-3.0.2//breezy/bzr/branch.pyclass:BzrBranchFormat7/get_format_string
|
@classmethod
def get_format_string(cls):
    """See BranchFormat.get_format_string().

    The on-disk marker identifying this branch format; must stay
    byte-exact for format detection.
    """
    return b'Bazaar Branch Format 7 (needs bzr 1.6)\n'
|
snntoolbox
|
snntoolbox//simulation/backends/inisim/ttfs.pyclass:SpikeConcatenate/reset
|
@staticmethod
def reset(sample_idx):
    """Reset layer variables (a no-op for this layer type)."""
    pass
|
bpmn_python-0.0.18
|
bpmn_python-0.0.18//bpmn_python/bpmn_diagram_metrics.pyfile:/bpmn_python/bpmn_diagram_metrics.py:function:CoefficientOfNetworkComplexity_metric/CoefficientOfNetworkComplexity_metric
|
def CoefficientOfNetworkComplexity_metric(bpmn_graph):
    """
    Return the value of the Coefficient of Network Complexity metric
    ("Ratio of the total number of arcs in a process model to its total
    number of nodes.") for the BPMNDiagramGraph instance.
    """
    arc_count = float(len(bpmn_graph.get_flows()))
    node_count = float(len(bpmn_graph.get_nodes()))
    return arc_count / node_count
|
thinc-7.4.0
|
thinc-7.4.0//thinc/neural/util.pyfile:/thinc/neural/util.py:function:mark_sentence_boundaries/mark_sentence_boundaries
|
def mark_sentence_boundaries(sequences, drop=0.0):
    """Pad each sentence sequence, in place, with two '-EOL-' markers on
    both ends.  Returns (sequences, None)."""
    marker = '-EOL-'
    for sequence in sequences:
        sequence[:0] = [marker, marker]
        sequence.extend([marker, marker])
    return sequences, None
|
addignore-1.2.7
|
addignore-1.2.7//addignore/utils.pyfile:/addignore/utils.py:function:load_list_cache/load_list_cache
|
def load_list_cache():
    """Loads the list cache.
    :return python tuple."""
    # NOTE(review): stub — despite the ":return python tuple" docstring the
    # body is `pass`, so this currently returns None.  Confirm intended API.
    pass
|
fake-bpy-module-2.79-20200428
|
fake-bpy-module-2.79-20200428//bpy/ops/mesh.pyfile:/bpy/ops/mesh.py:function:primitive_cube_add/primitive_cube_add
|
def primitive_cube_add(radius: float=1.0, calc_uvs: bool=False, view_align:
    bool=False, enter_editmode: bool=False, location: tuple=(0.0, 0.0, 0.0),
    rotation: tuple=(0.0, 0.0, 0.0), layers: tuple=(False, False, False,
    False, False, False, False, False, False, False, False, False, False,
    False, False, False, False, False, False, False)):
    """Construct a cube mesh
    :param radius: Radius
    :type radius: float
    :param calc_uvs: Generate UVs, Generate a default UV map
    :type calc_uvs: bool
    :param view_align: Align to View, Align the new object to the view
    :type view_align: bool
    :param enter_editmode: Enter Editmode, Enter editmode when adding this object
    :type enter_editmode: bool
    :param location: Location, Location for the newly added object
    :type location: tuple of 3 floats
    :param rotation: Rotation, Rotation for the newly added object
    :type rotation: tuple of 3 floats
    :param layers: Layer
    :type layers: tuple of 20 bools
    """
    # NOTE(review): generated stub from the fake bpy module; calling it is a
    # no-op.  Annotations for location/rotation/layers corrected from `float`
    # / `bool` to tuples to match the actual defaults.
    pass
|
zope.server-4.0.2
|
zope.server-4.0.2//src/zope/server/interfaces/ftp.pyclass:IFTPCommandHandler/cmd_user
|
def cmd_user(args):
    """Specify user name. No read access required.
    """
    # NOTE(review): zope.interface-style method declaration (no `self`, no
    # body); concrete IFTPCommandHandler implementations provide behaviour.
|
fecon236
|
fecon236//tool.pyfile:/tool.py:function:writefile/writefile
|
def writefile(dataframe, filename='tmp-fe-tool.csv', separator=','):
    """Write dataframe to disk file using UTF-8 encoding.

    :param dataframe: pandas dataframe to persist.
    :param filename: destination path (CSV).
    :param separator: field separator passed through to ``to_csv``.
    """
    dataframe.to_csv(filename, sep=separator, encoding='utf-8')
    print(' :: Dataframe written to file: ' + filename)
    return
|
monero_agent-2.0.6
|
monero_agent-2.0.6//monero_glue/xmr/common.pyfile:/monero_glue/xmr/common.py:function:is_empty/is_empty
|
def is_empty(inp):
    """
    True if *inp* is None or has zero length.

    :param inp: value to test (None or any sized container/string)
    :return: bool
    """
    if inp is None:
        return True
    return len(inp) == 0
|
packaging
|
packaging//tags.pyfile:/tags.py:function:_abi3_applies/_abi3_applies
|
def _abi3_applies(python_version):
    """
    Determine if the Python version supports abi3.
    PEP 384 was first implemented in Python 3.2.
    """
    # A bare major version like (3,) cannot be compared meaningfully.
    if len(python_version) <= 1:
        return False
    return tuple(python_version) >= (3, 2)
|
oemof
|
oemof//network/groupings.pyfile:/network/groupings.py:function:_uid_or_str/_uid_or_str
|
def _uid_or_str(node_or_entity):
    """ Helper function to support the transition from `Entitie`s to `Node`s:
    return the object's ``uid`` when present, otherwise its str().
    """
    if hasattr(node_or_entity, 'uid'):
        return node_or_entity.uid
    return str(node_or_entity)
|
ADRpy-0.1.18
|
ADRpy-0.1.18//ADRpy/unitconversions.pyfile:/ADRpy/unitconversions.py:function:lbfft22mbar/lbfft22mbar
|
def lbfft22mbar(press_lbfft2):
    """Convert pressure value from lb/ft^2 to mbar."""
    # Conversion factor: 1 mbar = 2.08854 lb/ft^2.
    lbfft2_per_mbar = 2.08854
    return press_lbfft2 / lbfft2_per_mbar
|
pocketmiku-0.2.1
|
pocketmiku-0.2.1//pocketmiku/control.pyfile:/pocketmiku/control.py:function:noteOff/noteOff
|
def noteOff(note, velocity=69, channel='0'):
    """
    Create a MIDI note-off event string.

    :param note: required note number.
    :param velocity: optional release velocity.
    :param channel: optional channel, a single hex digit character.
    :return: formatted event, e.g. '80 3c 45'.
    """
    fields = (channel, note, velocity)
    return '8%c %2x %2x' % fields
|
pyfs-0.0.8
|
pyfs-0.0.8//fs/fs.pyfile:/fs/fs.py:function:chdir/chdir
|
def chdir(path, **kwargs):
    """Change the current working directory (thin wrapper around os.chdir)."""
    import os
    return os.chdir(path, **kwargs)
|
corpkit-2.3.8
|
corpkit-2.3.8//corpkit/process.pyfile:/corpkit/process.py:function:gui/gui
|
def gui():
    """
    Run the graphical interface with the current directory loaded.
    """
    import os
    from corpkit.gui import corpkit_gui
    corpkit_gui(noupdate=True, loadcurrent=os.getcwd())
|
app_map-0.2
|
app_map-0.2//app_map/docs/app/weather.pyfile:/app_map/docs/app/weather.py:function:weather_all/weather_all
|
def weather_all():
    # NOTE(review): apiDoc-format endpoint specification (POST
    # /map/weather/weather_all, "all weather").  The docstring is consumed
    # by the apiDoc tooling and its field descriptions are intentionally in
    # Chinese, so it is kept verbatim; the function body is empty (doc-only
    # stub).
    """
@api {post} /map/weather/weather_all 全部天气
@apiVersion 1.0.0
@apiGroup Weather
@apiDescription 全部天气
@apiParam (Headers) {String} token 令牌
@apiParam (Headers) {String} device_token 设备令牌
@apiParam (Headers) {String} content_type 内容类型
@apiParam (Headers) {String} timestamp 时间戳
@apiParam (Headers) {String} location 经纬度
@apiSuccess (Success) {Object} basic 基础信息
@apiSuccess (Success) {String} basic.cid 地区/城市ID
@apiSuccess (Success) {String} basic.location 地区/城市名称
@apiSuccess (Success) {String} basic.parent_city 该地区/城市的上级城市
@apiSuccess (Success) {String} basic.admin_area 该地区/城市所属行政区域
@apiSuccess (Success) {String} basic.cnty 该地区/城市所属国家名称
@apiSuccess (Success) {String} basic.lat 地区/城市纬度
@apiSuccess (Success) {String} basic.lon 地区/城市经度
@apiSuccess (Success) {String} basic.tz 该地区/城市所在时区
@apiSuccess (Success) {Object} update 接口更新时间
@apiSuccess (Success) {String} update.loc 当地时间,24小时制,格式yyyy-MM-dd HH:mm
@apiSuccess (Success) {String} update.utc UTC时间,24小时制,格式yyyy-MM-dd HH:mm
@apiSuccess (Success) {String} status 接口状态
@apiSuccess (Success) {Object} now 实况天气
@apiSuccess (Success) {String} now.fl 体感温度,默认单位:摄氏度
@apiSuccess (Success) {String} now.tmp 温度,默认单位:摄氏度
@apiSuccess (Success) {String} now.cond_code 实况天气状况代码
@apiSuccess (Success) {String} now.cond_txt 实况天气状况描述
@apiSuccess (Success) {String} now.wind_deg 风向360角度
@apiSuccess (Success) {String} now.wind_dir 风向
@apiSuccess (Success) {String} now.wind_sc 风力
@apiSuccess (Success) {String} now.wind_spd 风速,公里/小时
@apiSuccess (Success) {String} now.hum 相对湿度
@apiSuccess (Success) {String} now.pcpn 降水量
@apiSuccess (Success) {String} now.pres 大气压强
@apiSuccess (Success) {String} now.vis 能见度,默认单位:公里
@apiSuccess (Success) {String} now.cloud 云量
@apiSuccess (Success) {Object} daily_forecast 天气预报
@apiSuccess (Success) {String} daily_forecast.cond_code_d 白天天气状况代码
@apiSuccess (Success) {String} daily_forecast.cond_code_n 晚间天气状况代码
@apiSuccess (Success) {String} daily_forecast.cond_txt_d 白天天气状况描述
@apiSuccess (Success) {String} daily_forecast.cond_txt_n 晚间天气状况描述
@apiSuccess (Success) {String} daily_forecast.date 预报日期
@apiSuccess (Success) {String} daily_forecast.hum 相对湿度
@apiSuccess (Success) {String} daily_forecast.mr 月升时间
@apiSuccess (Success) {String} daily_forecast.ms 月落时间
@apiSuccess (Success) {String} daily_forecast.pcpn 降水量
@apiSuccess (Success) {String} daily_forecast.pop 降水概率
@apiSuccess (Success) {String} daily_forecast.pres 大气压强
@apiSuccess (Success) {String} daily_forecast.sr 日出时间
@apiSuccess (Success) {String} daily_forecast.ss 日落时间
@apiSuccess (Success) {String} daily_forecast.tmp_max 最高温度
@apiSuccess (Success) {String} daily_forecast.tmp_min 最低温度
@apiSuccess (Success) {String} daily_forecast.uv_index 紫外线强度指数
@apiSuccess (Success) {String} daily_forecast.vis 能见度,单位:公里
@apiSuccess (Success) {String} daily_forecast.wind_deg 风向360角度
@apiSuccess (Success) {String} daily_forecast.wind_dir 风向
@apiSuccess (Success) {String} daily_forecast.wind_sc 风力
@apiSuccess (Success) {String} daily_forecast.wind_spd 风速,公里/小时
@apiSuccess (Success) {Object} lifestyle 生活指数
@apiSuccess (Success) {String} lifestyle.brf 生活指数简介
@apiSuccess (Success) {String} lifestyle.txt 生活指数详细描述
@apiSuccess (Success) {String} lifestyle.type 生活指数类型 comf:舒适度指数、cw:洗车指数、drsg:穿衣指数、flu:感冒指数、sport:运动指数、trav:旅游指数、uv:紫外线指数、air:空气污染扩散条件指数、ac:空调开启指数、ag:过敏指数、gl:太阳镜指数、mu:化妆指数、airc:晾晒指数、ptfc:交通指数、fisin:钓鱼指数、spi:防晒指数
@apiSuccessExample {json} Success-Response:
{
"data": [
{
"basic": {
"cid": "CN101020500",
"location": "嘉定",
"parent_city": "上海",
"admin_area": "上海",
"cnty": "中国",
"lat": "31.38352394",
"lon": "121.25033569",
"tz": "+8.00"
},
"update": {
"loc": "2018-07-17 14:48",
"utc": "2018-07-17 06:48"
},
"status": "ok",
"now": {
"cloud": "75",
"cond_code": "101",
"cond_txt": "多云",
"fl": "34",
"hum": "55",
"pcpn": "0.0",
"pres": "1008",
"tmp": "33",
"vis": "10",
"wind_deg": "182",
"wind_dir": "南风",
"wind_sc": "3",
"wind_spd": "16"
},
"daily_forecast": [
{
"cond_code_d": "101",
"cond_code_n": "101",
"cond_txt_d": "多云",
"cond_txt_n": "多云",
"date": "2018-07-17",
"hum": "75",
"mr": "09:24",
"ms": "22:20",
"pcpn": "0.0",
"pop": "1",
"pres": "1009",
"sr": "05:02",
"ss": "18:59",
"tmp_max": "34",
"tmp_min": "27",
"uv_index": "5",
"vis": "18",
"wind_deg": "134",
"wind_dir": "东南风",
"wind_sc": "4-5",
"wind_spd": "29"
}
],
"lifestyle": [
{
"type": "comf",
"brf": "较不舒适",
"txt": "白天天气多云,有风,但会感到有些热,不很舒适。"
}
]
}
]
}
"""
|
pandas-1.0.3
|
pandas-1.0.3//pandas/io/excel/_openpyxl.pyclass:_OpenpyxlWriter/_convert_to_font
|
@classmethod
def _convert_to_font(cls, font_dict):
"""
Convert ``font_dict`` to an openpyxl v2 Font object.
Parameters
----------
font_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'name'
'size' ('sz')
'bold' ('b')
'italic' ('i')
'underline' ('u')
'strikethrough' ('strike')
'color'
'vertAlign' ('vertalign')
'charset'
'scheme'
'family'
'outline'
'shadow'
'condense'
Returns
-------
font : openpyxl.styles.Font
"""
from openpyxl.styles import Font
_font_key_map = {'sz': 'size', 'b': 'bold', 'i': 'italic', 'u':
'underline', 'strike': 'strikethrough', 'vertalign': 'vertAlign'}
font_kwargs = {}
for k, v in font_dict.items():
if k in _font_key_map:
k = _font_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
font_kwargs[k] = v
return Font(**font_kwargs)
|
transforms3d-0.3.1
|
transforms3d-0.3.1//versioneer.pyfile:/versioneer.py:function:render_git_describe_long/render_git_describe_long
|
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces['closest-tag']
    if tag:
        rendered = '%s-%d-g%s' % (tag, pieces['distance'], pieces['short'])
    else:
        rendered = pieces['short']
    if pieces['dirty']:
        rendered += '-dirty'
    return rendered
|
discord
|
discord//colour.pyclass:Colour/dark_grey
|
@classmethod
def dark_grey(cls):
    """A factory method that returns a :class:`Colour` with a value of ``0x607d8b``."""
    return cls(0x607d8b)
|
chevah
|
chevah//compat/interfaces.pyclass:IDaemon/launch
|
def launch():
    """
    Start the daemon.
    """
    # NOTE(review): interface method declaration (no body); concrete IDaemon
    # implementations provide the actual behaviour.
|
cirrocumulus-1.0.1
|
cirrocumulus-1.0.1//cirrocumulus/zarr_dataset_backed.pyfile:/cirrocumulus/zarr_dataset_backed.py:function:slice_as_int/slice_as_int
|
def slice_as_int(s: slice, l: int) -> int:
    """Converts slices of length 1 to the integer index they’ll access."""
    # range supports len() and indexing, so no list needs to be built.
    indices = range(*s.indices(l))
    assert len(indices) == 1
    return indices[0]
|
behold
|
behold//libs/athena_query_strings.pyfile:/libs/athena_query_strings.py:function:services_by_role/services_by_role
|
def services_by_role(account, days_back, role_arn, role_name):
    """ Returns query for eventsource (service) / actions performed by role. """
    # SECURITY NOTE(review): the Athena SQL is assembled by f-string
    # interpolation.  If account / role_arn / days_back can ever come from
    # untrusted input this is open to SQL injection — confirm callers only
    # pass trusted, validated values.
    # Returns a (query_string, results_prefix) tuple; the second element is
    # the S3 key prefix where Athena results are written.
    query_string = f"""SELECT DISTINCT eventsource, eventname FROM behold
WHERE account = '{account}'
AND (useridentity.sessioncontext.sessionissuer.arn = '{role_arn}')
AND from_iso8601_timestamp(eventtime) > date_add('day', -{days_back}, now())
ORDER BY eventsource, eventname;"""
    return (query_string,
        f'athena_results/services_by_role/{account}/{role_name}')
|
pyephember-0.3.1
|
pyephember-0.3.1//pyephember/pyephember.pyfile:/pyephember/pyephember.py:function:zone_is_boost_active/zone_is_boost_active
|
def zone_is_boost_active(zone):
    """
    Return the zone's 'isboostactive' value (truthy when boost is on).

    :raises KeyError: if the zone mapping lacks 'isboostactive'.
    """
    return zone['isboostactive']
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/cloudsearch.pyfile:/pyboto3/cloudsearch.py:function:can_paginate/can_paginate
|
def can_paginate(operation_name=None):
    """
    Check if an operation can be paginated.
    :type operation_name: string
    :param operation_name: The operation name. This is the same name
        as the method name on the client. For example, if the
        method name is create_foo, and you'd normally invoke the
        operation as client.create_foo(**kwargs), if the
        create_foo operation can be paginated, you can use the
        call client.get_paginator('create_foo').
    """
    # NOTE(review): pyboto3 documentation stub — the body is `pass`; the real
    # implementation lives in botocore's client at runtime.
    pass
|
django-thumbnails-0.3.2
|
django-thumbnails-0.3.2//thumbnails/processors.pyfile:/thumbnails/processors.py:function:crop/crop
|
def crop(image, **kwargs):
    """
    Crop an image based on given width or height; returns the image object.

    NOTE(review): assumes ``image.crop`` mutates the image in place (the
    thumbnail engine's API); PIL's ``Image.crop`` instead returns a new
    image — confirm which engine image type callers pass.
    """
    image.crop(**kwargs)
    return image
|
wetb
|
wetb//prepost/dlcdefs.pyfile:/prepost/dlcdefs.py:function:variable_tag_func/variable_tag_func
|
def variable_tag_func(master, case_id_short=False):
    """
    Identity hook: when using the Excel definitions and the whole default
    setup, no extra variable tagging is required, so *master* is returned
    unchanged.
    """
    return master
|
rasterio
|
rasterio//session.pyclass:GSSession/hascreds
|
@classmethod
def hascreds(cls, config):
    """Determine if the given configuration has proper credentials.

    Parameters
    ----------
    cls : class
        A Session class.
    config : dict
        GDAL configuration as a dict.

    Returns
    -------
    bool
        True when 'GOOGLE_APPLICATION_CREDENTIALS' is present in config.
    """
    key = 'GOOGLE_APPLICATION_CREDENTIALS'
    return key in config
|
pyboto3-1.4.4
|
pyboto3-1.4.4//pyboto3/rds.pyfile:/pyboto3/rds.py:function:describe_event_categories/describe_event_categories
|
def describe_event_categories(SourceType=None, Filters=None):
    """List event categories for all source types, or one specific type.

    Wraps the RDS ``DescribeEventCategories`` API call; the category and
    source-type matrix is documented in the Events topic of the Amazon RDS
    User Guide.

    :type SourceType: string
    :param SourceType: The type of source that will be generating the events.
        Valid values: db-instance | db-parameter-group | db-security-group |
        db-snapshot
    :type Filters: list
    :param Filters: This parameter is not currently supported. Each entry is
        a dict with required 'Name' (string) and 'Values' (list of string).
    :rtype: dict
    :return: ``{'EventCategoriesMapList': [{'SourceType': 'string',
        'EventCategories': ['string']}]}``
    """
    # Documentation stub: the real implementation lives in botocore.
    pass
|
django_query_debug
|
django_query_debug//patch.pyclass:PatchDjangoDescriptors/get_warning_for_deferred_fields
|
@staticmethod
def get_warning_for_deferred_fields(instance, using=None, fields=None):
    """Return a warning string when *fields* contain deferred fields.

    Accessing a deferred field calls refresh_from_db for that field, which
    is why the access is worth warning about. Returns None when *fields*
    is None or when none of them are deferred on *instance*.
    """
    if fields is None:
        return None
    deferred = set(fields).intersection(instance.get_deferred_fields())
    if not deferred:
        return None
    return 'Accessing deferred field(s) {}'.format(', '.join(deferred))
|
nbsite-0.6.7
|
nbsite-0.6.7//examples/sites/holoviews/holoviews/core/data/pandas.pyclass:PandasInterface/unpack_scalar
|
@classmethod
def unpack_scalar(cls, columns, data):
    """Return the single scalar held by a 1x1 *data* frame, else *data*.

    Given a columns object and data in the appropriate format for the
    interface, unwrap a one-row, one-column result into a plain scalar;
    anything larger is returned unchanged.
    """
    if len(data) == 1 and len(data.columns) <= 1:
        return data.iat[0, 0]
    return data
|
ansible-2.9.7
|
ansible-2.9.7//lib/ansible/module_utils/network/common/utils.pyfile:/lib/ansible/module_utils/network/common/utils.py:function:param_list_to_dict/param_list_to_dict
|
def param_list_to_dict(param_list, unique_key='name', remove_key=True):
    """Rotate a list of dictionaries into a dictionary of dictionaries.

    :param param_list: The aforementioned list of dictionaries
    :param unique_key: The name of a key which is present and unique in all
        of param_list's dictionaries. The value behind this key becomes each
        dictionary's key in the new root dictionary.
    :param remove_key: If True, remove unique_key from the individual
        dictionaries before returning.
    """
    rotated = {}
    for entry in param_list:
        entry = dict(entry)  # shallow copy so callers' dicts are untouched
        key = entry.pop(unique_key) if remove_key else entry.get(unique_key)
        rotated[key] = entry
    return rotated
|
pyrates
|
pyrates//utility/annarchy_wrapper.pyfile:/utility/annarchy_wrapper.py:function:adapt_proj/adapt_proj
|
def adapt_proj(proj, params: dict, param_map: dict):
    """Changes the parametrization of a circuit.

    Parameters
    ----------
    proj
        ANNarchy projection instance.
    params
        Key-value pairs of the parameters that should be changed.
    param_map
        Map between the keys in params and the circuit variables. Each entry
        holds a 'var' list of (operator, variable) pairs and an optional
        'edges' list of (source, target, edge) name triples.

    Returns
    -------
    Projection
        Updated projection instance.
    """
    for key, val in params.items():
        for _op, var in param_map[key]['var']:
            # Only apply the value when the projection's name matches one of
            # the mapped edges; no 'edges' entry means nothing to apply.
            edges = param_map[key].get('edges', [])
            for source, target, edge in edges:
                if (source in proj.name and target in proj.name and edge in
                        proj.name):
                    try:
                        proj.set({var: float(val)})
                    except TypeError:
                        # Value is not castable to float; pass it through.
                        proj.set({var: val})
                    except (ValueError, KeyError):
                        # BUG FIX: the original wrote `except [ValueError,
                        # KeyError]` — a list is not a valid exception
                        # specification and would itself raise TypeError if
                        # this clause was ever reached.
                        pass
    return proj
|
thoraxe
|
thoraxe//subexons/subexons.pyfile:/subexons/subexons.py:function:_only_contigous_subexons/_only_contigous_subexons
|
def _only_contigous_subexons(subexon_table):
    """Split subexons into groups of genomically contiguous subexons.

    Rows are sorted by 'SubexonRank'; consecutive rows whose coding
    coordinates are exactly adjacent (difference of 1, with the direction
    depending on 'Strand') are collected into the same group. Returns a
    list with one sub-DataFrame per group.

    NOTE(review): the original one-line docstring ("Return True if the
    subexons should be merged") did not match the code, which returns
    DataFrames. Also, rows that are adjacent to no neighbour never enter
    any group and are therefore dropped from the output — confirm with
    callers that this is intended.
    """
    subexon_table = subexon_table.sort_values(by='SubexonRank', ascending=True)
    indices = subexon_table.index
    n_rows = len(indices)
    # to_keep collects lists of row labels; one inner list per group.
    to_keep = [[]]
    group = 0
    if n_rows > 1:
        for i in range(1, n_rows):
            rowi = indices[i - 1]
            rowj = indices[i]
            # Adjacency test is strand-dependent: on the forward strand the
            # next start must follow the previous end, and vice versa.
            if subexon_table.at[rowi, 'Strand'] == 1:
                merge = subexon_table.at[rowj, 'SubexonCodingStart'
                    ] - subexon_table.at[rowi, 'SubexonCodingEnd'] == 1
            else:
                merge = subexon_table.at[rowi, 'SubexonCodingEnd'
                    ] - subexon_table.at[rowj, 'SubexonCodingStart'] == 1
            if merge:
                # Extend the current group; avoid adding rowi twice when a
                # run of three or more rows is contiguous.
                if rowi not in to_keep[group]:
                    to_keep[group].append(rowi)
                to_keep[group].append(rowj)
            else:
                # Gap found: start a new, empty group.
                to_keep.append([])
                group += 1
    return [subexon_table.loc[(group), :] for group in to_keep]
|
diplomacy-research-1.0.0
|
diplomacy-research-1.0.0//diplomacy_research/utils/cluster_config/reinforcement.pyfile:/diplomacy_research/utils/cluster_config/reinforcement.py:function:_get_iterator_device/_get_iterator_device
|
def _get_iterator_device(job_name, task_id):
""" Returns the iterator device to use """
if job_name != 'learner':
return None
return '/job:%s/task:%d' % (job_name, task_id)
|
colosseum
|
colosseum//engine.pyfile:/engine.py:function:calculate_inline_block_replaced_normal_flow_width/calculate_inline_block_replaced_normal_flow_width
|
def calculate_inline_block_replaced_normal_flow_width(node, context):
    """Width of 'inline-block', replaced elements in normal flow (CSS2 S10.3.10).

    Not implemented yet: always raises NotImplementedError.
    """
    raise NotImplementedError('Section 10.3.10')
|
django_popcorn-1.0.0
|
django_popcorn-1.0.0//popcorn/templatetags/.ropeproject/config.pyfile:/popcorn/templatetags/.ropeproject/config.py:function:set_prefs/set_prefs
|
def set_prefs(prefs):
    """Populate rope project preferences; called before opening the project."""
    defaults = {
        'ignored_resources': ['*.pyc', '*~', '.ropeproject', '.hg',
                              '.svn', '_svn', '.git'],
        'save_objectdb': True,
        'compress_objectdb': False,
        'automatic_soa': True,
        'soa_followed_calls': 0,
        'perform_doa': True,
        'validate_objectdb': True,
        'max_history_items': 32,
        'save_history': True,
        'compress_history': False,
        'indent_size': 4,
        'extension_modules': [],
        'import_dynload_stdmods': True,
        'ignore_syntax_errors': False,
        'ignore_bad_imports': False,
    }
    # Item assignment (not dict.update) so any mapping-like prefs object works.
    for key, value in defaults.items():
        prefs[key] = value
|
manhattan_content-0.3.3
|
manhattan_content-0.3.3//manhattan_content/manhattan/content/snippets.pyclass:SnippetType/get_settings_form
|
@classmethod
def get_settings_form(cls):
    """Return the form used to configure snippets of this type.

    The base type has no settings, hence None.
    """
    return None
|
tiquations-2.0
|
tiquations-2.0//tiquations/convert.pyfile:/tiquations/convert.py:function:gigabytes/gigabytes
|
def gigabytes(bits=None, bytes=None, kilobytes=None, megabytes=None,
    terabytes=None, petabytes=None):
    """Convert a data size to gigabytes. Example: ``gigabytes(terabytes=2)``.

    Exactly one keyword should be supplied; the first non-None value (in
    parameter order) is converted using decimal (SI, base-1000) units.

    :raises ValueError: if no value is supplied. (Improved from a bare
        ``Exception``; ValueError is a subclass, so existing callers that
        catch Exception still work.)
    """
    if bits is not None:
        return bits / 8000000000.0
    elif bytes is not None:
        return bytes / 1000000000.0
    elif kilobytes is not None:
        return kilobytes / 1000000.0
    elif megabytes is not None:
        return megabytes / 1000
    elif terabytes is not None:
        return terabytes * 1000
    elif petabytes is not None:
        return petabytes * 1000000.0
    else:
        raise ValueError(
            'You must specify one value. Example: bits, bytes, kilobytes, megabytes, terabytes, petabytes'
            )
|
coalib
|
coalib//bearlib/languages/documentation/DocumentationExtraction.pyfile:/bearlib/languages/documentation/DocumentationExtraction.py:function:_extract_doc_comment_continuous/_extract_doc_comment_continuous
|
def _extract_doc_comment_continuous(content, line, column, markers):
    """
    Extract a documentation that starts at given beginning with continuous
    layout.
    The property of the continuous layout is that the each-line-marker and the
    end-marker do equal. Documentation is extracted until no further marker is
    found. Applies e.g. for doxygen style python documentation::
        ## main
        #
        # detailed
    :param content: Presplitted lines of the source-code-string.
    :param line:    Line where the documentation comment starts (behind the
                    start marker). Zero-based.
    :param column:  Column where the documentation comment starts (behind the
                    start marker). Zero-based.
    :param markers: The documentation identifying markers.
    :return:        If the comment matched layout a triple with end-of-comment
                    line, column and the extracted documentation. If not
                    matched, returns None.
    """
    # markers[1] is the each-line marker (== end marker in this layout).
    marker_len = len(markers[1])
    # First documentation line: everything behind the start marker.
    doc_comment = content[line][column:]
    line += 1
    while line < len(content):
        pos = content[line].find(markers[1])
        if pos == -1:
            # No each-line marker on this line: the comment ended on the
            # previous line; the end position is the start of this line.
            return line, 0, doc_comment
        else:
            # Append the text behind the marker and keep scanning.
            doc_comment += content[line][pos + marker_len:]
            line += 1
    # Ran off the end of the file while still inside the comment. The end
    # position depends on whether the last line carries a trailing newline.
    if content[line - 1][-1] == '\n':
        column = 0
    else:
        # No trailing newline: the comment ends at the end of the last line.
        line -= 1
        column = len(content[line])
    return line, column, doc_comment
|
json-syntax-2.3.1
|
json-syntax-2.3.1//json_syntax/action_v1.pyfile:/json_syntax/action_v1.py:function:convert_timedelta_str/convert_timedelta_str
|
def convert_timedelta_str(dur):
    """Serialize a timedelta as a bare-bones ISO 8601 duration (PnDTnS form).

    Microseconds, when present, are appended as a six-digit fraction of the
    seconds component.
    """
    if dur.microseconds:
        fraction = '.{:06d}'.format(dur.microseconds)
    else:
        fraction = ''
    return 'P{:d}DT{:d}{}S'.format(dur.days, dur.seconds, fraction)
|
imageio-2.8.0
|
imageio-2.8.0//imageio/plugins/_tifffile.pyfile:/imageio/plugins/_tifffile.py:function:reshape_nd/reshape_nd
|
def reshape_nd(data_or_shape, ndim):
    """Return image array or shape with at least *ndim* dimensions.

    Prepends 1s to the shape as necessary; accepts either an array-like with
    a ``.shape``/``.reshape`` interface or a plain shape tuple.

    >>> reshape_nd(numpy.empty(0), 1).shape
    (0,)
    >>> reshape_nd(numpy.empty(1), 2).shape
    (1, 1)
    >>> reshape_nd(numpy.empty((2, 3)), 3).shape
    (1, 2, 3)
    >>> reshape_nd((2, 3), 3)
    (1, 2, 3)
    """
    given_shape = isinstance(data_or_shape, tuple)
    shape = data_or_shape if given_shape else data_or_shape.shape
    missing = ndim - len(shape)
    if missing <= 0:
        return data_or_shape
    padded = (1,) * missing + tuple(shape)
    return padded if given_shape else data_or_shape.reshape(padded)
|
poezio-0.12.1
|
poezio-0.12.1//poezio/roster_sorting.pyfile:/poezio/roster_sorting.py:function:sort_group_size/sort_group_size
|
def sort_group_size(group):
    """Sort key for roster groups: bigger groups sort first (negated size)."""
    return -1 * len(group)
|
disco-py-0.0.12
|
disco-py-0.0.12//disco/bot/plugin.pyclass:BasePluginDeco/post_listener
|
@classmethod
def post_listener(cls):
    """Decorator: run the wrapped function after a listener is triggered."""
    meta = {'type': 'post_listener'}
    return cls.add_meta_deco(meta)
|
sympathy
|
sympathy//utils/port.pyfile:/utils/port.py:function:disable_linking/disable_linking
|
def disable_linking():
    """
    Internal function for disabling linking.
    Do not use this function in client code.
    It globally disables linking, currently used to avoid a known bug in h5py
    related to h5py.ExternalLink:s and open files.
    """
    # Module-level switch read elsewhere in this module; once cleared here it
    # is never re-enabled at runtime.
    global _use_linking
    _use_linking = False
|
langcodes-py2-1.2.0
|
langcodes-py2-1.2.0//langcodes/tag_parser.pyfile:/langcodes/tag_parser.py:function:normalize_characters/normalize_characters
|
def normalize_characters(tag):
    """Smash a BCP 47 tag into lowercase with hyphens for exact comparison.

    BCP 47 is case-insensitive and treats underscores as equivalent to
    hyphens, so both differences are normalized away.

    >>> normalize_characters('en_US')
    'en-us'
    >>> normalize_characters('zh-Hant_TW')
    'zh-hant-tw'
    """
    lowered = tag.lower()
    return lowered.replace('_', '-')
|
sas-dlpy-1.2.0
|
sas-dlpy-1.2.0//dlpy/model_conversion/write_sas_code.pyfile:/dlpy/model_conversion/write_sas_code.py:function:write_input_layer/write_input_layer
|
def write_input_layer(model_name='sas', layer_name='data', channels='-1',
    width='-1', height='-1', scale='1.0', offsets=None, std=None,
    model_type='CNN'):
    """
    Generate Python code defining a SAS deep learning input layer

    Parameters
    ----------
    model_name : string
        Name for deep learning model
    layer_name : string
        Layer name
    channels : string
        number of input channels
    width : string
        image width
    height : string
        image height
    scale : string
        scaling factor to apply to raw image pixel data
    offsets : list
        image channel offsets, these values will be subtracted from the pixels of
        each image channel
    std : list
        image channel standardization, the pixels of each image channel will be divided
        by these values
    model_type : string
        Specifies the deep learning model type (either CNN or RNN)

    Returns
    -------
    string
        String representing Python code defining a SAS deep learning input layer
    """
    # repr() / literal 'None' so values can be embedded in the generated code.
    if offsets is None:
        str_offset = 'None'
    else:
        str_offset = repr(offsets)
    if std is None:
        str_std = 'None'
    else:
        str_std = repr(std)
    if model_type == 'CNN':
        out = [
            'def sas_model_gen(s, input_crop_type=None, input_channel_offset=' +
            str_offset + ', norm_std = ' + str_std + ', input_image_size=None):',
            '    # quick check for deeplearn actionset',
            '    actionset_list = s.actionsetinfo().setinfo.actionset.tolist()',
            '    actionset_list = [item.lower() for item in actionset_list]',
            '    if "deeplearn" not in actionset_list:s.loadactionset("deeplearn")',
            '    ',
            '    # quick error-checking and default setting',
            '    if (input_crop_type is None):',
            '        input_crop_type="NONE"',
            '    else:',
            '        if (input_crop_type.upper() != "NONE") and (input_crop_type.upper() != "UNIQUE"):',
            '            raise ValueError("Parameter input_crop_type can only be NONE or UNIQUE")',
            '',
            '    if (input_image_size is not None):',
            '        channels = input_image_size[0]',
            '        if (len(input_image_size) == 2):',
            '            height = width = input_image_size[1]',
            # BUG FIX: the generated code previously read `inputImageSize`, a
            # name that does not exist in the generated function (the
            # parameter is `input_image_size`), so a three-element
            # input_image_size raised NameError at runtime.
            '        elif (len(input_image_size) == 3):',
            '            height,width = input_image_size[1:]',
            '        else:',
            '            raise ValueError("Parameter input_image_size must be a tuple with two or three entries")',
            '',
            '    # instantiate model',
            '    s.buildModel(model=dict(name=' + repr(model_name) + ',replace=True),type="CNN")',
            '',
            '    # input layer',
            '    nchannels=' + channels,
            '    if input_channel_offset is None and nchannels==3:',
            '        print("INFO: Setting channel mean values to ImageNet means")',
            '        input_channel_offset = [103.939, 116.779, 123.68]',
            '        s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
            '                   layer=dict( type="input", nchannels=' + channels + ', width=' + width + ', height=' + height + ',',
            '                               scale = ' + scale + ', randomcrop=input_crop_type, offsets=input_channel_offset, offsetStd=norm_std))',
            '    elif input_channel_offset is not None:',
            '        s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
            '                   layer=dict( type="input", nchannels=' + channels + ', width=' + width + ', height=' + height + ',',
            '                               scale = ' + scale + ', randomcrop=input_crop_type, offsets=input_channel_offset, offsetStd=norm_std))',
            '    else:',
            '        s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
            '                   layer=dict( type="input", nchannels=' + channels + ', width=' + width + ', height=' + height + ',',
            '                               scale = ' + scale + ', randomcrop=input_crop_type, offsetStd=norm_std))']
    else:
        out = ['def sas_model_gen(s):',
            '    # quick check for deeplearn actionset',
            '    actionset_list = s.actionsetinfo().setinfo.actionset.tolist()',
            '    actionset_list = [item.lower() for item in actionset_list]',
            '    if "deeplearn" not in actionset_list:s.loadactionset("deeplearn")',
            '    ',
            '',
            '    # instantiate model',
            '    s.buildModel(model=dict(name=' + repr(model_name) + ',replace=True),type="RNN")',
            '',
            '    # input layer',
            '    s.addLayer(model=' + repr(model_name) + ', name=' + repr(layer_name) + ',',
            '               layer=dict( type="input", nchannels=' + channels + ', width=' + width + ',',
            '                           height=' + height + '))']
    return '\n'.join(out)
|
process-improve-0.6.9
|
process-improve-0.6.9//process_improve/datasets.pyfile:/process_improve/datasets.py:function:boilingpot/boilingpot
|
def boilingpot():
    """Full factorial experiments for stove-top boiling of water.

    The response ``y`` is the time, in minutes, to reach 90 degrees Celsius
    (a practical proxy for boiling, which is hard to time precisely).

    Three factors, coded -1 (low) / +1 (high) for regression analysis:

    * A: amount of water — 500 mL (low) or 600 mL (high)
    * B: lid off (low) or lid on (high)
    * C: pot size — 2 L (low) or 3 L (high)

    The first 8 observations form the full factorial in standard order (the
    actual runs were randomized); the last 3 rows are runs close to, or
    interior to, the factorial. The data frame holds 11 observations of
    A, B, C and the response y.

    Source: MOOC on Design of Experiments, "Experimentation for
    Improvement", https://learnche.org
    """
    # Stub: the dataset loader body is not implemented here.
    pass
|
PathCORE-T-1.0.2
|
PathCORE-T-1.0.2//pathcore/network.pyfile:/pathcore/network.py:function:_permutation_correctness/_permutation_correctness
|
def _permutation_correctness(pathway_feature_tuples, original):
"""Determine whether the generated permutation is a valid permutation.
Used in `permute_pathways_across_features`.
(Cannot be identical to the original significant pathways list,
and a feature should map to a distinct set of pathways.)
"""
if pathway_feature_tuples:
if set(pathway_feature_tuples) == set(original):
return False
pathways_in_feature = {}
for pathway, feature in pathway_feature_tuples:
if feature not in pathways_in_feature:
pathways_in_feature[feature] = set()
if pathway in pathways_in_feature[feature]:
return False
else:
pathways_in_feature[feature].add(pathway)
return True
|
pysheet
|
pysheet//pysheet.pyfile:/pysheet.py:function:isList/isList
|
def isList(x):
    """Return True if *x* is some kind of a list (non-string sequence/iterable).

    String-likes (anything with a ``strip`` method) are excluded; otherwise
    having either ``__getitem__`` or ``__iter__`` qualifies.
    """
    # BUG FIX: `and` binds tighter than `or`, so the original expression
    # `not strip and getitem or iter` returned True for strings (which have
    # __iter__). Parenthesize the intended grouping.
    return not hasattr(x, 'strip') and (hasattr(x, '__getitem__') or
                                        hasattr(x, '__iter__'))
|
audio.coders-4.0.2
|
audio.coders-4.0.2//.lib/setuptools/command/alias.pyfile:/.lib/setuptools/command/alias.py:function:shquote/shquote
|
def shquote(arg):
    """Quote an argument for later parsing by shlex.split().

    Arguments containing shell-special characters or whitespace are wrapped
    via repr(); everything else passes through unchanged.
    """
    has_special = any(ch in arg for ch in ('"', "'", '\\', '#'))
    if has_special or arg.split() != [arg]:
        return repr(arg)
    return arg
|
rich_base_provider-1.0.1
|
rich_base_provider-1.0.1//rich_base_provider/sysadmin/coupon/models.pyclass:Coupon/get_coupons_by_coupon_code_list
|
@classmethod
def get_coupons_by_coupon_code_list(cls, code_list):
    """Look up coupons whose codes appear in *code_list* (used coupons).

    :param code_list: list of coupon code strings
    :return: queryset of matching coupons, or False on any error
    """
    try:
        coupons = cls.objects(coupon_code__in=code_list)
    except Exception:
        # Best-effort lookup: signal failure with False rather than raising.
        return False
    return coupons
|
paradrop
|
paradrop//core/config/wifi.pyfile:/core/config/wifi.py:function:getOSWirelessConfig/getOSWirelessConfig
|
def getOSWirelessConfig(update):
    """Build wifi-iface sections from the cached 'networkInterfaces' list.

    Reads wireless interface settings from the update cache and stores the
    resulting (config, options) pairs under 'osWirelessConfig'. Does nothing
    when no interface list has been cached.
    """
    interfaces = update.cache_get('networkInterfaces')
    if interfaces is None:
        return
    sections = []
    for iface in interfaces:
        # Only wireless interfaces produce wifi-iface sections.
        if not iface['type'].startswith('wifi'):
            continue
        options = {
            'device': iface['device'],
            'network': iface['externalIntf'],
            'mode': iface.get('mode', 'ap'),
        }
        options.update(iface['wireless'])
        # Optional settings are copied only when present.
        for optional in ('ssid', 'encryption', 'key'):
            if optional in iface:
                options[optional] = iface[optional]
        sections.append(({'type': 'wifi-iface'}, options))
    update.cache_set('osWirelessConfig', sections)
|
dgl_cu90-0.4.3.post2.data
|
dgl_cu90-0.4.3.post2.data//purelib/dgl/data/chem/utils/complex_to_graph.pyfile:/purelib/dgl/data/chem/utils/complex_to_graph.py:function:get_atomic_numbers/get_atomic_numbers
|
def get_atomic_numbers(mol, indices):
    """Get the atomic numbers for the specified atoms.

    Parameters
    ----------
    mol : rdkit.Chem.rdchem.Mol
        RDKit molecule instance.
    indices : list of int
        Specifying atoms.

    Returns
    -------
    list of int
        Atomic numbers computed.
    """
    return [mol.GetAtomWithIdx(i).GetAtomicNum() for i in indices]
|
OctoPrint-1.4.0
|
OctoPrint-1.4.0//src/octoprint/util/comm.pyfile:/src/octoprint/util/comm.py:function:parse_capability_line/parse_capability_line
|
def parse_capability_line(line):
    """Parse a firmware capability line.

    Lines are expected to look like ``Cap:<NAME>:<0 or 1>``, e.g.::

        Cap:AUTOREPORT_TEMP:1
        Cap:TOGGLE_LIGHTS:0

    Args:
        line (str): the line to parse

    Returns:
        tuple: (capability name in caps, bool flag), or None when the line
        cannot be parsed.
    """
    lowered = line.lower()
    prefix = 'cap:'
    if not lowered.startswith(prefix):
        return None
    name, _sep, flag = lowered[len(prefix):].partition(':')
    # A missing separator yields flag == '', extra colons leave them inside
    # flag; both fail this membership test, matching the strict 2-part form.
    if flag not in ('0', '1'):
        return None
    return name.upper(), flag == '1'
|
zope.app.publisher-4.2.0
|
zope.app.publisher-4.2.0//src/zope/app/publisher/interfaces/ftp.pyclass:IFTPDirectoryPublisher/size
|
def size(name):
    """Return the size of the file at path *name*.

    Interface declaration only (zope.interface style): no implementation.
    """
|
longdivision-0.1
|
longdivision-0.1//longdivision.pyfile:/longdivision.py:function:splitdigits/splitdigits
|
def splitdigits(n):
    """Return the decimal digits of *n*, most significant first.

    An input of 0 yields an empty list, matching the original behaviour.

    >>> splitdigits(6357)
    [6, 3, 5, 7]
    """
    digits = []
    while n:
        n, digit = divmod(n, 10)
        digits.append(digit)
    digits.reverse()
    return digits
|
pre-workbench-0.1a8
|
pre-workbench-0.1a8//pre_workbench/hexdump.pyfile:/pre_workbench/hexdump.py:function:chunks/chunks
|
def chunks(seq, size):
    """Generator that cuts a sequence (bytes, memoryview, etc.) into chunks.

    When the length of *seq* is not a multiple of *size*, the final chunk
    is shorter than requested.

    >>> list( chunks([1,2,3,4,5,6,7], 3) )
    [[1, 2, 3], [4, 5, 6], [7]]
    """
    full_chunks, remainder = divmod(len(seq), size)
    for index in range(full_chunks):
        start = index * size
        yield seq[start:start + size]
    if remainder:
        yield seq[full_chunks * size:]
|
zag
|
zag//engines/worker_based/server.pyclass:Server/_parse_message
|
@staticmethod
def _parse_message(message):
"""Extracts required attributes out of the messages properties.
This extracts the `reply_to` and the `correlation_id` properties. If
any of these required properties are missing a `ValueError` is raised.
"""
properties = []
for prop in ('reply_to', 'correlation_id'):
try:
properties.append(message.properties[prop])
except KeyError:
raise ValueError("The '%s' message property is missing" % prop)
return properties
|
mxnet-1.6.0.data
|
mxnet-1.6.0.data//purelib/mxnet/ndarray/gen_op.pyfile:/purelib/mxnet/ndarray/gen_op.py:function:linalg_gelqf/linalg_gelqf
|
def linalg_gelqf(A=None, out=None, name=None, **kwargs):
    """LQ factorization for general matrix.
    Input is a tensor *A* of dimension *n >= 2*.
    If *n=2*, we compute the LQ factorization (LAPACK *gelqf*, followed by *orglq*). *A*
    must have shape *(x, y)* with *x <= y*, and must have full rank *=x*. The LQ
    factorization consists of *L* with shape *(x, x)* and *Q* with shape *(x, y)*, so
    that:
    *A* = *L* \\* *Q*
    Here, *L* is lower triangular (upper triangle equal to zero) with nonzero diagonal,
    and *Q* is row-orthonormal, meaning that
    *Q* \\* *Q*\\ :sup:`T`
    is equal to the identity matrix of shape *(x, x)*.
    If *n>2*, *gelqf* is performed separately on the trailing two dimensions for all
    inputs (batch mode).
    .. note:: The operator supports float32 and float64 data types only.
    Examples::
       Single LQ factorization
       A = [[1., 2., 3.], [4., 5., 6.]]
       Q, L = gelqf(A)
       Q = [[-0.26726124, -0.53452248, -0.80178373],
            [0.87287156, 0.21821789, -0.43643578]]
       L = [[-3.74165739, 0.],
            [-8.55235974, 1.96396101]]
       Batch LQ factorization
       A = [[[1., 2., 3.], [4., 5., 6.]],
            [[7., 8., 9.], [10., 11., 12.]]]
       Q, L = gelqf(A)
       Q = [[[-0.26726124, -0.53452248, -0.80178373],
             [0.87287156, 0.21821789, -0.43643578]],
            [[-0.50257071, -0.57436653, -0.64616234],
             [0.7620735, 0.05862104, -0.64483142]]]
       L = [[[-3.74165739, 0.],
             [-8.55235974, 1.96396101]],
            [[-13.92838828, 0.],
             [-19.09768702, 0.52758934]]]
    Defined in src/operator/tensor/la_op.cc:L798
    Parameters
    ----------
    A : NDArray
        Tensor of input matrices to be factorized
    out : NDArray, optional
        The output NDArray to hold the result.
    Returns
    -------
    out : NDArray or list of NDArrays
        The output of this function.
    """
    # Auto-generated documentation stub: the real operator is dispatched in
    # the MXNet C++ backend; the placeholder return only keeps the Python
    # signature importable.
    return 0,
|
autopandas
|
autopandas//utils/sdv.pyfile:/utils/sdv.py:function:numeric/numeric
|
def numeric(column):
    """Normalize a numerical column to the [0, 1] range.

    Parameters
    ----------
    column : array-like supporting elementwise arithmetic
        e.g. a numpy array or pandas Series.

    Returns
    -------
    tuple
        (normalized column, column minimum, column maximum).

    Notes
    -----
    A constant column makes the denominator zero; behaviour in that case
    (inf/nan for numpy inputs) is unchanged from the original.
    """
    # Hoist the extrema: the original recomputed min(column) three times
    # and max(column) twice.
    lower = min(column)
    upper = max(column)
    return (column - lower) / (upper - lower), lower, upper
|
benchlingapi-2.1.12
|
benchlingapi-2.1.12//benchlingapi/models/mixins.pyclass:RegistryMixin/list_in_registry
|
@classmethod
def list_in_registry(cls, registry_id: str=None, registry_name: str=None,
    **params):
    """List instances contained in the registry.

    :param registry_id: registry id. If None, 'registry_name' must be provided.
    :param registry_name: registry name. If None, 'registry_id' must be provided.
    :param params: additional search parameters
    :return: list of models in registry
    """
    if registry_id is None:
        # Resolve the id from the name (id=None is passed through harmlessly).
        registry = cls.session.Registry.find_registry(id=registry_id,
            name=registry_name)
        registry_id = registry.id
    return cls.list(registry_id=registry_id, **params)
|
vos-3.1.0
|
vos-3.1.0//vos/vos.pyclass:VOFile/flush
|
@staticmethod
def flush():
    """No-op: VOFile only really flushes on close.

    @return: None
    """
    return None
|
perceval
|
perceval//backends/core/slack.pyclass:Slack/has_archiving
|
@classmethod
def has_archiving(cls):
    """Whether this backend supports archiving items on the fetch process.

    :returns: True — this backend supports an items archive.
    """
    return True
|
pgctl-3.1.1
|
pgctl-3.1.1//pgctl/functions.pyfile:/pgctl/functions.py:function:bestrelpath/bestrelpath
|
def bestrelpath(path, relto=None):
    """Return a relative path only if it's under $PWD (or *relto*).

    Paths outside the base (i.e. whose relative form starts with '.') are
    returned unchanged.
    """
    from os import getcwd
    from os.path import relpath
    base = getcwd() if relto is None else relto
    candidate = relpath(path, base)
    return path if candidate.startswith('.') else candidate
|
temp
|
temp//core/config_default.pyfile:/core/config_default.py:function:userFilters/userFilters
|
def userFilters():
    """Return the customized list of user notification filters to subscribe."""
    subscriptions = ['/restapi/v1.0/account/~/extension/~/message-store']
    return subscriptions
|
exopy_pulses-0.1.0
|
exopy_pulses-0.1.0//exopy_pulses/pulses/utils/normalizers.pyfile:/exopy_pulses/pulses/utils/normalizers.py:function:_normalize/_normalize
|
def _normalize(name):
    """Private normalizing function.

    Splits 'aA'-style camel-case runs with spaces (lowercasing letters that
    start a new word, while keeping acronym runs intact) and drops
    underscores. An optional package prefix (everything before the last '.')
    is preserved untouched.

    NOTE(review): the original docstring claimed underscores are replaced
    by spaces, but the code appends '' for '_' (i.e. removes them) —
    confirm which behaviour is intended.
    """
    package = None
    if '.' in name:
        # Keep the package prefix as-is; only normalize the last component.
        package, name = name.rsplit('.', 1)
    aux = ''
    n_len = len(name)
    for i, char in enumerate(name):
        if char == '_':
            # Drops the underscore (appends the empty string).
            aux += ''
            continue
        if char != '\x00':
            if char.isupper() and i != 0:
                if name[i - 1].islower():
                    # 'aA' boundary: insert a space; lowercase the letter
                    # only when the following char is lowercase too, so
                    # acronym runs keep their capitals.
                    if i + 1 != n_len and name[i + 1].islower():
                        aux += ' ' + char.lower()
                    else:
                        aux += ' ' + char
                elif i + 1 != n_len and name[i + 1].islower():
                    # End of an acronym run followed by a word: new word.
                    aux += ' ' + char.lower()
                else:
                    aux += char
            elif i == 0:
                # First character is always capitalized.
                aux += char.upper()
            else:
                aux += char
    return package + '.' + aux if package else aux
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.