blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e1386f9f07d356dd3aa5604b39f780c4f5fd5eb2 | d1b9c5bb6992e1eabe2b5e4eea01f99384d901bb | /aiida_tbextraction/fp_run/wannier_input/_vasp.py | 94e746cd2789d976497857667c684a4aa521e5a4 | [
"Apache-2.0"
]
| permissive | zx-sdu/aiida-tbextraction | c01148541aded7324fe8cf8ad01f1a54d9e1bf43 | 0bf6d19cbc643e0bdbbe30fe1dd0c6179eb6a647 | refs/heads/master | 2020-09-27T19:53:05.896439 | 2019-01-24T11:40:40 | 2019-01-24T11:40:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,882 | py | # -*- coding: utf-8 -*-
# © 2017-2019, ETH Zurich, Institut für Theoretische Physik
# Author: Dominik Gresch <[email protected]>
"""
Defines a workflow that calculates the Wannier90 input files using VASP.
"""
from fsc.export import export
import numpy as np
from aiida.orm import Code, DataFactory, CalculationFactory
from aiida.orm.data.array.bands import BandsData
from aiida.work.workchain import ToContext
from aiida_tools import check_workchain_step
from aiida_vasp.io.win import WinParser # pylint: disable=import-error,useless-suppression
from . import WannierInputBase
from .._helpers._inline_calcs import reduce_num_wann_inline
@export
class VaspWannierInput(WannierInputBase):
    """
    Calculates the Wannier90 input files using VASP.

    Runs a single ``vasp.vasp2w90`` calculation and re-exposes its retrieved
    files (wannier90.amn/.mmn/.eig), Wannier90 parameters, projections and
    parsed bands as workchain outputs.
    """
    @classmethod
    def define(cls, spec):
        # The base class declares the shared ports (structure, potentials,
        # kpoints_mesh, wannier_parameters, wannier_projections, ...);
        # only the VASP-specific inputs are added here.
        super(VaspWannierInput, cls).define(spec)
        ParameterData = DataFactory('parameter')
        spec.input('code', valid_type=Code, help='Code that runs VASP.')
        spec.input(
            'parameters',
            valid_type=ParameterData,
            help='Parameters for the Vasp2w90 calculation.'
        )
        spec.input_namespace(
            'calculation_kwargs',
            required=False,
            dynamic=True,
            help='Keyword arguments passed to the Vasp2w90 calculation.'
        )
        # Two-step outline: launch the calculation, then harvest its outputs.
        spec.outline(cls.submit_calculation, cls.get_result)
    @check_workchain_step
    def submit_calculation(self):
        """
        Run the Vasp2w90 calculation.
        """
        self.report("Submitting VASP2W90 calculation.")
        # ToContext suspends the workchain until the calculation finishes and
        # exposes it as ``self.ctx.vasp_calc`` in the next outline step.
        return ToContext(
            vasp_calc=self.submit(
                CalculationFactory('vasp.vasp2w90').process(),
                structure=self.inputs.structure,
                # The potential mapping is keyed by 1-tuples of the kind name,
                # which is the form the vasp2w90 calculation input expects.
                potential={(kind, ): pot
                           for kind, pot in self.inputs.potentials.items()},
                kpoints=self.inputs.kpoints_mesh,
                parameters=self.inputs.parameters,
                code=self.inputs.code,
                # Optional ports: forward None when they were not provided.
                wannier_parameters=self.inputs.get('wannier_parameters', None),
                wannier_projections=self.inputs.
                get('wannier_projections', None),
                **self.inputs.get('calculation_kwargs', {})
            )
        )
    @check_workchain_step
    def get_result(self):
        """
        Get the VASP result and create the necessary outputs.
        """
        self.out(
            'wannier_settings',
            DataFactory('parameter')(dict={
                'seedname': 'wannier90'
            })
        )
        vasp_calc_output = self.ctx.vasp_calc.out
        retrieved_folder = vasp_calc_output.retrieved
        folder_list = retrieved_folder.get_folder_list()
        # Sanity check: the Wannier90 overlap / projection / eigenvalue files
        # must have been produced before exposing the retrieved folder.
        assert all(
            filename in folder_list for filename in
            ['wannier90.amn', 'wannier90.mmn', 'wannier90.eig']
        )
        self.report("Adding Wannier90 inputs to output.")
        self.out('wannier_input_folder', retrieved_folder)
        # reduce 'num_wann' if 'exclude_bands' is given
        self.out(
            'wannier_parameters',
            reduce_num_wann_inline(vasp_calc_output.wannier_parameters)[1]
        )
        self.out('wannier_bands', self.parse_wannier_bands(retrieved_folder))
        self.out('wannier_projections', vasp_calc_output.wannier_projections)
    def parse_wannier_bands(self, retrieved_folder):
        """
        Parse the Wannier90 bands from the .win and .eig files.

        :param retrieved_folder: retrieved folder node of the calculation
        :returns: a ``BandsData`` node with k-points and eigenvalues set
        """
        bands = BandsData()
        bands.set_kpoints(
            self.parse_kpts(retrieved_folder.get_abs_path('wannier90.win'))
        )
        bands.set_bands(
            self.parse_eig(retrieved_folder.get_abs_path('wannier90.eig'))
        )
        return bands
    # TODO: Replace with tools from aiida-wannier90, or integrate in vasp2w90
    @staticmethod
    def parse_kpts(win_file):
        """
        Parse the k-points used by Wannier90 from the .win file.
        """
        kpoints = []
        # WinParser exposes the raw lines of the 'kpoints' block; each line
        # holds the coordinates of one k-point as whitespace-separated floats.
        for line in WinParser(win_file).result['kpoints']:
            kpoints.append([float(x) for x in line.split()])
        return np.array(kpoints)
    # TODO: Replace with tools from aiida-wannier90, or integrate in vasp2w90
    @staticmethod
    def parse_eig(eig_file):
        """
        Parse the eigenvalues used by Wannier90 from the .eig file.

        Groups the third column (eigenvalue) by the second column: a new group
        is started each time that index increases. Presumably the columns are
        (band index, k-point index, eigenvalue) -- TODO confirm against the
        Wannier90 .eig file format.
        """
        idx = 1
        bands = []
        bands_part = []
        with open(eig_file, 'r') as in_file:
            for line in in_file:
                _, idx_new, val = line.split()
                idx_new = int(idx_new)
                val = float(val)
                if idx_new > idx:
                    idx = idx_new
                    bands.append(bands_part)
                    bands_part = []
                bands_part.append(val)
            # flush the last group, which the loop above never appends
            bands.append(bands_part)
        return np.array(bands)
| [
"[email protected]"
]
| |
f0821f868eb86caed7a71549fa9b479021aa452e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03563/s841164729.py | 7bbf69bc6f80a866d0951cf151ac7ae99dd8cfb0 | []
import sys


def IS():
    """Read one line from stdin, stripped of the trailing newline."""
    return sys.stdin.readline().rstrip()


def II():
    """Read one line and parse it as a single integer."""
    return int(IS())


def MII():
    """Read one line of whitespace-separated integers into a list."""
    return list(map(int, IS().split()))


def MIIZ():
    """Read integers and shift them to 0-based indices."""
    return [x - 1 for x in MII()]


def main():
    """Read r and g; print the value x with average(r, x) == g."""
    r = II()
    g = II()
    # (r + x) / 2 = g  =>  x = 2*g - r
    print(2*g-r)


if __name__ == '__main__':
    main()
"[email protected]"
]
| |
2c8c63a288fef75b15685e4240da5659502119b6 | 81faab414dcb989904959180bb9233661e884d06 | /qPython/qpython/qcollection.py | 9132742be20e70419c0a847e2d6ee2f075141300 | [
"Apache-2.0"
]
| permissive | xxtEchjovs44/kdbfiles | c781bee7969c6143fc4e947b3960aea79e03e195 | 16baaf5da56ed232e3405c84e526ee7f93422f66 | refs/heads/master | 2020-12-23T20:35:00.446238 | 2019-12-09T06:58:16 | 2019-12-09T06:58:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,493 | py | #
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from qtype import * # @UnusedWildImport
from qpython import MetaData
from qpython.qtemporal import qtemporal, from_raw_qtemporal, to_raw_qtemporal
class QList(numpy.ndarray):
    '''A :class:`numpy.ndarray` subclass representing a q vector.

    Besides the raw numpy data, every instance carries a ``meta`` attribute
    (a ``MetaData`` object) holding the q type of the vector.
    '''
    def _meta_init(self, **meta):
        '''Initialises the meta-information from the given keyword arguments.'''
        self.meta = MetaData(**meta)
    def __eq__(self, other):
        # Two q vectors compare equal when their contents match element-wise.
        return numpy.array_equal(self, other)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # Hash on dtype, q type and the raw byte content of the buffer.
        identity = (self.dtype, self.meta.qtype, self.tostring())
        return hash(identity)
    def __array_finalize__(self, obj):
        # Propagate meta data whenever numpy creates views/copies of this array.
        if obj is None:
            self.meta = MetaData()
        else:
            self.meta = getattr(obj, 'meta', MetaData())
class QTemporalList(QList):
    '''An array object represents a q vector of datetime objects.

    Elements are stored in their raw q encoding; element access wraps the raw
    value in a ``qtemporal`` tagged with the atomic (negative) qtype taken
    from the vector's meta data.
    '''
    def __getitem__(self, idx):
        # Convert the raw stored value on access; the atomic qtype is the
        # negative counterpart of the vector qtype.
        return qtemporal(from_raw_qtemporal(numpy.ndarray.__getitem__(self, idx), -abs(self.meta.qtype)), qtype = -abs(self.meta.qtype))
    def __setitem__(self, idx, value):
        # Bug fix: the previous double negation (``- -abs(...)``) passed a
        # *positive* qtype to to_raw_qtemporal, whereas __getitem__ and the
        # qPython temporal API consistently use the negative atomic qtype.
        numpy.ndarray.__setitem__(self, idx, to_raw_qtemporal(value, -abs(self.meta.qtype)))
    def raw(self, idx):
        '''Gets the raw representation of the datetime object at the specified
        index.

        >>> t = qlist(numpy.array([366, 121, qnull(QDATE)]), qtype=QDATE_LIST)
        >>> print t[0]
        2001-01-01 [metadata(qtype=-14)]
        >>> print t.raw(0)
        366

        :Parameters:
         - `idx` (`integer`) - array index of the datetime object to be retrieved

        :returns: raw representation of the datetime object
        '''
        return numpy.ndarray.__getitem__(self, idx)
def get_list_qtype(array):
    '''Finds out a corresponding qtype for a specified `QList`/`numpy.ndarray`
    instance.

    :Parameters:
     - `array` (`QList` or `numpy.ndarray`) - array to be checked

    :returns: `integer` - qtype matching the specified array object

    :raises: `ValueError` - when `array` is not a numpy array
    '''
    if not isinstance(array, numpy.ndarray):
        raise ValueError('array parameter is expected to be of type: numpy.ndarray, got: %s' % type(array))
    # a QList already carries its qtype in the attached meta data
    if isinstance(array, QList):
        return -abs(array.meta.qtype)
    # single-character arrays are stored with the '|S1' dtype
    if array.dtype == '|S1':
        return QCHAR
    # map the numpy dtype directly to a qtype where possible
    resolved = Q_TYPE.get(array.dtype.type, None)
    if resolved is not None:
        return resolved
    # temporal numpy arrays are identified by their full dtype string
    if array.dtype.type in (numpy.datetime64, numpy.timedelta64):
        resolved = TEMPORAL_PY_TYPE.get(str(array.dtype), None)
        if resolved is not None:
            return resolved
    # fall back: determine the type from the first element of the array
    return Q_TYPE.get(type(array[0]), QGENERAL_LIST)
def qlist(array, adjust_dtype = True, **meta):
    '''Converts an input array to q vector and enriches object instance with
    meta data.

    Returns a :class:`.QList` instance for non-datetime vectors. For datetime
    vectors :class:`.QTemporalList` is returned instead.

    If parameter `adjust_dtype` is `True` and the q type retrieved via
    :func:`.get_list_qtype` doesn't match the one provided as the `qtype`
    parameter, the underlying numpy.array is converted to the correct data
    type.

    `qPython` internally represents ``(0x01;0x02;0xff)`` q list as:
    ``<class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1  2 -1]``.
    This object can be created by calling :func:`.qlist` with the following
    arguments:

    - `byte numpy.array`:

      >>> v = qlist(numpy.array([0x01, 0x02, 0xff], dtype=numpy.byte))
      >>> print '%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v)
      <class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1  2 -1]

    - `int32 numpy.array` with explicit conversion to `QBYTE_LIST`:

      >>> v = qlist(numpy.array([1, 2, -1]), qtype = QBYTE_LIST)
      >>> print '%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v)
      <class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1  2 -1]

    - plain Python `integer` list with explicit conversion to `QBYTE_LIST`:

      >>> v = qlist([1, 2, -1], qtype = QBYTE_LIST)
      >>> print '%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v)
      <class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1  2 -1]

    - numpy datetime64 array with implicit conversion to `QDATE_LIST`:

      >>> v = qlist(numpy.array([numpy.datetime64('2001-01-01'), numpy.datetime64('2000-05-01'), numpy.datetime64('NaT')], dtype='datetime64[D]'))
      >>> print '%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v)
      <class 'qpython.qcollection.QList'> dtype: datetime64[D] qtype: -14: ['2001-01-01' '2000-05-01' 'NaT']

    - numpy datetime64 array with explicit conversion to `QDATE_LIST`:

      >>> v = qlist(numpy.array([numpy.datetime64('2001-01-01'), numpy.datetime64('2000-05-01'), numpy.datetime64('NaT')], dtype='datetime64[D]'), qtype = QDATE_LIST)
      >>> print '%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v)
      <class 'qpython.qcollection.QList'> dtype: datetime64[D] qtype: -14: ['2001-01-01' '2000-05-01' 'NaT']

    :Parameters:
     - `array` (`tuple`, `list`, `numpy.array`) - input array to be converted
     - `adjust_dtype` (`boolean`) - determine whether data type of vector should
       be adjusted if it doesn't match default representation. **Default**: ``True``

       .. note:: numpy `datetime64` and `timedelta64` arrays are not converted
                 to raw temporal vectors if `adjust_dtype` is ``True``

    :Kwargs:
     - `qtype` (`integer` or `None`) - qtype indicator

    :returns: `QList` or `QTemporalList` - array representation of the list

    :raises: `ValueError`
    '''
    if type(array) in (list, tuple):
        if meta and 'qtype' in meta and meta['qtype'] == QGENERAL_LIST:
            # force shape and dtype for generic lists
            # (NOTE: ``xrange`` is a Python 2 builtin; this module targets Python 2)
            tarray = numpy.ndarray(shape = len(array), dtype = numpy.dtype('O'))
            for i in xrange(len(array)):
                tarray[i] = array[i]
            array = tarray
        else:
            array = numpy.array(array)
    if not isinstance(array, numpy.ndarray):
        raise ValueError('array parameter is expected to be of type: numpy.ndarray, list or tuple. Was: %s' % type(array))
    qtype = None
    is_numpy_temporal = array.dtype.type in (numpy.datetime64, numpy.timedelta64)
    if meta and 'qtype' in meta:
        # explicit qtype requested: normalise to the negative (vector) form
        # and, unless disabled, cast the array to the matching numpy dtype
        qtype = -abs(meta['qtype'])
        dtype = PY_TYPE[qtype]
        if adjust_dtype and dtype != array.dtype and not is_numpy_temporal:
            array = array.astype(dtype = dtype)
    qtype = get_list_qtype(array) if qtype is None else qtype
    meta['qtype'] = qtype
    # raw temporal vectors (q-encoded numbers rather than numpy datetime64/
    # timedelta64) need the QTemporalList view, which converts on access
    is_raw_temporal = meta['qtype'] in [QMONTH, QDATE, QDATETIME, QMINUTE, QSECOND, QTIME, QTIMESTAMP, QTIMESPAN] \
                      and not is_numpy_temporal
    vector = array.view(QList) if not is_raw_temporal else array.view(QTemporalList)
    vector._meta_init(**meta)
    return vector
class QDictionary(object):
    '''Represents a q dictionary: an ordered mapping between a vector of keys
    and a vector (or table) of values of equal length.

    :Parameters:
     - `keys` (`QList`, `tuple` or `list`) - dictionary keys
     - `values` (`QTable`, `QList`, `tuple` or `list`) - dictionary values

    :raises: `ValueError` - when key/value types or lengths do not match
    '''
    def __init__(self, keys, values):
        if not isinstance(keys, (QList, tuple, list, numpy.ndarray)):
            raise ValueError('%s expects keys to be of type: QList, tuple or list. Actual type: %s' % (self.__class__.__name__, type(keys)))
        if not isinstance(values, (QTable, QList, tuple, list, numpy.ndarray)):
            raise ValueError('%s expects values to be of type: QTable, QList, tuple or list. Actual type: %s' % (self.__class__.__name__, type(values)))
        if len(keys) != len(values):
            raise ValueError('Number of keys: %d doesn`t match number of values: %d' % (len(keys), len(values)))
        self.keys = keys
        self.values = values
    def __str__(self, *args, **kwargs):
        return '%s!%s' % (self.keys, self.values)
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        # Compare keys and values position by position; order matters in q.
        for position, key in enumerate(self.keys):
            if key != other.keys[position] or self.values[position] != other.values[position]:
                return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def _find_key_(self, key):
        # Linear scan: q dictionaries preserve insertion order, so lookup is
        # by sequential comparison rather than hashing.
        for position, candidate in enumerate(self.keys):
            if key == candidate:
                return position
        raise KeyError('QDictionary doesn`t contain key: %s' % key)
    def __getitem__(self, key):
        return self.values[self._find_key_(key)]
    def __setitem__(self, key, value):
        self.values[self._find_key_(key)] = value
    def __len__(self):
        return len(self.keys)
    def __iter__(self):
        return iter(self.keys)
    def items(self):
        '''Return a copy of the dictionary's list of ``(key, value)`` pairs.'''
        # NOTE: ``xrange`` is a Python 2 builtin; this module targets Python 2.
        pairs = []
        for position in xrange(len(self.keys)):
            pairs.append((self.keys[position], self.values[position]))
        return pairs
    def iteritems(self):
        '''Return an iterator over the dictionary's ``(key, value)`` pairs.'''
        for position in xrange(len(self.keys)):
            yield (self.keys[position], self.values[position])
    def iterkeys(self):
        '''Return an iterator over the dictionary's keys.'''
        return iter(self.keys)
    def itervalues(self):
        '''Return an iterator over the dictionary's values.'''
        return iter(self.values)
class QTable(numpy.recarray):
    '''Represents a q table.

    Internal table data is stored as a `numpy.array` separately for each
    column, mimicking the internal representation of tables in q. Column
    qtype information lives in the ``meta`` attribute.
    '''
    def _meta_init(self, **meta):
        # Attach per-column q type information to this table.
        self.meta = MetaData(**meta)
    def __eq__(self, other):
        return numpy.array_equal(self, other)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __array_finalize__(self, obj):
        # Keep the meta data alive across numpy view/copy operations.
        if obj is None:
            self.meta = MetaData()
        else:
            self.meta = getattr(obj, 'meta', MetaData())
def qtable(columns, data, **meta):
    '''Creates a QTable out of given column names and data, and initialises the
    meta data.

    :class:`.QTable` is represented internally by `numpy.core.records.recarray`.
    Data for each column is converted to :class:`.QList` via the :func:`.qlist`
    function. If a qtype indicator is defined for a column, this information
    is used for explicit array conversion.

    Table examples:

    >>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
    >>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
    ...            [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect'])),
    ...             qlist(numpy.array([98, 42, 126], dtype=numpy.int64))])

    >>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
    >>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
    ...            [qlist(['Dent', 'Beeblebrox', 'Prefect'], qtype = QSYMBOL_LIST),
    ...             qlist([98, 42, 126], qtype = QLONG_LIST)])

    >>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
    >>> t = qtable(['name', 'iq'],
    ...            [['Dent', 'Beeblebrox', 'Prefect'],
    ...             [98, 42, 126]],
    ...            name = QSYMBOL, iq = QLONG)

    >>> # q: flip `name`iq`fullname!(`Dent`Beeblebrox`Prefect;98 42 126;("Arthur Dent"; "Zaphod Beeblebrox"; "Ford Prefect"))
    >>> t = qtable(('name', 'iq', 'fullname'),
    ...            [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect']), qtype = QSYMBOL_LIST),
    ...             qlist(numpy.array([98, 42, 126]), qtype = QLONG_LIST),
    ...             qlist(numpy.array(["Arthur Dent", "Zaphod Beeblebrox", "Ford Prefect"]), qtype = QSTRING_LIST)])

    :Parameters:
     - `columns` (list of `strings`) - table column names
     - `data` (list of lists) - list of columns containing table data

    :Kwargs:
     - `meta` (`integer`) - qtype for particular column

    :returns: `QTable` - representation of q table

    :raises: `ValueError`
    '''
    if len(columns) != len(data):
        raise ValueError('Number of columns doesn`t match the data layout. %s vs %s' % (len(columns), len(data)))
    meta = {} if not meta else meta
    if not 'qtype' in meta:
        meta['qtype'] = QTABLE
    dtypes = []
    # NOTE(review): ``xrange`` and ``numpy.str`` below are Python 2 era
    # constructs; this module targets Python 2.
    for i in xrange(len(columns)):
        if isinstance(data[i], str):
            # convert character list (represented as string) to numpy representation
            data[i] = numpy.array(list(data[i]), dtype = numpy.str)
        # an explicit per-column qtype takes precedence over inference
        if columns[i] in meta:
            data[i] = qlist(data[i], qtype = meta[columns[i]])
        elif not isinstance(data[i], QList):
            if type(data[i]) in (list, tuple):
                data[i] = qlist(data[i], qtype = QGENERAL_LIST)
            else:
                data[i] = qlist(data[i])
        # record the resolved qtype of every column in the table meta data
        meta[columns[i]] = data[i].meta.qtype
        dtypes.append((columns[i], data[i].dtype))
    table = numpy.core.records.fromarrays(data, dtype = dtypes)
    table = table.view(QTable)
    table._meta_init(**meta)
    return table
class QKeyedTable(object):
    '''Represents a q keyed table: a pair of :class:`.QTable` objects where
    the first table holds the key columns and the second one the value
    columns. Both tables must have the same length.

    :Parameters:
     - `keys` (`QTable`) - table keys
     - `values` (`QTable`) - table values

    :raises: `ValueError` - when keys/values are not QTables of equal length
    '''
    def __init__(self, keys, values):
        if not isinstance(keys, QTable):
            raise ValueError('Keys array is required to be of type: QTable')
        if not isinstance(values, QTable):
            raise ValueError('Values array is required to be of type: QTable')
        if len(keys) != len(values):
            raise ValueError('Keys and value arrays cannot have different length')
        self.keys = keys
        self.values = values
    def __str__(self, *args, **kwargs):
        return '%s!%s' % (self.keys, self.values)
    def __eq__(self, other):
        if not isinstance(other, QKeyedTable):
            return False
        return numpy.array_equal(self.keys, other.keys) and numpy.array_equal(self.values, other.values)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __len__(self):
        return len(self.keys)
    def __iter__(self):
        return iter(self.keys)
    def items(self):
        '''Return a copy of the keyed table's list of ``(key, value)`` pairs.'''
        # NOTE: ``xrange`` is a Python 2 builtin; this module targets Python 2.
        pairs = []
        for position in xrange(len(self.keys)):
            pairs.append((self.keys[position], self.values[position]))
        return pairs
    def iteritems(self):
        '''Return an iterator over the keyed table's ``(key, value)`` pairs.'''
        for position in xrange(len(self.keys)):
            yield (self.keys[position], self.values[position])
    def iterkeys(self):
        '''Return an iterator over the keyed table's keys.'''
        return iter(self.keys)
    def itervalues(self):
        '''Return an iterator over the keyed table's values.'''
        return iter(self.values)
| [
"[email protected]"
]
| |
a5cbb7d24f452dbe657a97d15550742eaef4cf13 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03409/s468789013.py | b57e5c6785ee11c84c33be14a422e38f546a4aa9 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | N = int(input())
# Read N red points and N blue points (N was read on the previous line).
# Task: pair as many blue points as possible with red points, where a red
# point (rx, ry) may be paired with a blue point (bx, by) iff rx < bx and
# ry < by, and each red point is used at most once.
R = [tuple(map(int, input().split())) for _ in range(N)]
B = [tuple(map(int, input().split())) for _ in range(N)]
R = sorted(R)
B = sorted(B)
res = 0
# Greedy: process blue points in increasing x order; for each, pair it with
# the compatible red point having the largest y — that red point is the most
# constrained one, so consuming it first never hurts later blue points.
for bx, by in B:
    idx = -1
    tmp = -1
    # idx: index of best candidate red point so far; tmp: its y coordinate
    for i, (rx, ry) in enumerate(R):
        if rx < bx and ry < by:
            if ry >= tmp:
                tmp = ry
                idx = i
    if idx != -1:
        # consume the matched red point so it cannot be paired again
        R.pop(idx)
        res += 1
print(res)
"[email protected]"
]
| |
d41e112d9da13c81b9d2b69b9e6a2da687d3b496 | 53db924607abc85139dafc8c490218721c7cf9c3 | /redturtle/smartlink/config.py | e9813a95e40c4e35ca7ecf6dbf28d38387c4216c | []
| no_license | RedTurtle/redturtle.smartlink | 4f6fbf45551b91d9725efd42758a310bd3508fa6 | f06b6dd45361af170127fade78fe5c936426b0cf | refs/heads/master | 2021-01-17T14:00:39.273854 | 2017-04-24T07:50:29 | 2017-04-24T07:50:29 | 4,359,396 | 1 | 1 | null | 2021-07-26T14:56:03 | 2012-05-17T14:38:30 | Python | UTF-8 | Python | false | false | 156 | py | from Products.ATContentTypes.permission import permissions
# Product name under which the SmartLink content type is registered.
PROJECTNAME = 'redturtle.smartlink'
# SmartLink reuses the add permission of the standard ATContentTypes "Link"
# type (``permissions`` is imported from Products.ATContentTypes above).
ADD_PERMISSIONS = {
    'SmartLink': permissions['Link']
}
| [
"[email protected]"
]
| |
66542ffb5ad08a5084f91e3a68b4479c2696fe83 | a2098c9c8d39cc9e392f21de64c7ced0549d6f1f | /custom/signup/backends.py | a7f6895e690b4c25fe22dab4e2fadd9368bc9ae8 | []
| no_license | dmitryro/divorcesus | 23fe394b0d065f635ecb11eed945cc4fcb9bb829 | 8ecedb2b8a019e63f37702888dd12e994a75105e | refs/heads/master | 2022-12-11T17:20:13.348413 | 2020-10-01T17:27:57 | 2020-10-01T17:27:57 | 56,432,086 | 0 | 1 | null | 2022-12-08T02:22:29 | 2016-04-17T11:05:27 | JavaScript | UTF-8 | Python | false | false | 128 | py | from social_core.backends.facebook import FacebookOAuth2
class CustomFacebookOauth(FacebookOAuth2):
    """Facebook OAuth2 backend with the redirect ``state`` disabled.

    NOTE(review): setting REDIRECT_STATE = False presumably makes
    python-social-auth omit the state parameter from the redirect URI,
    e.g. to satisfy Facebook's strict redirect-URI matching — confirm
    against the social-core backend documentation before changing.
    """
    REDIRECT_STATE = False
| [
"[email protected]"
]
| |
36a512ba1550fa45e98d9afb5b913aa7b5c5b0b3 | f8ffac4fa0dbe27316fa443a16df8a3f1f5cff05 | /Python/Merge_the_Tools.py | 1b827ae71a08079bff6e8e27327e91e02862fc3c | []
def merge_the_tools(string, k):
    """Split *string* into len(string)//k consecutive chunks of size k and
    print each chunk with duplicate characters removed, keeping only the
    first occurrence of each character (HackerRank "Merge the Tools!").

    Assumes len(string) is divisible by k.
    """
    for start in range(0, len(string), k):
        # renamed from ``str``/``sub`` — the original shadowed the builtin str
        chunk = string[start:start + k]
        deduped = ''
        for ch in chunk:
            if ch not in deduped:  # keep only the first occurrence
                deduped += ch
        print(deduped)


if __name__ == '__main__':
    string, k = input(), int(input())
    merge_the_tools(string, k)
| [
"[email protected]"
]
| |
afbf98877c1eb4bfdf843e70f406f5061bfa7b3d | 3293dc42e15e956be202e39db196eed9912dcc01 | /estimation/prediction/machine learning/regression/lightgbm_example.py | 1b35f40b8f5ecf19138ca2343133d7ea57f275fc | []
| no_license | bthowe/data_science | c372e5364f24dc29e3de1fca3504211cb93b62fb | 63291df8084e5f62f9ba226e87db2242bb31ac94 | refs/heads/master | 2021-11-24T10:49:00.800890 | 2021-11-02T16:10:16 | 2021-11-02T16:10:16 | 106,839,857 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | import sys
import joblib
import numpy as np
import pandas as pd
import lightgbm as lgb
from scipy.stats import uniform
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
def model_train_random():
    """Randomised hyper-parameter search for a LightGBM regressor.

    Loads pre-pickled training data from ``../data_files`` and runs a
    100-iteration RandomizedSearchCV with 5-fold cross-validation, scored
    by negative mean absolute error, printing the best parameter set and
    its score.
    """
    X = joblib.load('../data_files/X_train.pkl')
    y = joblib.load('../data_files/y_train_reg.pkl')
    # Search space: list entries are sampled uniformly from the choices;
    # ``uniform()`` entries are scipy.stats uniform(0, 1) distributions
    # sampled per iteration by the randomised search.
    lgb_parameters = {
        'boosting_type': ['gbdt', 'dart', 'goss'],
        'max_depth': [-1, 2, 3, 4, 5],
        'learning_rate': uniform(),
        'n_estimators': [10, 50, 100],
        'min_child_weight': uniform(),
        'colsample_bytree': uniform(),
        'reg_lambda': uniform()
    }
    grid_search = RandomizedSearchCV(
        lgb.LGBMRegressor(objective='regression'),
        lgb_parameters,
        n_iter=100,
        scoring='neg_mean_absolute_error',
        verbose=10,
        n_jobs=-1,
        cv=5
    )
    grid_search.fit(X, y)
    print(grid_search.best_params_)
    print(grid_search.best_score_)
if __name__ == '__main__':
    model_train_random()
| [
"[email protected]"
]
| |
ac2168c980477f2df2b02e347d61bec3f35f7e39 | fe035be449d42bf5d56a67c21eeb13e25db0aea6 | /backend/location/migrations/0001_initial.py | a71f5e5f305cd2b3b28fa5007d0eeaa36e1a325f | []
| no_license | crowdbotics-apps/koxlab2-23672 | 0e5fd0406bb83d449f46cd69bd4d9daf0fda763b | 889c6731266e56624ae84ac33507b01f1f0f5fc0 | refs/heads/master | 2023-02-08T20:18:56.880235 | 2021-01-04T23:16:49 | 2021-01-04T23:16:49 | 326,830,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,700 | py | # Generated by Django 2.2.17 on 2021-01-04 23:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``location`` app.

    Auto-generated by Django 2.2.17; creates the MapLocation, TaskLocation,
    TaskerLocation and CustomerLocation tables. Do not hand-edit field
    definitions once this migration has been applied anywhere.
    """
    initial = True
    # The task_profile tables must exist first: TaskerLocation and
    # CustomerLocation below reference TaskerProfile/CustomerProfile.
    dependencies = [
        ('task_profile', '0001_initial'),
    ]
    operations = [
        # A named latitude/longitude point.
        migrations.CreateModel(
            name='MapLocation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('latitude', models.DecimalField(decimal_places=8, max_digits=12)),
                ('longitude', models.DecimalField(decimal_places=8, max_digits=12)),
            ],
        ),
        # A task's street address tied to a MapLocation.
        migrations.CreateModel(
            name='TaskLocation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.TextField()),
                ('zip', models.CharField(max_length=6)),
                ('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasklocation_location', to='location.MapLocation')),
            ],
        ),
        # A tasker's current coordinates (last_updated set on every save).
        migrations.CreateModel(
            name='TaskerLocation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('latitude', models.DecimalField(decimal_places=8, max_digits=12)),
                ('longitude', models.DecimalField(decimal_places=8, max_digits=12)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('address', models.TextField(blank=True, null=True)),
                ('zip', models.CharField(blank=True, max_length=6, null=True)),
                ('tasker', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='taskerlocation_tasker', to='task_profile.TaskerProfile')),
            ],
        ),
        # A customer's location, one per customer profile.
        migrations.CreateModel(
            name='CustomerLocation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('zip', models.CharField(max_length=6)),
                ('country', models.CharField(max_length=50)),
                ('customer', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customerlocation_customer', to='task_profile.CustomerProfile')),
                ('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='customerlocation_location', to='location.MapLocation')),
            ],
        ),
    ]
| [
"[email protected]"
]
| |
069dd8870f64ecd21c2129646c19c7a8c80a8090 | 4e96f383d4703ad8ee58869ed91a0c8432c8a051 | /Cura/Cura/cura/CrashHandler.py | 1d85a1da54250a586aa5a5bdecf70590d13e5bbb | [
"GPL-3.0-only",
"LGPL-3.0-only"
]
| permissive | flight7788/3d-printing-with-moveo-1 | b2dba26010c4fa31815bc1d2d0966161a8600081 | 7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0 | refs/heads/Feature_Marlin_with_AlanBoy | 2022-08-30T18:36:44.785058 | 2020-05-30T07:52:58 | 2020-05-30T07:52:58 | 212,583,912 | 0 | 0 | MIT | 2020-05-16T07:39:47 | 2019-10-03T13:13:01 | C | UTF-8 | Python | false | false | 16,157 | py | # Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import platform
import traceback
import faulthandler
import tempfile
import os
import os.path
import time
import json
import ssl
import urllib.request
import urllib.error
import certifi
from PyQt5.QtCore import QT_VERSION_STR, PYQT_VERSION_STR, QUrl
from PyQt5.QtWidgets import QDialog, QDialogButtonBox, QVBoxLayout, QLabel, QTextEdit, QGroupBox, QCheckBox, QPushButton
from PyQt5.QtGui import QDesktopServices
from UM.Application import Application
from UM.Logger import Logger
from UM.View.GL.OpenGL import OpenGL
from UM.i18n import i18nCatalog
from UM.Resources import Resources
catalog = i18nCatalog("cura")
# MYPY is always False at runtime; the first branch exists only so static
# type checkers see a definition of CuraDebugMode without importing the
# generated cura.CuraVersion module.
MYPY = False
if MYPY:
    CuraDebugMode = False
else:
    try:
        from cura.CuraVersion import CuraDebugMode
    except ImportError:
        # Fall back to a release (non-debug) configuration when the generated
        # version module is unavailable, e.g. when running from source.
        CuraDebugMode = False # [CodeStyle: Reflecting imported value]
# List of exceptions that should not be considered "fatal" and abort the program.
# These are primarily some exception types that we simply skip
skip_exception_types = [
    SystemExit,
    KeyboardInterrupt,
    GeneratorExit
]
class CrashHandler:
    """Collects crash information, shows a crash-report dialog and optionally
    uploads the report to Ultimaker's stats endpoint.

    Two modes of operation, selected by ``has_started``:
    * Cura fully started: a modal "Crash Report" dialog is shown for fatal errors.
    * Cura failed during start-up: an "early crash" dialog is shown that also
      offers a configuration backup/reset.
    Note: several paths end in ``os._exit(1)``, terminating the process.
    """

    # Endpoint the JSON crash report is POSTed to.
    crash_url = "https://stats.ultimaker.com/api/cura"

    def __init__(self, exception_type, value, tb, has_started = True):
        """Record the exception, log its traceback and build the dialogs.

        :param exception_type: class of the uncaught exception
        :param value: the exception instance
        :param tb: traceback object of the uncaught exception
        :param has_started: whether Cura finished starting when the crash occurred
        """
        self.exception_type = exception_type
        self.value = value
        self.traceback = tb
        self.has_started = has_started
        self.dialog = None # Don't create a QDialog before there is a QApplication
        # While we create the GUI, the information will be stored for sending afterwards
        self.data = dict()
        self.data["time_stamp"] = time.time()
        Logger.log("c", "An uncaught error has occurred!")
        for line in traceback.format_exception(exception_type, value, tb):
            for part in line.rstrip("\n").split("\n"):
                Logger.log("c", part)
        # If Cura has fully started, we only show fatal errors.
        # If Cura has not fully started yet, we always show the early crash dialog. Otherwise, Cura will just crash
        # without any information.
        if has_started and exception_type in skip_exception_types:
            return
        if not has_started:
            self._send_report_checkbox = None
            self.early_crash_dialog = self._createEarlyCrashDialog()
        self.dialog = QDialog()
        self._createDialog()

    def _createEarlyCrashDialog(self):
        """Build the dialog shown when Cura crashes before start-up completes."""
        dialog = QDialog()
        dialog.setMinimumWidth(500)
        dialog.setMinimumHeight(170)
        dialog.setWindowTitle(catalog.i18nc("@title:window", "Cura can't start"))
        dialog.finished.connect(self._closeEarlyCrashDialog)
        layout = QVBoxLayout(dialog)
        label = QLabel()
        label.setText(catalog.i18nc("@label crash message", """<p><b>Oops, Ultimaker Cura has encountered something that doesn't seem right.</p></b>
<p>We encountered an unrecoverable error during start up. It was possibly caused by some incorrect configuration files. We suggest to backup and reset your configuration.</p>
<p>Backups can be found in the configuration folder.</p>
<p>Please send us this Crash Report to fix the problem.</p>
"""))
        label.setWordWrap(True)
        layout.addWidget(label)
        # "send report" check box and show details
        self._send_report_checkbox = QCheckBox(catalog.i18nc("@action:button", "Send crash report to Ultimaker"), dialog)
        self._send_report_checkbox.setChecked(True)
        show_details_button = QPushButton(catalog.i18nc("@action:button", "Show detailed crash report"), dialog)
        show_details_button.setMaximumWidth(200)
        show_details_button.clicked.connect(self._showDetailedReport)
        show_configuration_folder_button = QPushButton(catalog.i18nc("@action:button", "Show configuration folder"), dialog)
        show_configuration_folder_button.setMaximumWidth(200)
        show_configuration_folder_button.clicked.connect(self._showConfigurationFolder)
        layout.addWidget(self._send_report_checkbox)
        layout.addWidget(show_details_button)
        layout.addWidget(show_configuration_folder_button)
        # "backup and start clean" and "close" buttons
        buttons = QDialogButtonBox()
        buttons.addButton(QDialogButtonBox.Close)
        buttons.addButton(catalog.i18nc("@action:button", "Backup and Reset Configuration"), QDialogButtonBox.AcceptRole)
        buttons.rejected.connect(self._closeEarlyCrashDialog)
        buttons.accepted.connect(self._backupAndStartClean)
        layout.addWidget(buttons)
        return dialog

    def _closeEarlyCrashDialog(self):
        """Send the report if requested and terminate the process."""
        if self._send_report_checkbox.isChecked():
            self._sendCrashReport()
        os._exit(1)

    ##  Backup the current resource directories and create clean ones.
    def _backupAndStartClean(self):
        Resources.factoryReset()
        self.early_crash_dialog.close()

    def _showConfigurationFolder(self):
        """Open the configuration storage folder in the OS file browser."""
        path = Resources.getConfigStoragePath()
        QDesktopServices.openUrl(QUrl.fromLocalFile( path ))

    def _showDetailedReport(self):
        """Show the full crash-report dialog (modal)."""
        self.dialog.exec_()

    ##  Creates a modal dialog.
    def _createDialog(self):
        self.dialog.setMinimumWidth(640)
        self.dialog.setMinimumHeight(640)
        self.dialog.setWindowTitle(catalog.i18nc("@title:window", "Crash Report"))
        # if the application has not fully started, this will be a detailed report dialog which should not
        # close the application when it's closed.
        if self.has_started:
            self.dialog.finished.connect(self._close)
        layout = QVBoxLayout(self.dialog)
        layout.addWidget(self._messageWidget())
        layout.addWidget(self._informationWidget())
        layout.addWidget(self._exceptionInfoWidget())
        layout.addWidget(self._logInfoWidget())
        layout.addWidget(self._userDescriptionWidget())
        layout.addWidget(self._buttonsWidget())

    def _close(self):
        """Terminate the process when the fatal-crash dialog is closed."""
        os._exit(1)

    def _messageWidget(self):
        """Top-of-dialog explanatory label."""
        label = QLabel()
        label.setText(catalog.i18nc("@label crash message", """<p><b>A fatal error has occurred in Cura. Please send us this Crash Report to fix the problem</p></b>
            <p>Please use the "Send report" button to post a bug report automatically to our servers</p>
        """))
        return label

    def _informationWidget(self):
        """Group box with Cura/OS/Qt/OpenGL versions; also fills self.data."""
        group = QGroupBox()
        group.setTitle(catalog.i18nc("@title:groupbox", "System information"))
        layout = QVBoxLayout()
        label = QLabel()
        try:
            from UM.Application import Application
            self.cura_version = Application.getInstance().getVersion()
        except:
            self.cura_version = catalog.i18nc("@label unknown version of Cura", "Unknown")
        crash_info = "<b>" + catalog.i18nc("@label Cura version number", "Cura version") + ":</b> " + str(self.cura_version) + "<br/>"
        crash_info += "<b>" + catalog.i18nc("@label Type of platform", "Platform") + ":</b> " + str(platform.platform()) + "<br/>"
        crash_info += "<b>" + catalog.i18nc("@label", "Qt version") + ":</b> " + str(QT_VERSION_STR) + "<br/>"
        crash_info += "<b>" + catalog.i18nc("@label", "PyQt version") + ":</b> " + str(PYQT_VERSION_STR) + "<br/>"
        crash_info += "<b>" + catalog.i18nc("@label OpenGL version", "OpenGL") + ":</b> " + str(self._getOpenGLInfo()) + "<br/>"
        label.setText(crash_info)
        layout.addWidget(label)
        group.setLayout(layout)
        self.data["cura_version"] = self.cura_version
        self.data["os"] = {"type": platform.system(), "version": platform.version()}
        self.data["qt_version"] = QT_VERSION_STR
        self.data["pyqt_version"] = PYQT_VERSION_STR
        return group

    def _getOpenGLInfo(self):
        """Return an HTML snippet with OpenGL details; also fills self.data."""
        opengl_instance = OpenGL.getInstance()
        if not opengl_instance:
            self.data["opengl"] = {"version": "n/a", "vendor": "n/a", "type": "n/a"}
            return catalog.i18nc("@label", "Not yet initialized<br/>")
        info = "<ul>"
        info += catalog.i18nc("@label OpenGL version", "<li>OpenGL Version: {version}</li>").format(version = opengl_instance.getOpenGLVersion())
        info += catalog.i18nc("@label OpenGL vendor", "<li>OpenGL Vendor: {vendor}</li>").format(vendor = opengl_instance.getGPUVendorName())
        info += catalog.i18nc("@label OpenGL renderer", "<li>OpenGL Renderer: {renderer}</li>").format(renderer = opengl_instance.getGPUType())
        info += "</ul>"
        self.data["opengl"] = {"version": opengl_instance.getOpenGLVersion(), "vendor": opengl_instance.getGPUVendorName(), "type": opengl_instance.getGPUType()}
        return info

    def _exceptionInfoWidget(self):
        """Group box with the traceback text; parses the crash location
        (file, line, function, plugin name/version) into self.data."""
        group = QGroupBox()
        group.setTitle(catalog.i18nc("@title:groupbox", "Error traceback"))
        layout = QVBoxLayout()
        text_area = QTextEdit()
        trace_list = traceback.format_exception(self.exception_type, self.value, self.traceback)
        trace = "".join(trace_list)
        text_area.setText(trace)
        text_area.setReadOnly(True)
        layout.addWidget(text_area)
        group.setLayout(layout)
        # Parsing all the information to fill the dictionary
        summary = ""
        if len(trace_list) >= 1:
            summary = trace_list[len(trace_list)-1].rstrip("\n")
        module = [""]
        if len(trace_list) >= 2:
            module = trace_list[len(trace_list)-2].rstrip("\n").split("\n")
        module_split = module[0].split(", ")
        filepath_directory_split = module_split[0].split("\"")
        filepath = ""
        if len(filepath_directory_split) > 1:
            filepath = filepath_directory_split[1]
        directory, filename = os.path.split(filepath)
        line = ""
        if len(module_split) > 1:
            line = int(module_split[1].lstrip("line "))
        function = ""
        if len(module_split) > 2:
            function = module_split[2].lstrip("in ")
        code = ""
        if len(module) > 1:
            code = module[1].lstrip(" ")
        # Using this workaround for a cross-platform path splitting
        split_path = []
        folder_name = ""
        # Split until reach folder "cura"
        while folder_name != "cura":
            directory, folder_name = os.path.split(directory)
            if not folder_name:
                break
            split_path.append(folder_name)
        # Look for plugins. If it's not a plugin, the current cura version is set
        isPlugin = False
        module_version = self.cura_version
        module_name = "Cura"
        if split_path.__contains__("plugins"):
            isPlugin = True
            # Look backwards until plugin.json is found
            directory, name = os.path.split(filepath)
            while not os.listdir(directory).__contains__("plugin.json"):
                directory, name = os.path.split(directory)
            json_metadata_file = os.path.join(directory, "plugin.json")
            try:
                with open(json_metadata_file, "r", encoding = "utf-8") as f:
                    try:
                        metadata = json.loads(f.read())
                        module_version = metadata["version"]
                        module_name = metadata["name"]
                    except json.decoder.JSONDecodeError:
                        # Not throw new exceptions
                        Logger.logException("e", "Failed to parse plugin.json for plugin %s", name)
            except:
                # Not throw new exceptions
                pass
        exception_dict = dict()
        exception_dict["traceback"] = {"summary": summary, "full_trace": trace}
        exception_dict["location"] = {"path": filepath, "file": filename, "function": function, "code": code, "line": line,
                                      "module_name": module_name, "version": module_version, "is_plugin": isPlugin}
        self.data["exception"] = exception_dict
        return group

    def _logInfoWidget(self):
        """Group box with a faulthandler dump of all thread stacks; the dump
        goes through a temp file because faulthandler writes to a file object."""
        group = QGroupBox()
        group.setTitle(catalog.i18nc("@title:groupbox", "Logs"))
        layout = QVBoxLayout()
        text_area = QTextEdit()
        tmp_file_fd, tmp_file_path = tempfile.mkstemp(prefix = "cura-crash", text = True)
        os.close(tmp_file_fd)
        with open(tmp_file_path, "w", encoding = "utf-8") as f:
            faulthandler.dump_traceback(f, all_threads=True)
        with open(tmp_file_path, "r", encoding = "utf-8") as f:
            logdata = f.read()
        text_area.setText(logdata)
        text_area.setReadOnly(True)
        layout.addWidget(text_area)
        group.setLayout(layout)
        self.data["log"] = logdata
        return group

    def _userDescriptionWidget(self):
        """Free-text area where the user can describe what happened."""
        group = QGroupBox()
        group.setTitle(catalog.i18nc("@title:groupbox", "User description" +
                                     " (Note: Developers may not speak your language, please use English if possible)"))
        layout = QVBoxLayout()
        # When sending the report, the user comments will be collected
        self.user_description_text_area = QTextEdit()
        self.user_description_text_area.setFocus(True)
        layout.addWidget(self.user_description_text_area)
        group.setLayout(layout)
        return group

    def _buttonsWidget(self):
        """Bottom button row: Close always; Send report only after start-up."""
        buttons = QDialogButtonBox()
        buttons.addButton(QDialogButtonBox.Close)
        # Like above, this will be served as a separate detailed report dialog if the application has not yet been
        # fully loaded. In this case, "send report" will be a check box in the early crash dialog, so there is no
        # need for this extra button.
        if self.has_started:
            buttons.addButton(catalog.i18nc("@action:button", "Send report"), QDialogButtonBox.AcceptRole)
            buttons.accepted.connect(self._sendCrashReport)
        buttons.rejected.connect(self.dialog.close)
        return buttons

    def _sendCrashReport(self):
        """POST self.data as JSON to crash_url over TLS, then exit the process."""
        # Before sending data, the user comments are stored
        self.data["user_info"] = self.user_description_text_area.toPlainText()
        # Convert data to bytes
        binary_data = json.dumps(self.data).encode("utf-8")
        # CURA-6698 Create an SSL context and use certifi CA certificates for verification.
        context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_2)
        context.load_verify_locations(cafile = certifi.where())
        # Submit data
        kwoptions = {"data": binary_data,
                     "timeout": 5,
                     "context": context}
        Logger.log("i", "Sending crash report info to [%s]...", self.crash_url)
        if not self.has_started:
            print("Sending crash report info to [%s]...\n" % self.crash_url)
        try:
            f = urllib.request.urlopen(self.crash_url, **kwoptions)
            Logger.log("i", "Sent crash report info.")
            if not self.has_started:
                print("Sent crash report info.\n")
            f.close()
        except urllib.error.HTTPError as e:
            Logger.logException("e", "An HTTP error occurred while trying to send crash report")
            if not self.has_started:
                print("An HTTP error occurred while trying to send crash report: %s" % e)
        except Exception as e:  # We don't want any exception to cause problems
            Logger.logException("e", "An exception occurred while trying to send crash report")
            if not self.has_started:
                print("An exception occurred while trying to send crash report: %s" % e)
        os._exit(1)

    def show(self):
        # must run the GUI code on the Qt thread, otherwise the widgets on the dialog won't react correctly.
        Application.getInstance().callLater(self._show)

    def _show(self):
        # When the exception is in the skip_exception_types list, the dialog is not created, so we don't need to show it
        if self.dialog:
            self.dialog.exec_()
        os._exit(1)
| [
"[email protected]"
]
| |
5d1ad3f78ce3801ebda2fa11170ef8e2a873fa60 | be9a1995c4a3a5eefcfe665801705e5eefa4f4d6 | /backlog.py | f41edd25708886a8185e80b2f8388e3e9cd07bbe | []
| no_license | intelliflovrk/SpareTime | c377bc867fe45a7fd4150095506d3af5e532960f | 975d2150e09e13ec5f3b5bec0016555f5f6ba0b0 | refs/heads/master | 2022-07-28T01:01:12.472879 | 2020-05-24T18:23:51 | 2020-05-24T18:23:51 | 260,218,638 | 0 | 2 | null | 2020-05-24T18:23:53 | 2020-04-30T13:26:55 | Python | UTF-8 | Python | false | false | 1,047 | py | fruit = ["apple", "banana", "mango"]
veg = ['carrot', 'beans', 'potato']
drink = ['milk', 'water', 'juice']
#T1
""" Create a function here that print what kind of item is given from the above lists.
Example: if 'apple' given then it should print 'apple is a fruit.' """
#T2
"""Create a function that accepts unlimited lists and return a new_list with
all combined items from given lists(params)."""
#T3
"""Create a function which accepts a parameter(only string) and print its key where the
value is present by parsing the json file(food.json).
Example: if "milk" is given then it should print "milk is located in item4"
Please find the attached sample json file."""
#T4
"""Write a Python function that accepts a string and calculate the
number of upper case letters and lower case letters. Go to the editor
Sample String : 'Raj Playing COD'
Expected Output :
No. of Upper case characters : 5
No. of Lower case Characters : 8 """
#T5
"""Create a python script for BigBank user to manage current account using BigBank.png flowchart."""
| [
"[email protected]"
]
| |
1f3d959606679dc7ba5947b12daca67bc6146d51 | 13b558c3541ff00402f206c8c82a8ced18f3a76c | /test/kernel/test_kernel_relu.py | 2cb10b308b11f1588ed60af35c03ceb345034eda | [
"MIT"
]
| permissive | lvdongxu/UnarySim | aeafa4fff35319a1ccdaca6bd4f227d5f16ccf57 | e32531e452e7831d5e03f9f84b5f35d7e9bceaa9 | refs/heads/master | 2023-06-18T19:56:50.434117 | 2021-07-19T20:27:21 | 2021-07-19T20:27:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,374 | py | # %%
import torch
from UnarySim.kernel.relu import FSUReLU
from UnarySim.stream.gen import RNG, SourceGen, BSGen
from UnarySim.metric.metric import ProgError
import matplotlib.pyplot as plt
import time
import math
import numpy as np
# %%
# Run on the first GPU when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# %%
def test(rng="Sobol",
         mode="bipolar",
         bitwidth=8,
         buf_dep=8,
         total_cnt=100,
         sr=False
         ):
    """Sweep every representable input code through an FSUReLU unit and plot
    the output error envelope over ``total_cnt`` RNG seeds.

    Args:
        rng: name of the random number generator for bitstream generation (e.g. "Sobol")
        mode: unary encoding, "unipolar" or "bipolar"
        bitwidth: bitstream resolution; 2**bitwidth cycles are simulated
        buf_dep: depth of the FSUReLU buffer / shift register
        total_cnt: number of RNG indices (seeds) averaged over
        sr: True to use the shift-register variant of FSUReLU

    Relies on the module-level ``device`` and the UnarySim imports.
    Returns None; prints error statistics and shows a matplotlib figure.
    """
    stype = torch.float
    btype = torch.float
    rtype = torch.float

    print("========================================================")
    # Bug fix: `sr` is a bool, so concatenating it directly to a str raised
    # TypeError; convert it explicitly.
    print(rng + " " + mode + " using shift register: " + str(sr))
    print("========================================================")

    # Input range covers every representable code for the chosen encoding.
    low_bound = 0
    if mode == "unipolar":
        up_bound = 2**bitwidth
    elif mode == "bipolar":
        low_bound = -2**(bitwidth-1)
        up_bound = 2**(bitwidth-1)

    input_list = [input_val for input_val in range(low_bound, up_bound+1)]

    input = torch.tensor(input_list).type(torch.float).div(up_bound).to(device)
    output = torch.nn.ReLU()(input).to(device)

    result_pe_total = []
    for rand_idx in range(1, total_cnt+1):
        outputPE = ProgError(output, mode=mode).to(device)
        inputPE = ProgError(input, mode=mode).to(device)
        inputSRC = SourceGen(input, bitwidth, mode=mode, rtype=rtype)().to(device)
        dut = FSUReLU(depth=buf_dep, bitwidth=bitwidth, encode="RC", shiftreg=sr, stype=stype, btype=btype).to(device)
        inputRNG = RNG(bitwidth, rand_idx, rng, rtype)().to(device)
        inputBS = BSGen(inputSRC, inputRNG, stype).to(device)
        with torch.no_grad():
            # (removed an unused `start_time = time.time()` left over from profiling)
            for i in range(2**bitwidth):
                input_bs = inputBS(torch.tensor([i]))
                inputPE.Monitor(input_bs)
                output_bs = dut(input_bs)
                outputPE.Monitor(output_bs)
        # progressive error for this seed
        result_pe = outputPE()[1].cpu().numpy()
        result_pe_total.append(result_pe)
    result_pe_total = np.array(result_pe_total)

    #######################################################################
    # check the error of all simulation
    #######################################################################
    print("RMSE:{:1.4}".format(math.sqrt(np.mean(result_pe_total**2))))
    print("MAE: {:1.4}".format(np.mean(np.abs(result_pe_total))))
    print("bias:{:1.4}".format(np.mean(result_pe_total)))
    print("max: {:1.4}".format(np.max(result_pe_total)))
    print("min: {:1.4}".format(np.min(result_pe_total)))

    #######################################################################
    # check the error according to input value
    #######################################################################
    max_total = np.max(result_pe_total, axis=0)
    min_total = np.min(result_pe_total, axis=0)
    avg_total = np.mean(result_pe_total, axis=0)

    axis_len = outputPE()[1].size()[0]
    input_x_axis = []
    for axis_index in range(axis_len):
        input_x_axis.append((axis_index/(axis_len-1)*(up_bound-low_bound)+low_bound)/up_bound)
    fig, ax = plt.subplots()
    ax.fill_between(input_x_axis, max_total, avg_total, facecolor="red", alpha=0.75)
    ax.fill_between(input_x_axis, avg_total, min_total, facecolor="blue", alpha=0.75)
    ax.plot(input_x_axis, avg_total, label='Avg error', color="black", linewidth=0.3)
    plt.tight_layout()
    plt.xlabel('Input value')
    plt.ylabel('Output error')
    plt.xticks(np.arange(-1.0, 1.1, step=0.5))
    plt.xlim(-1, 1)
    plt.yticks(np.arange(-1.0, 1.0, step=0.2))
    plt.ylim(-1, 1)
    plt.grid(b=True, which="both", axis="y", linestyle="--", color="grey", linewidth=0.3)
    fig.set_size_inches(4, 4)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    plt.show()
    plt.close()
# %%
# Smoke-run the sweep for three buffer-depth / shift-register configurations.
test(rng="Sobol", mode="bipolar", total_cnt=100, bitwidth=8, buf_dep=5, sr=False)
# %%
test(rng="Sobol", mode="bipolar", total_cnt=100, bitwidth=8, buf_dep=16, sr=True)
test(rng="Sobol", mode="bipolar", total_cnt=100, bitwidth=8, buf_dep=4, sr=True)
"[email protected]"
]
| |
a8575196a736a5712f565182b319b6ac80fd0bda | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_REPO/PLURALSIGHT/ps-python-library-public/psdata_googlecloud.py | e91f889eb210d2c07286cb2d6c55f4d4b9109111 | [
"MIT"
]
| permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 22,332 | py | #!/usr/bin/env python
import logging
import pprint
import random
import sys
import time

import httplib2
from apiclient.discovery import build
from apiclient.errors import HttpError
#from oauth2client.client import SignedJwtAssertionCredentials
from oauth2client.service_account import ServiceAccountCredentials
from oauth2client.client import AccessTokenRefreshError
# some of this code built on this project: https://code.google.com/p/google-bigquery-tools/source/browse/samples/python/appengine-bq-join
# some of this code comes from the following link: https://developers.google.com/bigquery/bigquery-api-quickstart
# to build the service object follow setps in the service account section here: https://developers.google.com/bigquery/docs/authorization#service-accounts
# for more on the API... https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/
# Number of bytes to send/receive in each request.
CHUNKSIZE = 16 * (256 * 1024) #must be multiple of 256KB which is the calc in parenthesis
#old setting CHUNKSIZE = 2 * 1024 * 1024
# Retry transport and file IO errors.
RETRYABLE_ERRORS = (httplib2.HttpLib2Error, IOError)
# Mimetype to use if one can't be guessed from the file extension.
DEFAULT_MIMETYPE = 'application/octet-stream'
# Number of times to retry operations that are configured to allow retries
NUM_RETRIES = 2
def gcloud_connect(service_account, client_secret_file, scope):
    """Create an authorized HTTP object for Google Cloud API calls.

    Args:
        service_account: service account email address, formatted like
            [email protected]
        client_secret_file: local path to the .p12 key file downloaded from the
            project's Credentials page
        scope: string (or list of strings) with the Google Cloud scope(s),
            such as 'https://www.googleapis.com/auth/bigquery'
    Returns:
        Authorized httplib2.Http object, ready to pass to apiclient's build()
    """
    # Fix: the key file was previously read into an unused local variable
    # (leftover from the old SignedJwtAssertionCredentials flow);
    # from_p12_keyfile() takes the path directly, so no manual read is needed.
    credentials = ServiceAccountCredentials.from_p12_keyfile(
        service_account,
        client_secret_file,
        scopes=scope)
    http = httplib2.Http()
    http = credentials.authorize(http)
    return http
def query_table(service, project_id,query):
""" Run a query against Google BigQuery. Returns a list with the results.
Args:
service: BigQuery service object that is authenticated. Example: service = build('bigquery','v2', http=http)
project_id: string, Name of google project which you want to query.
query: string, Query to excecute on BigQuery. Example: 'Select max(Date) from dataset.table'
Returns:
A list with the query results (excluding column names)
"""
jobCollection = service.jobs()
try:
query_body = {"query": query}
query_result = jobCollection.query(projectId=project_id,body=query_body).execute()
result_list=[]
for row in query_result['rows']:
result_row=[]
for field in row['f']:
result_row.append(field['v'])
result_list.append(result_row)
return result_list
except HttpError as err:
print 'Error:', pprint.pprint(err.content)
except AccessTokenRefreshError:
print ("Credentials have been revoked or expired, please re-run"
"the application to re-authorize")
except KeyError:
print "Key Error - no results"
def cloudstorage_upload(service, project_id, bucket, source_file, dest_file, show_status_messages=True):
    """Upload a local file to a Cloud Storage bucket with a resumable transfer.

    Args:
        service: authenticated Cloud Storage service object
        project_id: string, name of the Google project (currently unused here;
            the bucket name alone addresses the upload -- kept for interface
            consistency with the other helpers)
        bucket: string, Cloud Storage bucket name (exclude the "gs://" prefix)
        source_file: string, path to the local file to upload
        dest_file: string, object name to give the file on Cloud Storage
        show_status_messages: boolean, print progress messages when True
    Returns:
        JSON response of the completed insert request
    """
    #Starting code for this function is a combination from these sources:
    # https://code.google.com/p/google-cloud-platform-samples/source/browse/file-transfer-json/chunked_transfer.py?repo=storage
    # https://developers.google.com/api-client-library/python/guide/media_upload
    from apiclient.http import MediaFileUpload
    filename = source_file
    bucket_name = bucket
    object_name = dest_file
    assert bucket_name and object_name
    if show_status_messages:
        print('Upload request for {0}'.format(source_file))
    # Resumable upload in CHUNKSIZE pieces; fall back to the generic mimetype
    # when one can't be guessed from the file extension.
    media = MediaFileUpload(filename, chunksize=CHUNKSIZE, resumable=True)
    if not media.mimetype():
        media = MediaFileUpload(filename, DEFAULT_MIMETYPE, resumable=True)
    request = service.objects().insert(bucket=bucket_name, name=object_name,
                                       media_body=media)
    response = request.execute()
    if show_status_messages:
        print('Upload complete')
    return response
def cloudstorage_download(service, project_id, bucket, source_file, dest_file, show_status_messages=True):
    """Download a file from a Cloud Storage bucket to a local file, retrying
    transient errors with exponential backoff.

    Args:
        service: authenticated Cloud Storage service object
        project_id: string, name of the Google project (unused here; kept for
            interface consistency with the other helpers)
        bucket: string, Cloud Storage bucket name (exclude the "gs://" prefix)
        source_file: string, object name to download from Cloud Storage
        dest_file: string, local path to write the downloaded file to
        show_status_messages: boolean, print progress messages when True
    Returns:
        None
    """
    # Fixes vs. the previous version:
    # * `random` is now imported at module level (it was used below without
    #   any import, raising NameError on the retry path).
    # * The destination is opened with open() in binary mode ('wb') -- object
    #   data is bytes -- and closed via a context manager even on failure
    #   (previously: py2-only file() in text mode, never closed).
    # * py2-only `except X, err` syntax replaced with `except X as err`.
    from apiclient.http import MediaIoBaseDownload
    bucket_name = bucket
    object_name = source_file
    assert bucket_name and object_name
    if show_status_messages:
        print('Download request for {0}'.format(source_file))

    request = service.objects().get_media(bucket=bucket_name, object=object_name)
    with open(dest_file, 'wb') as f:
        media = MediaIoBaseDownload(f, request, chunksize=CHUNKSIZE)
        progressless_iters = 0
        done = False
        while not done:
            error = None
            try:
                p, done = media.next_chunk()
            except HttpError as err:
                error = err
                if err.resp.status < 500:
                    # 4xx errors are not retryable
                    raise
            except RETRYABLE_ERRORS as err:
                error = err
            if error:
                progressless_iters += 1
                if progressless_iters > NUM_RETRIES:
                    if show_status_messages:
                        print('Failed to make progress for too many consecutive iterations.')
                    raise error
                # Exponential backoff with jitter before retrying
                sleeptime = random.random() * (2 ** progressless_iters)
                if show_status_messages:
                    print('Caught exception (%s). Sleeping for %s seconds before retry #%d.'
                          % (str(error), sleeptime, progressless_iters))
                time.sleep(sleeptime)
            else:
                progressless_iters = 0
    if show_status_messages:
        print('Download complete')
def cloudstorage_delete(service, project_id, bucket, filename, show_status_messages=True):
    """Delete a single object from a Cloud Storage bucket.

    Args:
        service: authenticated Cloud Storage service object
        project_id: string, name of the Google project (unused; kept for
            interface consistency with the other helpers)
        bucket: string, Cloud Storage bucket name (exclude the "gs://" prefix)
        filename: string, object name to delete from Cloud Storage
        show_status_messages: boolean, print a status message when True
    Returns:
        None
    """
    if show_status_messages:
        print('Delete request for {0}/{1}'.format(bucket, filename))
    service.objects().delete(bucket=bucket, object=filename).execute()
def gsutil_download(service, source_path, source_file, dest_path, parallel=True):
    """Download file(s) from Google Cloud Storage with the gsutil command-line
    tool (gsutil must be installed and authenticated on the machine).

    Args:
        service: unused; kept for interface consistency with the other helpers
        source_path: string, gs:// path of the bucket/folder to download from
        source_file: string, file name to download (* wildcard for multiple files)
        dest_path: string, local destination for the downloaded file(s)
        parallel: boolean, True (default) downloads multiple files in parallel
    Returns:
        None
    """
    from subprocess import call
    # Fix: build the argv list conditionally. Previously parallel=False passed
    # an empty-string argument ("") to gsutil, which gsutil rejects.
    cmd = ["gsutil"]
    if parallel:
        cmd.append("-m")
    cmd.extend(["cp", source_path + source_file, dest_path])
    call(cmd)
def gsutil_delete(service, path, parallel=True):
    """Delete file(s) from Google Cloud Storage with the gsutil command-line
    tool (gsutil must be installed and authenticated on the machine).

    Args:
        service: unused; kept for interface consistency with the other helpers
        path: string, gs:// path of the file(s) to delete (* wildcard for
            multiple files)
        parallel: boolean, True (default) deletes multiple files in parallel
    Returns:
        None
    """
    from subprocess import call
    # Fix: build the argv list conditionally. Previously parallel=False passed
    # an empty-string argument ("") to gsutil, which gsutil rejects.
    cmd = ["gsutil"]
    if parallel:
        cmd.append("-m")
    cmd.extend(["rm", path])
    call(cmd)
def delete_table(service, project_id, dataset_id, table):
    """Drop a BigQuery table via the tables().delete endpoint.

    Args:
        service: authenticated BigQuery service object
        project_id: string, Google project the table resides in
        dataset_id: string, dataset the table resides in
        table: string, name of the table to delete (make sure you get this
            one right!)
    Returns:
        JSON response from BigQuery
    """
    return service.tables().delete(projectId=project_id,
                                   datasetId=dataset_id,
                                   tableId=table).execute()
def job_status_loop(project_id, jobCollection, insertResponse,waitTimeSecs=10):
"""Monitors BigQuery job and prints out status until the job is complete.
Args:
project_id: string, Name of Google project table resides in
jobCollection: jobs() object, Name of jobs() object that called the job insert
insertResponse: JSON object, The JSON object returned when calling method jobs().insert().execute()
waitTimeSecs: integer, Number of seconds to wait between checking job status
Returns:
Nothing
"""
while True:
job = jobCollection.get(projectId=project_id,
jobId=insertResponse['jobReference']['jobId']).execute()
if 'DONE' == job['status']['state']:
print 'Done Loading!'
if 'errorResult' in job['status']:
print 'Error loading table: ', pprint.pprint(job)
return
print 'Waiting for loading to complete...'
time.sleep(waitTimeSecs)
if 'errorResult' in job['status']:
print 'Error loading table: ', pprint.pprint(job)
return
def list_datasets(service, project_id):
    """List the dataset names in a BigQuery project.

    Args:
        service: authenticated BigQuery service object,
            e.g. service = build('bigquery', 'v2', http=http)
        project_id: string, name of the Google project
    Returns:
        List of dataset-id strings (empty when the project has no datasets)
    """
    datasets = service.datasets()
    # Fix: this previously passed an undefined global PROJECT_NUMBER instead
    # of the project_id argument, raising NameError on every call.
    response = datasets.list(projectId=project_id).execute()
    dataset_list = []
    # The 'datasets' key is absent from the response when there are no
    # datasets; default to an empty list instead of raising KeyError.
    for field in response.get('datasets', []):
        dataset_list.append(field['datasetReference']['datasetId'])
    return dataset_list
def load_table_from_file(service, project_id, dataset_id, targettable, sourceCSV, field_list=None, delimiter='\t', skipLeadingRows=0, overwrite=False):
    """Load a delimited file (tab-delimited by default) into a BigQuery table.

    Args:
        service: authenticated BigQuery service object
        project_id: string, name of the Google project
        dataset_id: string, dataset the target table resides in
        targettable: string, table to create or append data to
        sourceCSV: string, path of the file to load
        field_list: list, schema of the file to be loaded
        delimiter: string, column delimiter, default tab (optional)
        skipLeadingRows: integer, header rows to skip, default 0 (optional)
        overwrite: boolean, False (default) appends, True overwrites the table
    Returns:
        Job response object; prints job status every 10 seconds until done.
    """
    jobs = service.jobs()
    write_disposition = 'WRITE_TRUNCATE' if overwrite else 'WRITE_APPEND'
    load_config = {
        'sourceUris': [sourceCSV],
        'fieldDelimiter': delimiter,
        'schema': {'fields': field_list},
        'destinationTable': {
            'projectId': project_id,
            'datasetId': dataset_id,
            'tableId': targettable
        },
        'skipLeadingRows': skipLeadingRows,
        'createDisposition': 'CREATE_IF_NEEDED',
        'writeDisposition': write_disposition,
    }
    job_body = {
        'projectId': project_id,
        'configuration': {'load': load_config}
    }
    response = jobs.insert(projectId=project_id, body=job_body).execute()
    job_status_loop(project_id, jobs, response)
    return response
def load_table_from_json(service, project_id, dataset_id, target_table, source_file, field_list=None, overwrite=False):
    """Load a newline-delimited JSON file on Cloud Storage into a BigQuery table.

    Example field list:
        field_list = [{'name': 'ID', 'type': 'INTEGER'},
                      {'name': 'Day', 'type': 'TIMESTAMP'},
                      {'name': 'ViewTimeInMinutes', 'type': 'FLOAT'},
                      {'name': 'LoadDate', 'type': 'TIMESTAMP'}]

    Args:
        service: authenticated BigQuery service object
        project_id: string, name of the Google project
        dataset_id: string, dataset for the target table
        target_table: string, table to write to
        source_file: string, path of the source file within Google Cloud
        field_list: list of dicts describing field name and type,
            such as {'name': 'ID', 'type': 'INTEGER'}
        overwrite: boolean, False (default) appends, True overwrites the table
    Returns:
        Job response object (added for parity with load_table_from_file);
        prints job status every 10 seconds until the load finishes.
    """
    # Fix: removed an unused local `import json` left over from earlier code.
    jobCollection = service.jobs()
    # Set if overwriting or appending to table
    if overwrite:
        write_disposition = 'WRITE_TRUNCATE'
    else:
        write_disposition = 'WRITE_APPEND'
    jobData = {
        'projectId': project_id,
        'configuration': {
            'load': {
                'sourceUris': [source_file],
                'sourceFormat': 'NEWLINE_DELIMITED_JSON',
                'schema': {'fields': field_list},
                'destinationTable': {
                    'projectId': project_id,
                    'datasetId': dataset_id,
                    'tableId': target_table
                },
                'createDisposition': 'CREATE_IF_NEEDED',
                # WRITE_TRUNCATE overwrites, WRITE_APPEND appends; actions are
                # atomic and only occur when the job completes successfully.
                'writeDisposition': write_disposition,
            }
        }
    }
    insertResponse = jobCollection.insert(projectId=project_id, body=jobData).execute()
    job_status_loop(project_id, jobCollection, insertResponse)
    return insertResponse
def load_table(service, project_id, job_data):
    """Thin wrapper around the BigQuery ``jobs.insert()`` endpoint.

    Submits a fully specified job configuration and waits for it to finish.

    Args:
        service: BigQuery service object that is authenticated. Example: service = build('bigquery','v2', http=http)
        project_id: string, Name of google project
        job_data: json with job details

    Returns:
        None
    """
    jobs = service.jobs()
    response = jobs.insert(projectId=project_id, body=job_data).execute()
    # Poll until the submitted job reaches a terminal state.
    job_status_loop(project_id, jobs, response)
def load_from_query(service, project_id, dataset_id, target_table, source_query, overwrite=False):
    """Run a query on BigQuery and write the result set into a destination table.

    Args:
        service: BigQuery service object that is authenticated. Example: service = build('bigquery','v2', http=http)
        project_id: string, Name of google project
        dataset_id: string, Name of dataset for the destination table
        target_table: string, Name of table to write to
        source_query: string, query to run on BigQuery for the source data (keep the resultset small or this will fail)
        overwrite: boolean, set as True to ovewrite data in destination table (optional)

    Returns:
        None
    """
    job_collection = service.jobs()
    # WRITE_TRUNCATE replaces existing table data; WRITE_APPEND adds to it.
    write_disposition = 'WRITE_TRUNCATE' if overwrite else 'WRITE_APPEND'
    query_config = {
        'allowLargeResults': 'True',
        # Flattens nested and repeated fields in the query results.
        # allowLargeResults must be true if this is set to false.
        'flattenResults': 'True',
        'destinationTable': {
            'projectId': project_id,
            'datasetId': dataset_id,
            'tableId': target_table,
        },
        # BATCH priority queues the query rather than running it interactively.
        'priority': 'BATCH',
        # Create the destination table if it does not already exist.
        'writeDisposition': write_disposition,
        'createDisposition': 'CREATE_IF_NEEDED',
        'query': source_query,
    }
    job_data = {
        'projectId': project_id,
        'configuration': {
            'query': query_config,
        },
    }
    response = job_collection.insert(projectId=project_id, body=job_data).execute()
    # Poll until the query job completes.
    job_status_loop(project_id, job_collection, response)
def export_table(service, project_id, dataset_id, source_table, destination_uris, compress=False, delimiter='\t', print_header=True):
    """Export a BigQuery table to file(s) in Google Cloud Storage.

    Args:
        service: BigQuery service object that is authenticated. Example: service = build('bigquery','v2', http=http)
        project_id: string, Name of google project
        dataset_id: string, Name of dataset for the source table
        source_table: string, Name of table to export to files
        destination_uris: list, Path(s) where data will be saved (include * to allow multiple files)
        compress: optional, True to do gzip compression
        delimiter: string, Defaults to tab delimited '\\t'
        print_header: boolean, True to include a header row in the output

    Returns:
        None
    """
    job_collection = service.jobs()
    compression = 'GZIP' if compress else 'NONE'
    # Fix: the original assigned this variable but never used it.  CSV is the
    # service-side default export format, so passing it explicitly keeps
    # behavior identical while making the intent visible.
    destination_format = 'CSV'
    job_data = {
        'projectId': project_id,
        'configuration': {
            'extract': {
                'destinationUris': destination_uris,
                'destinationFormat': destination_format,
                'compression': compression,
                'fieldDelimiter': delimiter,
                'printHeader': print_header,
                'sourceTable': {
                    'projectId': project_id,
                    'datasetId': dataset_id,
                    'tableId': source_table,
                },
            },
        },
    }
    response = job_collection.insert(projectId=project_id, body=job_data).execute()
    # Poll until the extract job completes.
    job_status_loop(project_id, job_collection, response)
| [
"[email protected]"
]
| |
d79fda38aff980036caca4a217945a5d4e535590 | 6bb45c5892b4c9692dcc44116fb73dc9e7ab90ff | /sagemaker-inference-recommender/tensorflow-cloudwatch/code/inference.py | 6c01046988f46bc972a2ad0fae4f1977a5ae7f98 | [
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | aws/amazon-sagemaker-examples | 8359afe544e873662bda5b8d2b07399c437213c9 | 43dae4b28531cde167598f104f582168b0a4141f | refs/heads/main | 2023-08-26T04:42:52.342776 | 2023-08-25T14:37:19 | 2023-08-25T14:37:19 | 107,937,815 | 4,797 | 3,519 | Apache-2.0 | 2023-09-14T19:47:03 | 2017-10-23T05:55:22 | Jupyter Notebook | UTF-8 | Python | false | false | 1,746 | py | import io
import json
import numpy as np
from PIL import Image
def input_handler(data, context):
    """Pre-process request input before it is sent to TensorFlow Serving REST API.

    Decodes an image payload, resizes it to 224x224, and wraps it in the
    TensorFlow Serving ``instances`` JSON format.

    Args:
        data (obj): the request data stream
        context (Context): an object containing request and configuration details

    Returns:
        (str): a JSON-serialized dict that contains the request body

    Raises:
        ValueError: if the request content type is not application/x-image
    """
    if context.request_content_type == 'application/x-image':
        # np.frombuffer replaces the deprecated np.fromstring for binary input.
        buf = np.frombuffer(data.read(), np.uint8)
        image = Image.open(io.BytesIO(buf)).resize((224, 224))
        image = np.array(image)
        # Add the leading batch dimension: (H, W, C) -> (1, H, W, C).
        image = np.expand_dims(image, axis=0)
        return json.dumps({"instances": image.tolist()})
    else:
        _return_error(415, 'Unsupported content type "{}"'.format(
            context.request_content_type or 'Unknown'))
def output_handler(response, context):
    """Post-process the TensorFlow Serving response before returning it to the client.

    Args:
        response (obj): the TensorFlow serving response
        context (Context): an object containing request and configuration details

    Returns:
        (bytes, string): data to return to client, response content type
    """
    # Any non-200 status from TF Serving is surfaced as an error.
    if response.status_code != 200:
        _return_error(response.status_code, response.content.decode('utf-8'))
    content_type = context.accept_header
    body = response.content
    return body, content_type
def _return_error(code, message):
raise ValueError('Error: {}, {}'.format(str(code), message)) | [
"[email protected]"
]
| |
b4842432df98cdecfd9ed798a4883fad4fd5ec9b | 6bce144a2dc9293f290207d1c6c2d08a63763cd2 | /napari/_vispy/_tests/test_vispy_vectors_layer.py | cee34be9af2a9da5fa15e8c2b4f47f1a69d8ab2a | [
"BSD-3-Clause"
]
| permissive | tlambert03/napari | 0f7b90de5333b520567a7eb9f00dea5c15fa448c | 19867df427b1eb1e503618a1ab109e7210ae8a83 | refs/heads/main | 2023-08-30T21:32:29.433620 | 2023-05-08T13:58:18 | 2023-05-08T13:58:18 | 216,388,440 | 5 | 0 | BSD-3-Clause | 2023-05-01T07:58:42 | 2019-10-20T16:02:35 | Python | UTF-8 | Python | false | false | 1,382 | py | import numpy as np
import pytest
from napari._vispy.layers.vectors import (
generate_vector_meshes,
generate_vector_meshes_2D,
)
@pytest.mark.parametrize(
    "edge_width, length, dims", [[0, 0, 2], [0.3, 0.3, 2], [1, 1, 3]]
)
def test_generate_vector_meshes(edge_width, length, dims):
    """Meshes built from random vectors have the expected vertex/face counts."""
    n_vectors = 10
    data = np.random.random((n_vectors, 2, dims))

    vertices, faces = generate_vector_meshes(
        data, width=edge_width, length=length
    )

    vert_count, vert_dims = vertices.shape
    face_count, face_dims = faces.shape
    # 2D vectors yield 4 vertices / 2 faces each; 3D vectors twice that.
    if dims == 2:
        assert vert_count == 4 * n_vectors
        assert face_count == 2 * n_vectors
    elif dims == 3:
        assert vert_count == 8 * n_vectors
        assert face_count == 4 * n_vectors
    assert vert_dims == dims
    # Faces are always triangles.
    assert face_dims == 3
@pytest.mark.parametrize(
    "edge_width, length, p",
    [[0, 0, (1, 0, 0)], [0.3, 0.3, (0, 1, 0)], [1, 1, (0, 0, 1)]],
)
def test_generate_vector_meshes_2D(edge_width, length, p):
    """2D meshes always produce 4 vertices and 2 triangular faces per vector."""
    n_vectors = 10
    ndim = 2
    data = np.random.random((n_vectors, 2, ndim))

    vertices, faces = generate_vector_meshes_2D(
        data, width=edge_width, length=length, p=p
    )

    # Each vector contributes one quad: 4 vertices, 2 triangles.
    assert vertices.shape == (4 * n_vectors, ndim)
    assert faces.shape == (2 * n_vectors, 3)
| [
"[email protected]"
]
| |
1e5273c1dacc874b90160d3690e51cca256c9cef | 3b7ea74de26931e95eb76d1d27621e5d744f81f4 | /hashtat/hashing/migrations/0001_initial.py | 3e4bbb2dc78a2ba9357a4a0f5dad48615c707d00 | []
| no_license | RoodrigoRoot/TDD-django | abf7c70bde2c6095a27ad6415330476449f71f4a | 6ad752cb75dbef3c6f720b071f3db61c2409bd23 | refs/heads/main | 2023-03-24T11:47:43.683202 | 2021-03-23T17:02:34 | 2021-03-23T17:02:34 | 350,515,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | # Generated by Django 3.1.7 on 2021-03-22 18:31
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the ``Hash`` table with a
    # free-text ``text`` column and a 64-character ``haash`` column
    # (presumably a hex-encoded SHA-256 digest, given the length — confirm
    # against the hashing code; note the field name is spelled "haash").

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Hash',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('haash', models.CharField(max_length=64)),
            ],
        ),
    ]
| [
"[email protected]"
]
| |
7b897209074e91145f84ce321c1f8c1d4c601389 | 63ec00220da0cbaf125bf2e879ff63ce432f7227 | /tests/multiloop/test_alternative_loops.py | 000aed652ca7d15d93e11eaa8c07acccd54c801e | [
"Apache-2.0"
]
| permissive | munderseth/pytest-asyncio | 3cbfc49a82f5f8d503f5429d43bc3720993ecee4 | 2f37e873e0977861d24a018e06fa9f469470eaf0 | refs/heads/master | 2021-07-24T22:10:19.852111 | 2017-11-03T22:41:09 | 2017-11-03T22:41:09 | 109,448,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | """Unit tests for overriding the event loop."""
import asyncio
import pytest
@pytest.mark.asyncio
def test_for_custom_loop():
    """This test should be executed using the custom loop."""
    # Old-style generator-based coroutine: ``yield from`` suspends the test so
    # it actually runs on the active event loop.
    yield from asyncio.sleep(0.01)
    # NOTE(review): the running loop's class name is checked, so some fixture
    # or conftest in this test's directory presumably installs a
    # ``CustomSelectorLoop`` — confirm against the surrounding test setup.
    assert type(asyncio.get_event_loop()).__name__ == "CustomSelectorLoop"
@pytest.mark.asyncio
@asyncio.coroutine
def test_dependent_fixture(dependent_fixture):
    # ``dependent_fixture`` is defined elsewhere (presumably a conftest);
    # requesting it exercises fixture resolution for coroutine tests.  The
    # sleep just yields control to the loop so the coroutine actually runs.
    yield from asyncio.sleep(0.1)
| [
"[email protected]"
]
| |
792a9513fc4cc2b3ecbcf30866ddd9d8a3289cac | 3d7039903da398ae128e43c7d8c9662fda77fbdf | /database/前端/juejin_1897.py | 5d026126c1aded18cc586d2c21f786748269ce84 | []
| no_license | ChenYongChang1/spider_study | a9aa22e6ed986193bf546bb567712876c7be5e15 | fe5fbc1a5562ff19c70351303997d3df3af690db | refs/heads/master | 2023-08-05T10:43:11.019178 | 2021-09-18T01:30:22 | 2021-09-18T01:30:22 | 406,727,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,742 | py | {"err_no": 0, "err_msg": "success", "data": [{"article_id": "7002236490654859278", "article_info": {"article_id": "7002236490654859278", "user_id": "3677241439685368", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/e503fd8b45224f588a1722e23a219bef~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "分享按钮设计", "brief_content": "这是我参与8月更文挑战的第30天,活动详情查看:8月更文挑战 背景 按钮 是我们 编程 中使用频率最多的几个 交互元素 之一,点击它会产生其描述的动作。如果一个按钮上写着 提交 ,点击它很可能会 提交", "is_english": 0, "is_original": 1, "user_index": 6.637422133171311, "original_type": 0, "original_author": "", "content": "", "ctime": "1630335248", "mtime": "1630380484", "rtime": "1630380484", "draft_id": "6998962500158881799", "view_count": 78, "collect_count": 2, "digg_count": 4, "comment_count": 0, "hot_index": 7, "is_hot": 0, "rank_index": 0.00655921, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3677241439685368", "user_name": "battleKing", "company": "字节跳动", "job_title": "前端高级工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/92bc7c8f257b8f83f1e4adf97ed2653b~300x300.image", "level": 2, "description": "HTML、CSS、Javascript、Vue.js", "followee_count": 42, "follower_count": 48, "post_article_count": 46, "digg_article_count": 79, "got_digg_count": 561, "got_view_count": 13431, "post_shortmsg_count": 1, "digg_shortmsg_count": 11, "isfollowed": false, "favorable_author": 0, "power": 695, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", 
"parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 7002236490654859278, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "7005516973421363207", "article_info": {"article_id": "7005516973421363207", "user_id": "1679709499568712", "category_id": "6809637767543259144", "tag_ids": [6809641090145058824], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "Flutter练习(二)— 两种方式moke本地json 数据", "brief_content": "前言 
今天做仿掘金项目时调不通接口,看了下掘金现在用的接口和之前不一样(又没有接口文档),于是并想着直接moke掘金的数据进行页面的渲染吧", "is_english": 0, "is_original": 1, "user_index": 0.145118528934521, "original_type": 0, "original_author": "", "content": "", "ctime": "1631099054", "mtime": "1631511813", "rtime": "1631159270", "draft_id": "7005513946670645284", "view_count": 45, "collect_count": 0, "digg_count": 2, "comment_count": 0, "hot_index": 4, "is_hot": 0, "rank_index": 0.00655707, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1679709499568712", "user_name": "Joahyan", "company": "", "job_title": "前端CV工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/54f6a40d35f217608cc9b3371d9d3576~300x300.image", "level": 1, "description": "", "followee_count": 11, "follower_count": 1, "post_article_count": 36, "digg_article_count": 43, "got_digg_count": 49, "got_view_count": 2624, "post_shortmsg_count": 0, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 75, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2547019, "tag_id": "6809641090145058824", "tag_name": "Flutter", "color": "", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/1519790365175e2d3ba2174d5c8f3fdc4687a8bbf5768.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", 
"show_navi": 0, "ctime": 1519761568, "mtime": 1631691946, "id_type": 9, "tag_alias": "", "post_article_count": 9303, "concern_user_count": 42137}], "user_interact": {"id": 7005516973421363207, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "7001830823847133214", "article_info": {"article_id": "7001830823847133214", "user_id": "4204959635367640", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640501776482317, 6809640653266354190], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/a685b6454cab412a97f17ba8b73b6ec9~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "微信小程序开发指南", "brief_content": "这是我参与8月更文挑战的第29天,活动详情查看:8月更文挑战 基础 插值 {{}} 元素显示隐藏 条件渲染 wx:if=\"{{条件}}\" wx:elif=\"{{条件}}\" wx:else 列表渲染 w", "is_english": 0, "is_original": 1, "user_index": 4.273514770607234, "original_type": 0, "original_author": "", "content": "", "ctime": "1630240827", "mtime": "1630291730", "rtime": "1630291730", "draft_id": "7001830082968813581", "view_count": 170, "collect_count": 1, "digg_count": 3, "comment_count": 0, "hot_index": 11, "is_hot": 0, "rank_index": 0.00655502, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4204959635367640", "user_name": "一点儿", "company": "", "job_title": "", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/3602b2746df641f15be98fe02d6e6dfd~300x300.image", "level": 2, "description": "", "followee_count": 3, "follower_count": 6, "post_article_count": 35, "digg_article_count": 43, "got_digg_count": 144, "got_view_count": 5501, "post_shortmsg_count": 35, "digg_shortmsg_count": 2, "isfollowed": false, "favorable_author": 0, "power": 199, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": 
{"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546594, "tag_id": "6809640501776482317", "tag_name": "架构", "color": "#C679FF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f27d811ad7e2b2a0bc24.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1439515816, "mtime": 1631692674, "id_type": 9, "tag_alias": "", "post_article_count": 10508, "concern_user_count": 338797}, {"id": 2546704, "tag_id": "6809640653266354190", "tag_name": "微信小程序", "color": "#11a600", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/a1e7773920f51db40441.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1474932627, "mtime": 1631692796, "id_type": 9, "tag_alias": "", "post_article_count": 7107, "concern_user_count": 221757}], "user_interact": {"id": 7001830823847133214, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": 
"2021091516045401020405722613005E0E"}, {"article_id": "6987314160514105375", "article_info": {"article_id": "6987314160514105375", "user_id": "3808364011721751", "category_id": "6809637767543259144", "tag_ids": [6809640495594078216, 6809640595070386184, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/b6ff12758c7744f59f304fc469c1dccd~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "目前大火的 Jamstack 到底是什么?", "brief_content": "这篇文章将带你了解 Jamstack 的概念以及开发范式。我们也将讨论 Rust 与 WebAssembly 这样的新兴技术如何让 Jamstack 更快,更安全,更易用。", "is_english": 0, "is_original": 1, "user_index": 6.244115481544289, "original_type": 0, "original_author": "", "content": "", "ctime": "1626860913", "mtime": "1626862751", "rtime": "1626862751", "draft_id": "6987313349168103431", "view_count": 1180, "collect_count": 0, "digg_count": 11, "comment_count": 0, "hot_index": 70, "is_hot": 0, "rank_index": 0.00655382, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3808364011721751", "user_name": "Michael_Yuan", "company": "Second State", "job_title": "开源软件开发", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/11/26/16ea743d2e979d2c~tplv-t2oaga2asx-image.image", "level": 2, "description": "多年软件开发经验,著有5本关于软件开发的书籍", "followee_count": 66, "follower_count": 74, "post_article_count": 38, "digg_article_count": 22, "got_digg_count": 222, "got_view_count": 29764, "post_shortmsg_count": 29, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 519, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": 
"6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546590, "tag_id": "6809640495594078216", "tag_name": "Rust", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/01787a4f2859cde4db11.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1439498965, "mtime": 1631638706, "id_type": 9, "tag_alias": "", "post_article_count": 720, "concern_user_count": 7617}, {"id": 2546662, "tag_id": "6809640595070386184", "tag_name": "WebAssembly", "color": "#6E62F1", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-ad-assets/1502416465933c369eec15aac923580c0750ca89293f9.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1445886052, "mtime": 1631501460, "id_type": 9, "tag_alias": "", "post_article_count": 294, "concern_user_count": 7115}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6987314160514105375, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "6981257742035992584", "article_info": {"article_id": "6981257742035992584", "user_id": "2339399368751325", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093], "visible_level": 0, 
"link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4a15f6ebef7347a29e41a23154fced98~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "基于react的简单轻便的开源图片预览组件", "brief_content": "react-dark-photo一款基于react17.x开发的图片预览组件,支持放大、缩小、下载、打印等功能。", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1625450766", "mtime": "1625491105", "rtime": "1625455107", "draft_id": "6981256827665121317", "view_count": 339, "collect_count": 6, "digg_count": 89, "comment_count": 1, "hot_index": 106, "is_hot": 0, "rank_index": 0.00654792, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2339399368751325", "user_name": "饼干_", "company": "git", "job_title": "掘金优秀码农", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/6c9c4d00247887f584380c3bd67d8d6d~300x300.image", "level": 3, "description": "周更(节假日偷懒)", "followee_count": 20, "follower_count": 121, "post_article_count": 26, "digg_article_count": 3676, "got_digg_count": 2182, "got_view_count": 25299, "post_shortmsg_count": 17, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 2436, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", 
"tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6981257742035992584, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "6999807899149008910", "article_info": {"article_id": "6999807899149008910", "user_id": "3350967171169901", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640361531539470], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/b83ae7d8dae34639b1b40fb41128a84b~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "2021年从零开发前端项目指南", "brief_content": "前端从零到一的项目配置,涉及到 `Webpack`、`React`、`Babel`、`TypeScript`、`Ant Design`、`Sass`、`Eslint`、`Prettier`", "is_english": 0, "is_original": 1, "user_index": 2.737892541972927, "original_type": 0, "original_author": "", "content": "", "ctime": "1629769789", "mtime": "1629775036", "rtime": "1629775036", "draft_id": "6999806764623331359", "view_count": 280, "collect_count": 5, "digg_count": 7, "comment_count": 0, "hot_index": 20, "is_hot": 0, "rank_index": 0.00654655, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3350967171169901", "user_name": "windliang", "company": "美团", "job_title": "前端开发", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/11/14/1671135294868d78~tplv-t2oaga2asx-image.image", "level": 2, "description": "公众号,windliang", "followee_count": 13, "follower_count": 27, "post_article_count": 146, "digg_article_count": 12, "got_digg_count": 56, "got_view_count": 
40269, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 458, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6999807899149008910, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "7004816361981640717", "article_info": {"article_id": "7004816361981640717", "user_id": "2727236109865134", 
"category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640625856577549], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/c8762fb75d8643d293dcb8453d8fc609~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "# 从浏览器输入url到页面渲染都发生了什么?(下) 欢迎补充!", "brief_content": "从浏览器输入url到页面渲染都发生了什么?(下) 上一篇说到,浏览器通过网络,拿到了我们加载页面所需要的资源。 那么我们的浏览器该如何对这些资源进行操作呢? 对资源的解析 生成DOM树和渲染树 浏览器", "is_english": 0, "is_original": 1, "user_index": 1.892284039663875, "original_type": 0, "original_author": "", "content": "", "ctime": "1630935995", "mtime": "1630992198", "rtime": "1630992198", "draft_id": "7004788560595255326", "view_count": 40, "collect_count": 1, "digg_count": 2, "comment_count": 0, "hot_index": 4, "is_hot": 0, "rank_index": 0.00654634, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2727236109865134", "user_name": "乔垃圾要carry", "company": "", "job_title": "大学在校", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/12660bbd0fcfacbbde3a4714df4c2801~300x300.image", "level": 1, "description": "22秋招,求内推!", "followee_count": 6, "follower_count": 0, "post_article_count": 14, "digg_article_count": 33, "got_digg_count": 25, "got_view_count": 722, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 32, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 
1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546683, "tag_id": "6809640625856577549", "tag_name": "浏览器", "color": "#47ebc7", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/baf3558e2acdfa623201.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1460153459, "mtime": 1631677186, "id_type": 9, "tag_alias": "", "post_article_count": 3341, "concern_user_count": 28324}], "user_interact": {"id": 7004816361981640717, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "6997650099690012680", "article_info": {"article_id": "6997650099690012680", "user_id": "1372625489112471", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "vue的系统学习|各种基础知识和细节篇", "brief_content": "目录 vue基础(本篇) vue-cli vue-router vuex element-ui vue3 1.vue简介 1.1 vue是什么 一套用于构建用户界面的渐进式JavaScript框架", "is_english": 0, "is_original": 1, "user_index": 5.678873587267573, "original_type": 0, "original_author": "", "content": "", "ctime": "1629267463", "mtime": "1629354837", "rtime": "1629354837", "draft_id": "6997649576828256264", "view_count": 194, "collect_count": 1, "digg_count": 15, "comment_count": 0, "hot_index": 24, "is_hot": 0, "rank_index": 0.00653628, "status": 2, "verify_status": 1, "audit_status": 2, 
"mark_content": ""}, "author_user_info": {"user_id": "1372625489112471", "user_name": "LeHan", "company": "", "job_title": "学生", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/8650dabe72d57dbce5aa6e79e7be4dbb~300x300.image", "level": 2, "description": "耐心细致,做好细节", "followee_count": 54, "follower_count": 36, "post_article_count": 45, "digg_article_count": 373, "got_digg_count": 520, "got_view_count": 6131, "post_shortmsg_count": 7, "digg_shortmsg_count": 6, "isfollowed": false, "favorable_author": 0, "power": 581, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 
88828, "concern_user_count": 527704}], "user_interact": {"id": 6997650099690012680, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "7006216065604648973", "article_info": {"article_id": "7006216065604648973", "user_id": "2454532902628622", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/3ea3209207e44d7e8a18cf39cb7ee2fe~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "Js去除字符串空格", "brief_content": "使用js去除字符串内所带有空格,有以下三种方法 (1)replace正侧匹配方法 去除字符串内所有的空格:str=str.replace(/\\s*/g,\"\"); 去除字符串内两头的空格:str=str", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1631261836", "mtime": "1631263032", "rtime": "1631263032", "draft_id": "7006199760856023071", "view_count": 46, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 3, "is_hot": 0, "rank_index": 0.00654307, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2454532902628622", "user_name": "郑九", "company": "", "job_title": "", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/6fa93a080924d591fccc7edde01351d9~300x300.image", "level": 1, "description": "", "followee_count": 8, "follower_count": 0, "post_article_count": 4, "digg_article_count": 2, "got_digg_count": 1, "got_view_count": 149, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 2, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": 
false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 7006216065604648973, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "6999624390404997157", "article_info": {"article_id": "6999624390404997157", "user_id": "2357005414307127", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640361531539470], "visible_level": 0, "link_url": "", "cover_image": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/a32a05ac27e3406692a41d15a9ed9f2f~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "NodeJs深入浅出之旅:异步I/O (中)🐉", "brief_content": "此文是承接上文《NodeJs深入浅出之旅:异步I/O 
(上)》的,所以对于监听器的介绍可以查看之前的内容。还介绍了多异步之间的协作方案", "is_english": 0, "is_original": 1, "user_index": 4.56748211616137, "original_type": 0, "original_author": "", "content": "", "ctime": "1629727184", "mtime": "1629773996", "rtime": "1629773996", "draft_id": "6999243187172671519", "view_count": 95, "collect_count": 0, "digg_count": 14, "comment_count": 0, "hot_index": 18, "is_hot": 0, "rank_index": 0.00653972, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2357005414307127", "user_name": "空城机", "company": "", "job_title": "小小前端开发", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/a3398650b18308b2a2616f92dae00f3f~300x300.image", "level": 3, "description": "业余作者,在线水文", "followee_count": 51, "follower_count": 68, "post_article_count": 284, "digg_article_count": 2337, "got_digg_count": 1742, "got_view_count": 17632, "post_shortmsg_count": 1, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 1924, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, 
"mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6999624390404997157, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "7006205253443928095", "article_info": {"article_id": "7006205253443928095", "user_id": "624178336895598", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640394175971342, 6809640715828592654], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/3777fa4fb35643deab19368bbd6cb9f5~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "单标签CSS实现自嗨兔(简陋版)", "brief_content": "我正在参加中秋创意投稿大赛,详情请看:中秋创意投稿大赛 以前一直没有在意单标签实现一些动画的原理,这次刚好蹭活动,同时补习一下。 做出来的自嗨兔效果: 附上源码库:https://gitee.com/", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1631259290", "mtime": "1631332625", "rtime": "1631262689", "draft_id": "7005928334362165279", "view_count": 45, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 3, "is_hot": 0, "rank_index": 0.0065358, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "624178336895598", "user_name": "dengdd酱", "company": "", "job_title": "", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/e062bb91be1ab2b765ce2da1d9497029~300x300.image", "level": 1, "description": "越努力越幸运", 
"followee_count": 35, "follower_count": 0, "post_article_count": 1, "digg_article_count": 4, "got_digg_count": 1, "got_view_count": 45, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 1, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546749, "tag_id": "6809640715828592654", "tag_name": "掘金技术征文", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/15541961813457c93d793d132eb6f090c15266807965a.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", 
"show_navi": 0, "ctime": 1481753122, "mtime": 1631691817, "id_type": 9, "tag_alias": "", "post_article_count": 658, "concern_user_count": 11586}], "user_interact": {"id": 7006205253443928095, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "6964559926475620366", "article_info": {"article_id": "6964559926475620366", "user_id": "1028798614616120", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "用 gogocode 轻松写 webpack、vite 等打包插件", "brief_content": "作为百依百顺的项目组,那现在我们就通过一个简单的用例,带大家研究在这些打包工具中如何完美融入 gogocode!", "is_english": 0, "is_original": 1, "user_index": 10.243014698905002, "original_type": 0, "original_author": "", "content": "", "ctime": "1621563039", "mtime": "1621564066", "rtime": "1621564066", "draft_id": "6964556905368944647", "view_count": 2536, "collect_count": 30, "digg_count": 53, "comment_count": 9, "hot_index": 188, "is_hot": 0, "rank_index": 0.0065277, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1028798614616120", "user_name": "阿里妈妈前端快爆", "company": "Alibaba Inc.", "job_title": "前端小学生", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/ef6dfe4669d52287b2de2e0a248c6e1b~300x300.image", "level": 3, "description": "阿里妈妈 MUX 倾力打造,每周更新一篇周刊以及不定时的发表原创文章", "followee_count": 13, "follower_count": 2331, "post_article_count": 46, "digg_article_count": 11, "got_digg_count": 2211, "got_view_count": 112826, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 1, "power": 3339, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, 
"select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6964559926475620366, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "6994278335454904327", "article_info": {"article_id": "6994278335454904327", "user_id": "1460594842018446", "category_id": "6809637767543259144", "tag_ids": [6809640372352843789, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/aa2f5b8af32d4d01a61ab209ddbc2997~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": " 零基础学习MongoDB (二)—— 管理用户", "brief_content": "在b站上听了几个老师的课,有涉及到mongodb的一些历史,比如删库勒索,因此开放的数据库是很危险的,所以我们需要给它们添加管理用户,这样为", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628482467", "mtime": "1628489283", "rtime": "1628489283", "draft_id": "6994277010323275807", "view_count": 299, "collect_count": 0, "digg_count": 29, "comment_count": 1, 
"hot_index": 44, "is_hot": 0, "rank_index": 0.00652183, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1460594842018446", "user_name": "小丞同学", "company": "", "job_title": "前端开发 | 微信:Ljc-10c", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/c3aa74d1dace9521012f90287a0ba442~300x300.image", "level": 3, "description": "电子信息类大二学生,目前学习方向前端,有兴趣的可以一起学习交流噢,也可以私聊添加我的微信一起探讨合作", "followee_count": 45, "follower_count": 164, "post_article_count": 65, "digg_article_count": 964, "got_digg_count": 1936, "got_view_count": 38346, "post_shortmsg_count": 21, "digg_shortmsg_count": 20, "isfollowed": false, "favorable_author": 0, "power": 2319, "study_point": 330, "university": {"university_id": "6888594359726538766", "name": "广东工业大学", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 2, "select_event_count": 0, "select_online_course_count": 2, "identity": 1, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546500, "tag_id": "6809640372352843789", "tag_name": "MongoDB", "color": "#419745", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/70f6d7392f9151842bce.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234551, "mtime": 1631692720, "id_type": 9, "tag_alias": "", "post_article_count": 1533, "concern_user_count": 86496}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6994278335454904327, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "7001109597322739720", "article_info": {"article_id": "7001109597322739720", "user_id": "2049902560674295", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "使用SourceTree推送报错 \"error: RPC failed; curl 55 Recv failure: Connection was reset\"", "brief_content": "本文已参与掘金创作者训练营第三期「高产更文」赛道,详情查看:掘力计划|创作者训练营第三期正在进行,「写」出个人影响力。 一,遇到问题 提交项目时停了到这个界面很长时间... 
等来的却是报错,告诉我提交", "is_english": 0, "is_original": 1, "user_index": 6.089693646737103, "original_type": 0, "original_author": "", "content": "", "ctime": "1630073287", "mtime": "1630130028", "rtime": "1630130028", "draft_id": "7000751122201706533", "view_count": 123, "collect_count": 0, "digg_count": 5, "comment_count": 0, "hot_index": 11, "is_hot": 0, "rank_index": 0.00651986, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2049902560674295", "user_name": "陈言必行", "company": "没头脑办公室", "job_title": "没头发程序员", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/068c097001e6d100cc553612c10c397c~300x300.image", "level": 3, "description": "擅长摸鱼,划水,呆坐等众多工作技能", "followee_count": 44, "follower_count": 34, "post_article_count": 119, "digg_article_count": 931, "got_digg_count": 1185, "got_view_count": 8860, "post_shortmsg_count": 5, "digg_shortmsg_count": 25, "isfollowed": false, "favorable_author": 0, "power": 1273, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 
1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 7001109597322739720, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "7005561221919277092", "article_info": {"article_id": "7005561221919277092", "user_id": "651372791072520", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "使用 Vue-CLI 初始化项目", "brief_content": "--- highlight: agate --- > **使用 vue-cli 可以`快速搭建Vue开发环境` 以及 `对应的webpack配置`** ```js //由于国内直接使用 npm 的官方", "is_english": 0, "is_original": 1, "user_index": 1.159171578138246, "original_type": 0, "original_author": "", "content": "", "ctime": "1631109570", "mtime": "1631414011", "rtime": "1631167633", "draft_id": "7005551044809719822", "view_count": 52, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 3, "is_hot": 0, "rank_index": 0.00651938, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "651372791072520", "user_name": "小蛋730", "company": "", "job_title": "", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/bdf793e7f88fb8176ef60348a70e38d8~300x300.image", "level": 1, "description": "", "followee_count": 1, "follower_count": 3, "post_article_count": 7, "digg_article_count": 9, "got_digg_count": 7, "got_view_count": 438, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 11, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 
0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 7005561221919277092, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "7006986662462881800", "article_info": {"article_id": "7006986662462881800", "user_id": "765688994609773", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "表示颜色的方法", "brief_content": "RGB和RGBA 在css样式中看到的#RRGGBB这代码就是表示RGB颜色的十六进制,分别表示红绿蓝三色通道的色阶,色阶表示通道的强弱。每个通道有256阶,取值0-256,三个通道组合起来理论上可以", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1631441313", "mtime": "1631664851", "rtime": "1631499311", "draft_id": "7006978193458462757", "view_count": 35, "collect_count": 0, "digg_count": 0, "comment_count": 0, "hot_index": 1, "is_hot": 0, "rank_index": 0.00651924, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "765688994609773", "user_name": "用户4217277088520", "company": "", "job_title": "", 
"avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/mosaic-legacy/3791/5070639578~300x300.image", "level": 0, "description": "", "followee_count": 0, "follower_count": 0, "post_article_count": 1, "digg_article_count": 0, "got_digg_count": 0, "got_view_count": 35, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 0, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 7006986662462881800, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "6999076144280535053", "article_info": {"article_id": "6999076144280535053", "user_id": "3157453124413358", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640398105870343], "visible_level": 0, "link_url": "", 
"cover_image": "", "is_gfw": 0, "title": "深入理解Weex之原理篇", "brief_content": "这是我参与 8 月更文挑战的第 22 天,活动详情查看: 8月更文挑战 前言 前端工程师们一直在探索编写一套代码,可以运行在H5、安卓、IOS平台。 在早期,采用Hybrid进行H5混合开发,这种模式", "is_english": 0, "is_original": 1, "user_index": 7.717713814584834, "original_type": 0, "original_author": "", "content": "", "ctime": "1629599556", "mtime": "1629689372", "rtime": "1629689372", "draft_id": "6999076084784300045", "view_count": 124, "collect_count": 1, "digg_count": 10, "comment_count": 0, "hot_index": 16, "is_hot": 0, "rank_index": 0.00651685, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3157453124413358", "user_name": "so丶简单", "company": "上海", "job_title": "研发", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/42f2b3038319b09f709a88c4e7da59b6~300x300.image", "level": 2, "description": "美团内推 http://dpurl.cn/89vbVGTz", "followee_count": 5, "follower_count": 69, "post_article_count": 38, "digg_article_count": 34, "got_digg_count": 447, "got_view_count": 30570, "post_shortmsg_count": 3, "digg_shortmsg_count": 2, "isfollowed": false, "favorable_author": 0, "power": 752, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}], "user_interact": {"id": 6999076144280535053, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "6994732613772509215", "article_info": {"article_id": "6994732613772509215", "user_id": "2594503170726343", "category_id": "6809637767543259144", "tag_ids": [6809641090145058824, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "Flutter TabBar 使用介绍", "brief_content": "这是我参与8月更文挑战的第10天,活动详情查看:8月更文挑战 Tab关键元素 TabController 这是Tab页的控制器,用于定义Tab标签和内容页的坐标,还可配置标签页的切换动画效果等。 Ta", "is_english": 0, "is_original": 1, "user_index": 7.788881342176178, "original_type": 0, "original_author": "", "content": "", "ctime": "1628588130", "mtime": "1628746916", "rtime": "1628746916", "draft_id": "6994731021509853192", "view_count": 516, "collect_count": 1, "digg_count": 7, "comment_count": 0, "hot_index": 32, "is_hot": 0, "rank_index": 0.0065127, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2594503170726343", "user_name": "__white", "company": "", "job_title": "移动端开发工程师", "avatar_large": 
"https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/766d6bc843f0145d468ac8627db8db4e~300x300.image", "level": 2, "description": "喜欢 Flutter , Android , React Native , Music~", "followee_count": 12, "follower_count": 47, "post_article_count": 41, "digg_article_count": 35, "got_digg_count": 241, "got_view_count": 38133, "post_shortmsg_count": 0, "digg_shortmsg_count": 4, "isfollowed": false, "favorable_author": 0, "power": 620, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2547019, "tag_id": "6809641090145058824", "tag_name": "Flutter", "color": "", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/1519790365175e2d3ba2174d5c8f3fdc4687a8bbf5768.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1519761568, "mtime": 1631691946, "id_type": 9, "tag_alias": "", "post_article_count": 9303, "concern_user_count": 42137}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6994732613772509215, "omitempty": 
2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "6970820058629013541", "article_info": {"article_id": "6970820058629013541", "user_id": "712139267651623", "category_id": "6809637767543259144", "tag_ids": [6809641209204572174, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/e3cb0de1bcb7427980d1d2030c2625d6~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "HarmonyOS 来了,如何开发一个鸿蒙应用,起始篇", "brief_content": "鸿蒙应用居然可以直接用 js 开发?是不是很想试试呢,鸿蒙的 hml、css 和 js 有哪些特点,如何使用?本文带你一探究竟...", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1623020522", "mtime": "1623125451", "rtime": "1623124316", "draft_id": "6970668624541548558", "view_count": 2208, "collect_count": 24, "digg_count": 38, "comment_count": 11, "hot_index": 159, "is_hot": 0, "rank_index": 0.0064811, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "712139267651623", "user_name": "夜尽灬天明丶", "company": "", "job_title": "花里胡哨的前端", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/mirror-assets/16e6ddf2edd7654044e~tplv-t2oaga2asx-image.image", "level": 2, "description": "", "followee_count": 4, "follower_count": 22, "post_article_count": 7, "digg_article_count": 21, "got_digg_count": 96, "got_view_count": 6317, "post_shortmsg_count": 2, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 0, "power": 159, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 
0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2547106, "tag_id": "6809641209204572174", "tag_name": "鸿蒙OS", "color": "", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/1560486100469dc100639f246a2bd14f26cd5c9dd554b.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1560457382, "mtime": 1631616852, "id_type": 9, "tag_alias": "harmonyOS", "post_article_count": 625, "concern_user_count": 3080}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6970820058629013541, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}, {"article_id": "6924829595006926856", "article_info": {"article_id": "6924829595006926856", "user_id": "3456520257288974", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/36ca4f5994f9446f9686fc67d0057417~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "动态表单之表单组件的插件式加载方案", "brief_content": "关于动态化表单方案前面我们已经有过一次分享,没看过的同学可以看下之前的文章 ZooTeam 拍了拍你,来看看如何设计动态化表单。文章中提到随着业务差异化增多,我们采用了动态表单解决重复开发及逻辑堆叠的问题。随着动态化表单系统运行过程中业务方接入的越来越多,自定义组件插件式加载的…", 
"is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1612313114", "mtime": "1613640789", "rtime": "1612313248", "draft_id": "6924646625109344269", "view_count": 6309, "collect_count": 98, "digg_count": 131, "comment_count": 15, "hot_index": 461, "is_hot": 0, "rank_index": 0.00650789, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3456520257288974", "user_name": "政采云前端团队", "company": "公众号 @ 政采云前端团队", "job_title": "", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/11/14/16e68d08d043c6bc~tplv-t2oaga2asx-image.image", "level": 6, "description": "政采云前端 ZooTeam 团队,不掺水的原创。 团队站点:https://zoo.team", "followee_count": 7, "follower_count": 21640, "post_article_count": 212, "digg_article_count": 224, "got_digg_count": 32190, "got_view_count": 1250356, "post_shortmsg_count": 2, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 1, "power": 45495, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 1, "annual_list_type": 1, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 
1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6924829595006926856, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": {"org_type": 1, "org_id": "6930839512402624526", "online_version_id": 6930875214439907335, "latest_version_id": 6930875214439907335, "power": 35652, "ctime": 1613712296, "mtime": 1631692819, "audit_status": 2, "status": 0, "org_version": {"version_id": "6930875214439907335", "icon": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/16b7e78fac5b49a9a05ff1d2959f93b0~tplv-k3u1fbpfcp-watermark.image", "background": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/41d1c0cd091e42b1b52de07f7fff87e4~tplv-k3u1fbpfcp-zoom-1.image", "name": "政采云前端团队", "introduction": "政采云前端团队(ZooTeam,团队博客亦是 https://zoo.team),\n一个年轻富有激情和创造力的前端团队,隶属于政采云产品研发部,Base 在风景如画的杭州。\n\n为什么取名叫 ZooTeam —— Z 是政采云拼音首字母,oo 是无穷的符号,结合 Zoo 有生物圈的含义,希望后续政采云的前端团队,不论是人才梯队,还是技术体系,都能各面兼备,逐渐成长为一个生态。\n\n详情见语雀官网 https://www.yuque.com/zaotalk/ued/zooteam", "weibo_link": "", "github_link": "", "homepage_link": "https://www.yuque.com/zaotalk/ued/zooteam", "ctime": 1613721135, "mtime": 1613721135, "org_id": "6930839512402624526", "brief_introduction": "政采云前端团队(ZooTeam,团队博客亦是 https://zoo.team),一个年轻富有激情和创造力的前端团队,隶属于政采云产品研发部,Base 在风景如画的杭州。", "introduction_preview": "政采云前端团队(ZooTeam,团队博客亦是 https://zoo.team),\n一个年轻富有激情和创造力的前端团队,隶属于政采云产品研发部,Base 在风景如画的杭州。\n为什么取名叫 ZooTeam —— Z 是政采云拼音首字母,oo 是无穷的符号,结合 Zoo 有生物圈的含义,希望后续政采云的前端团队,不论是人才梯队,还是技术体系,都能各面兼备,逐渐成长为一个生态。\n详情见语雀官网 https://www.yuque.com/zaotalk/ued/zooteam"}, "follower_count": 19514, "article_view_count": 1005136, "article_digg_count": 25601}, "org_user": null, "is_followed": false}, "req_id": "2021091516045401020405722613005E0E"}], "cursor": "eyJ2IjoiNzAwNzYxNTY2NjYwOTk3OTQwMCIsImkiOjQzNDB9", "count": 34210, "has_more": true} | [
"[email protected]"
]
| |
cf34dafd04f5380b38b80c00bd543830273f58c1 | 96f79e659344edb2c6e50d1dd9660b6858054fdc | /lesson_04/server.py | cae3d711a46d4efab2511d01722bf145f97c0015 | []
| no_license | mr-Robot-777/client-server_python | af5b2d7b0bdbec9666e1771afd260b0afd39372a | 977c5cb8bfc57d6dc477eaf9083f154f848d637f | refs/heads/master | 2023-03-20T21:57:44.605765 | 2021-03-19T09:27:30 | 2021-03-19T09:27:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py |
"""Программа-сервер"""
import json
from socket import AF_INET, SOCK_STREAM, socket
from utils import create_parser
from variables import ENCODING, MAX_CONNECTIONS, MAX_PACKAGE_LENGTH
RESPONSE_ERROR = 400
RESPONSE_OK = 200
class Server:
    """Toy JSON-over-TCP server: serves clients one at a time and answers
    each "presence" message with a numeric status code (200 or 400)."""

    def __init__(self):
        # TCP listening socket; host/port come from the CLI argument parser.
        self.transport = socket(AF_INET, SOCK_STREAM)
        self.addr, self.port = create_parser()

    def create_connection(self):
        """Bind the socket and serve clients sequentially (blocking loop).

        Each accepted connection is read once, answered once, and closed.
        """
        self.transport.bind((self.addr, self.port))
        self.transport.listen(MAX_CONNECTIONS)
        while True:
            client, client_address = self.transport.accept()
            response = RESPONSE_ERROR  # default reply if the client sent no data
            data = client.recv(MAX_PACKAGE_LENGTH)
            if data:
                json_answer = data.decode(ENCODING)
                response = self.process_client_message(json.loads(json_answer))
            print(f'Отвечаем клиенту', response)
            client.send(f'{response}'.encode(ENCODING))
            client.close()

    def process_client_message(self, message):
        """Validate a decoded client message and return a status code.

        Only a 'presence' action from the 'GUEST' account is accepted;
        anything else gets RESPONSE_ERROR.
        """
        print('process_client_message', message)
        if message['action'] == 'presence' and message['user']['account_name'] == 'GUEST':
            return RESPONSE_OK
        return RESPONSE_ERROR
return RESPONSE_ERROR
def main():
    """Script entry point: build a Server and block serving clients forever."""
    Server().create_connection()
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
514cdf3548a97713adfb8111d14217eea97c17cf | 19ed724002351006b25175ad584c89f774bd9527 | /cvmodels/models/layers/se_module.py | 7283ff8a838fbc5a58077543bf837198de517ec7 | [
"MIT"
]
| permissive | welkin-feng/ComputerVision | 60df6aa36c26c630bcb3676c4ef0df6762d5329f | 667488e41878d7f0376142a7ae9e1b43c0edd68a | refs/heads/master | 2021-06-25T07:56:07.238130 | 2021-02-02T05:37:23 | 2021-02-02T05:37:23 | 193,149,801 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | from torch import nn
__all__ = ['SEModule']
class SEModule(nn.Module):
    """Squeeze-and-Excitation channel-attention block.

    Globally average-pools the input, squeezes channels through a 1x1 conv
    bottleneck (at least 8 channels wide), expands back, and rescales the
    input by the resulting sigmoid gate.
    """

    def __init__(self, channels, reduction=16, act_layer=nn.ReLU):
        super(SEModule, self).__init__()
        squeezed = max(channels // reduction, 8)  # never narrower than 8
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(
            channels, squeezed, kernel_size=1, padding=0, bias=True)
        self.act = act_layer(inplace=True)
        self.fc2 = nn.Conv2d(
            squeezed, channels, kernel_size=1, padding=0, bias=True)

    def forward(self, x):
        # squeeze -> bottleneck -> excite, then gate the input per channel
        gate = self.fc2(self.act(self.fc1(self.avg_pool(x))))
        return x * gate.sigmoid()
| [
"[email protected]"
]
| |
08b693a29c92e026ff58954982400db441b2cfbc | 010215c1421f5275a846e7154189b22cdd3c89bc | /Data Structures/Tree/identical_trees.py | dc9daa8e21e85efd3307005747d65175e3db3330 | []
| no_license | bsextion/CodingPractice_Py | ab54d5715298645a8fd7ab6945bf3b22d4e6a874 | da2847a04705394c32a6fe1b5f6c6b64c24647a3 | refs/heads/master | 2023-08-16T17:14:47.643989 | 2021-09-28T19:23:40 | 2021-09-28T19:23:40 | 383,658,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | class BinaryTreeNode:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
# below data members used only for some of the problems
self.next = None
self.parent = None
self.count = None
def are_identical(root1: "BinaryTreeNode", root2: "BinaryTreeNode"):
    """Return True iff the two binary trees have the same shape and values.

    BUGFIX: the previous implementation compared in-order serializations
    built by depth_first(). That concatenation carries no null/position
    markers and no separators, so structurally different trees (or
    different multi-digit splits, e.g. nodes 1,23 vs 12,3) could produce
    the same string and wrongly compare equal. A direct structural
    recursion has no such collisions.
    """
    if root1 is None and root2 is None:
        return True   # two empty trees are identical
    if root1 is None or root2 is None:
        return False  # one empty, one not: shapes differ
    return (root1.data == root2.data
            and are_identical(root1.left, root2.left)
            and are_identical(root1.right, root2.right))
def depth_first(root, word):
    """Append the in-order traversal of *root* to *word* and return it."""
    if not root:
        return word
    word = depth_first(root.left, word)
    word = word + str(root.data)
    return depth_first(root.right, word)
# Build two equal trees and compare them.
root1 = BinaryTreeNode(6)
root2 = BinaryTreeNode(6)
root1.left = BinaryTreeNode(4)
root2.left = BinaryTreeNode(4)
root1.right = BinaryTreeNode(7)
# BUGFIX: root1.right was assigned twice and root2.right was never set,
# so the second tree was missing its right child.
root2.right = BinaryTreeNode(7)

are_identical(root1, root2)
| [
"[email protected]"
]
| |
3c07919c47445d995fbca14d989d44437fbce99f | e1efc8e0b0e4629dea61504fbc816c0527691bd9 | /6.redis/redis11_持久化.py | 419d53bb9708d30f4e0f3001c584df957d59fc05 | []
| no_license | xiongmengmeng/xmind-technology | 2bb67a0bf92cfd660cac01f8ab3a2454423ccba5 | e2fdb6987ef805a65f0a4feb52d84383853f4b77 | refs/heads/main | 2023-07-31T07:10:29.868120 | 2021-09-11T08:18:17 | 2021-09-11T08:18:17 | 307,636,242 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,779 | py | import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name="redis"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("持久化")
r2=s2.getRootTopic()
r2.setTitle("持久化")
content={
'目的':[
'解决宕机数据丢失问题'
],
'RDB':[
'默认持久化方式',
'定时将内存的数据以快照形式保存到硬盘',
'dump.rdb',
{'写磁盘方式':[
'二进制 + 数据压缩的方式写磁盘,文件体积小,数据恢复速度快'
]},
{'快照条件':[
'save 900 1 (900秒内至少有一个键被更改)'
]},
{'快照过程':[
{'rdbSave(生成RDB文件)':[
'fork函数(创建子进程)+cow函数(使用写时复制copy-on-write策略):',
'父子进程共享数据段,父进程继续提供读写服务,写脏的页面数据会逐渐和子进程分离开来'
]},
'rdbLoad(从文件加载内存)'
]},
{'加载':[
'redis启动后会读取RDB快照文件,将数据从磁盘载入内存'
]},
{'风险':[
'redis异常退出,会丢失最后一次快照后的更改数据'
]}
],
'AOF':[
'Append Only File----每次写操作都持久到磁盘',
'通过参数appendonly yes开启,默认文件appendonly.aof',
{'写磁盘方式':[
'纯文本文件,内容为redis客户端向redis发送的原始通信协议内容',
'记录的是每一次写命令,数据最全,但文件体积大,数据恢复速度慢'
]},
{'加载':[
'从持久化的日志中文件恢复数据'
]},
{'风险':[
'操作系统的缓存机制,数据并没有真正写入磁盘,只是进入系统的磁盘缓存,默认30s同步一次',
'通过参数优化此行为:appendfsync everysec(默认),每秒执行一次同步操作'
]},
'对AOF文件定时rewrite,避免文件体积持续膨胀'
],
'混合持久化':[
'AOF rewrite时,以RDB格式在AOF文件中写入一个数据快照,再把在这期间产生的每一个写命令,追加到AOF文件中',
'RDB是二进制压缩写入,AOF文件体积变小',
'Redis 4.0 以上版本支持'
],
'持久化策略选择':[
'Redis中的数据完全丢弃也没有关系,可以不进行任何持久化',
'单机环境,如可接受十几分钟或更多数据丢失,选择RDB;如只能接受秒级数据丢失,选择AOF',
'多数情况,会配置主从环境,slave既可实现数据的热备,也可分担Redis读请求,以及在master宕掉后继续提供服务'
],
'常见性能问题':[
{'master写内存快照':[
'save命令调度rdbSave函数',
'会阻塞主线程工作',
'当快照比较大时对性能影响非常大,会间断性暂停服务'
]},
{'master AOF持久化':[
'如不重写AOF文件,对性能的影响较小',
'但AOF文件会不断增大,AOF文件过大会影响Master重启的恢复速度',
'Master调用BGREWRITEAOF重写AOF文件,会占大量的CPU和内存资源,导致服务load过高,出现短暂服务暂停现象'
]},
'总结:Master最好不做任何持久化工作,如RDB内存快照和AOF日志文件',
{'建议':[
'如数据重要,某个slave开启AOF备份数据,策略设置为每秒同步一次',
'为了主从复制的速度和连接的稳定性,master和slave最好在同一个局域网'
]}
]
}
#构建xmind
xmind.build(content,r2)
#保存xmind
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind") | [
"[email protected]"
]
| |
af8b8ae708671da42ab4c86b784d196be55e8ce3 | 4b55e257e0af5181c75a3b840a2c206cc6caec92 | /lightreid/optim/__init__.py | 05dac52793b24aa106f2ff3dcec83b52c1b6d1b8 | []
| no_license | Leopold0801/light-reid | 1799374b6e6552edeade737b137364de34d5b810 | aeb9fb8494611512dc9e3f3e3e7c9f7513c27a99 | refs/heads/master | 2022-11-26T21:19:39.005565 | 2020-08-10T16:56:05 | 2020-08-10T16:56:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | """
@author: Guan'an Wang
@contact: [email protected]
"""
from .lr_scheduler import WarmupMultiStepLR, DelayedCosineAnnealingLR
class Optimizer(object):
    """Bundle an optimizer, its LR scheduler and training-length settings.

    Extra keyword arguments are restricted to the names in KWARGS and are
    attached to the instance as plain attributes.
    """

    KWARGS = ['fix_cnn_epochs']

    def __init__(self, optimizer, lr_scheduler, max_epochs, **kwargs):
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.max_epochs = max_epochs
        # Only whitelisted extras may be set; anything else is a caller bug.
        for name in kwargs:
            assert name in Optimizer.KWARGS, 'expect {}, but got {}'.format(Optimizer.KWARGS, name)
            setattr(self, name, kwargs[name])
| [
"[email protected]"
]
| |
43714fe8477ba7894bde249b6e9a5d3cb9754e61 | c887e00981e6368e94916ca9b93c4de79a5c1a22 | /lawncare/blog/views.py | e3b3935c8c13e11b84ee6b90db6746f75b732bc7 | []
| no_license | devArist/school_project | 18dc0427e2d6a45abfff8a72dbe2c52a7afd8778 | 4d1c1ba5e2a9b4253e950e2c95e0ce6ef22efe3f | refs/heads/main | 2023-05-07T09:51:50.664546 | 2021-05-28T12:44:11 | 2021-05-28T12:44:11 | 368,508,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | from django.shortcuts import render
from . import models
# Create your views here.
def blog(request):
    """Render the blog list page with published posts, newest first."""
    blogs = models.Blog.objects.filter(status=True).order_by('-date_update')
    # locals() hands every local name (request, blogs) to the template
    # context, so renaming these variables would break the template.
    return render(request, 'blog/blog.html', locals())
def detail(request, pk):
blog = models.Blog.objects.get(pk=pk)
return render(request, 'blog/blog-single.html', locals()) | [
"[email protected]"
]
| |
6deeb692d77cfa26855fc96a02dc63807adf18bd | 338ad096a3d0311ccf994f38848646b4828b704b | /St102.py | d0dae54925ecf45a6b03322b580efe91d984e6ac | []
| no_license | thasleem-banu/beginnar | ed8a9ff7da23b194d5ce25ab0bb5fc0c4ac0a273 | b55f681f62e2b9e25c0fc5d8bf48f3e08321fbd2 | refs/heads/master | 2020-06-25T12:17:01.937351 | 2019-07-28T16:53:05 | 2019-07-28T16:53:05 | 199,305,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | ch=int(input())
cho=list(map(int,input().split()))
sha=0
for p in cho:
sha+=p
print(sha)
| [
"[email protected]"
]
| |
6af0eef7eaecf4e08598669f69be9120a2059704 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03722/s165384014.py | 281a6c4886b4c12376aea57bca9bad100458536f | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | N, M = map(int, input().split())
road = []
for i in range(M):
a, b, c = map(int, input().split())
road.append((a-1, b-1, -c))
INF = 10**15
d = [INF] * N
d[0] = 0
def bellman_ford():
global d
for i in range(N):
for a, b, c in road:
d[b] = min(d[b], d[a] + c)
bellman_ford()
for a, b, c in road:
new_d = d[a] + c
if new_d < d[b]:
d[b] = - float('inf')
bellman_ford()
if d[-1] > -float('inf'):
print(int(-d[-1]))
else:
print('inf') | [
"[email protected]"
]
| |
9ca967cb2d1a93ca45878ccdbaa7a626b963fab4 | ef1d38cfef63f22e149d6c9dd14e98955693c50d | /webhook/protos/pogoprotos/data/ditto/rpc_response_event_params_pb2.py | e4f473d06c0e00583908cdcab8447be689edd11e | []
| no_license | Kneckter/WebhookListener | 4c186d9012fd6af69453d9d51ae33a38aa19b5fd | ea4ff29b66d6abf21cc1424ed976af76c3da5511 | refs/heads/master | 2022-10-09T04:26:33.466789 | 2019-11-24T17:30:59 | 2019-11-24T17:30:59 | 193,372,117 | 2 | 0 | null | 2022-09-23T22:26:10 | 2019-06-23T16:39:34 | Python | UTF-8 | Python | false | true | 3,011 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/data/ditto/rpc_response_event_params.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/data/ditto/rpc_response_event_params.proto',
package='pogoprotos.data.ditto',
syntax='proto3',
serialized_pb=_b('\n5pogoprotos/data/ditto/rpc_response_event_params.proto\x12\x15pogoprotos.data.ditto\"J\n\x16RpcResponseEventParams\x12\x0e\n\x06rpc_id\x18\x01 \x01(\r\x12\x0e\n\x06status\x18\x02 \x01(\r\x12\x10\n\x08payloads\x18\x03 \x03(\x0c\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_RPCRESPONSEEVENTPARAMS = _descriptor.Descriptor(
name='RpcResponseEventParams',
full_name='pogoprotos.data.ditto.RpcResponseEventParams',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rpc_id', full_name='pogoprotos.data.ditto.RpcResponseEventParams.rpc_id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='status', full_name='pogoprotos.data.ditto.RpcResponseEventParams.status', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='payloads', full_name='pogoprotos.data.ditto.RpcResponseEventParams.payloads', index=2,
number=3, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=80,
serialized_end=154,
)
DESCRIPTOR.message_types_by_name['RpcResponseEventParams'] = _RPCRESPONSEEVENTPARAMS
RpcResponseEventParams = _reflection.GeneratedProtocolMessageType('RpcResponseEventParams', (_message.Message,), dict(
DESCRIPTOR = _RPCRESPONSEEVENTPARAMS,
__module__ = 'pogoprotos.data.ditto.rpc_response_event_params_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.data.ditto.RpcResponseEventParams)
))
_sym_db.RegisterMessage(RpcResponseEventParams)
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
]
| |
f9ce9ebbf41ca7bdf7c0ae9d1b3acfbe30350953 | 525c6a69bcf924f0309b69f1d3aff341b06feb8e | /sunyata/layer/recurrent/lstm.py | ab6c9420189140a770d997facf97b92ca2501266 | []
| no_license | knighton/sunyata_2017 | ba3af4f17184d92f6277d428a81802ac12ef50a4 | 4e9d8e7d5666d02f9bb0aa9dfbd16b7a8e97c1c8 | refs/heads/master | 2021-09-06T13:19:06.341771 | 2018-02-07T00:28:07 | 2018-02-07T00:28:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,097 | py | from ... import backend as Z
from ... import init
from ..base import LinkBuilder
from .base import RecurrentLayer, RecurrentSpec
class LSTMLayer(RecurrentLayer):
    """Recurrent layer implementing a standard LSTM cell.

    The kernels are packed: the hidden size is input_kernel.shape[1] // 4,
    with the four gate blocks (i, f, o, g) concatenated along the second
    axis of both kernels and the bias.
    """

    def __init__(self, forward, last, input_kernel, recurrent_kernel, bias):
        # Recover the hidden size from the packed kernel: 4 gates per unit.
        dim = input_kernel.shape[1] // 4
        dtype = input_kernel.dtype.name
        super().__init__(dim, dtype, forward, last, dim)
        self.input_kernel = self.add_param(input_kernel)
        self.recurrent_kernel = self.add_param(recurrent_kernel)
        self.bias = self.add_param(bias)

    def step(self, x, prev_state, prev_internal_state):
        """One LSTM time step: returns (h_t, c_t) from x_t, h_{t-1}, c_{t-1}."""
        # Single fused affine transform yields all four gate pre-activations.
        a = Z.matmul(x, self.input_kernel) + \
            Z.matmul(prev_state, self.recurrent_kernel) + self.bias
        index = self.out_dim
        i = Z.sigmoid(a[:, :index])               # input gate
        f = Z.sigmoid(a[:, index:2 * index])      # forget gate
        o = Z.sigmoid(a[:, 2 * index:3 * index])  # output gate
        g = Z.tanh(a[:, 3 * index:])              # candidate cell values
        next_internal_state = f * prev_internal_state + i * g
        next_state = o * Z.tanh(next_internal_state)
        return next_state, next_internal_state
class LSTMSpec(RecurrentSpec):
    """Spec (builder) for LSTMLayer: holds direction/return flags and the
    initializers for the packed kernels and bias."""

    def __init__(self, dim=None, forward=True, last=False,
                 input_kernel_init='glorot_uniform',
                 recurrent_kernel_init='orthogonal', bias_init='zeros'):
        super().__init__(dim, forward, last)
        # init.get resolves an initializer name (or object) to a callable.
        self.input_kernel_init = init.get(input_kernel_init)
        self.recurrent_kernel_init = init.get(recurrent_kernel_init)
        self.bias_init = init.get(bias_init)

    def make_layer(self, in_dim, out_dim, dtype):
        """Materialize an LSTMLayer with freshly initialized parameters."""
        # All four gates are packed along the second axis, hence 4 * out_dim.
        input_kernel_shape = in_dim, 4 * out_dim
        input_kernel = self.input_kernel_init(
            input_kernel_shape, dtype, 'conv_kernel')
        recurrent_kernel_shape = out_dim, 4 * out_dim
        recurrent_kernel = self.recurrent_kernel_init(
            recurrent_kernel_shape, dtype)
        bias_shape = 4 * out_dim,
        bias = self.bias_init(bias_shape, dtype)
        return LSTMLayer(self.go_forward, self.ret_last, input_kernel,
                         recurrent_kernel, bias)
LSTM = LinkBuilder(LSTMSpec)
| [
"[email protected]"
]
| |
2bc6ce1e38b0ff11a43a0471d5895cf0445c4e75 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/ads/admob/v1/ads-admob-v1-py/google/ads/admob/__init__.py | 99b0f676816b735ae222673d657ca23ba987d36e | [
"Apache-2.0"
]
| permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,606 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.ads.admob_v1.services.ad_mob_api.client import AdMobApiClient
from google.ads.admob_v1.services.ad_mob_api.async_client import AdMobApiAsyncClient
from google.ads.admob_v1.types.admob_api import GenerateMediationReportRequest
from google.ads.admob_v1.types.admob_api import GenerateMediationReportResponse
from google.ads.admob_v1.types.admob_api import GenerateNetworkReportRequest
from google.ads.admob_v1.types.admob_api import GenerateNetworkReportResponse
from google.ads.admob_v1.types.admob_api import GetPublisherAccountRequest
from google.ads.admob_v1.types.admob_api import ListPublisherAccountsRequest
from google.ads.admob_v1.types.admob_api import ListPublisherAccountsResponse
from google.ads.admob_v1.types.admob_resources import DateRange
from google.ads.admob_v1.types.admob_resources import LocalizationSettings
from google.ads.admob_v1.types.admob_resources import MediationReportSpec
from google.ads.admob_v1.types.admob_resources import NetworkReportSpec
from google.ads.admob_v1.types.admob_resources import PublisherAccount
from google.ads.admob_v1.types.admob_resources import ReportFooter
from google.ads.admob_v1.types.admob_resources import ReportHeader
from google.ads.admob_v1.types.admob_resources import ReportRow
from google.ads.admob_v1.types.admob_resources import ReportWarning
from google.ads.admob_v1.types.admob_resources import StringList
from google.ads.admob_v1.types.admob_resources import SortOrder
__all__ = ('AdMobApiClient',
'AdMobApiAsyncClient',
'GenerateMediationReportRequest',
'GenerateMediationReportResponse',
'GenerateNetworkReportRequest',
'GenerateNetworkReportResponse',
'GetPublisherAccountRequest',
'ListPublisherAccountsRequest',
'ListPublisherAccountsResponse',
'DateRange',
'LocalizationSettings',
'MediationReportSpec',
'NetworkReportSpec',
'PublisherAccount',
'ReportFooter',
'ReportHeader',
'ReportRow',
'ReportWarning',
'StringList',
'SortOrder',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
c978e42a536117b3d4985389f63d8c254d9dd9e9 | 129e65b23a172ea686f220868c923eb4b08493c7 | /game/PyGame/Others/inkspill_buggy4.py | 2c65fd70d21bcd90e985ce83654362ddd56169bd | []
| no_license | urstkj/Python | 8dcf434858f9eb171204e064237bb10d76fe7f16 | 5a41df7c57c1975e0d335f59b528e28ba63cab85 | refs/heads/master | 2023-01-01T14:15:42.833144 | 2020-10-12T15:00:55 | 2020-10-12T15:00:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,580 | py | #!/usr/local/bin/python
#-*- coding: utf-8 -*-
# This version of the game has a bug in it. See if you can figure out how to fix it.
# http://inventwithpython.com/pygame/buggy
# Bug Description: Game doesn't run - "IndexError: list index out of range"
# Ink Spill (a Flood It clone)
# http://inventwithpython.com/pygame
# By Al Sweigart [email protected]
# Released under a "Simplified BSD" license
import copy
import pygame
from pygame.locals import *
import random
import sys
import webbrowser
# There are different box sizes, number of boxes, and
# life depending on the "board size" setting selected.
SMALLBOXSIZE = 60 # size is in pixels
MEDIUMBOXSIZE = 20
LARGEBOXSIZE = 11
SMALLBOARDSIZE = 6 # size is in boxes
MEDIUMBOARDSIZE = 17
LARGEBOARDSIZE = 30
SMALLMAXLIFE = 10 # number of turns
MEDIUMMAXLIFE = 30
LARGEMAXLIFE = 64
FPS = 30
WINDOWWIDTH = 640
WINDOWHEIGHT = 480
boxSize = MEDIUMBOXSIZE
PALETTEGAPSIZE = 10
PALETTESIZE = 45
EASY = 0 # arbitrary but unique value
MEDIUM = 1 # arbitrary but unique value
HARD = 2 # arbitrary but unique value
difficulty = MEDIUM # game starts in "medium" mode
maxLife = MEDIUMMAXLIFE
boardWidth = MEDIUMBOARDSIZE
boardHeight = MEDIUMBOARDSIZE
# R G B
WHITE = (255, 255, 255)
DARKGRAY = (70, 70, 70)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
ORANGE = (255, 128, 0)
PURPLE = (255, 0, 255)
# The first color in each scheme is the background color, the next six are the palette colors.
COLORSCHEMES = (((150, 200, 255), RED, GREEN, BLUE, YELLOW, ORANGE, PURPLE),
((0, 155, 104), (97, 215, 164), (228, 0, 69), (0, 125, 50), (204, 246, 0), (148, 0, 45), (241, 109, 149)),
((195, 179, 0), (255, 239, 115), (255, 226, 0), (147, 3, 167), (24, 38, 176), (166, 147, 0), (197, 97, 211)),
((85, 0, 0), (155, 39, 102), (0, 201, 13), (255, 118, 0), (206, 0, 113), (0, 130, 9), (255, 180, 115)),
((191, 159, 64), (183, 182, 208), (4, 31, 183), (167, 184, 45), (122, 128, 212), (37, 204, 7), (88, 155, 213)),
((200, 33, 205), (116, 252, 185), (68, 56, 56), (52, 238, 83), (23, 149, 195), (222, 157, 227), (212, 86, 185)))
for i in range(len(COLORSCHEMES)):
assert len(COLORSCHEMES[i]) == 7, 'Color scheme %s does not have exactly 7 colors.' % (i)
bgColor = COLORSCHEMES[0][0]
paletteColors = COLORSCHEMES[0][1:]
def main():
    """Set up pygame, then run the Ink Spill game loop forever.

    The loop redraws the screen each frame, dispatches mouse clicks to the
    Settings/Reset buttons or the color palettes, applies flood fills, and
    detects win (board is one color) / loss (life exhausted).
    """
    global FPSCLOCK, DISPLAYSURF, LOGOIMAGE, SPOTIMAGE, SETTINGSIMAGE, SETTINGSBUTTONIMAGE, RESETBUTTONIMAGE

    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))

    # Load images
    LOGOIMAGE = pygame.image.load('inkspilllogo.png')
    SPOTIMAGE = pygame.image.load('inkspillspot.png')
    SETTINGSIMAGE = pygame.image.load('inkspillsettings.png')
    SETTINGSBUTTONIMAGE = pygame.image.load('inkspillsettingsbutton.png')
    RESETBUTTONIMAGE = pygame.image.load('inkspillresetbutton.png')

    pygame.display.set_caption('Ink Spill')
    mousex = 0
    mousey = 0
    mainBoard = generateRandomBoard(boardWidth, boardHeight, difficulty)
    life = maxLife
    lastPaletteClicked = None

    while True: # main game loop
        paletteClicked = None
        resetGame = False

        # Draw the screen.
        DISPLAYSURF.fill(bgColor)
        drawLogoAndButtons()
        drawBoard(mainBoard)
        drawLifeMeter(life)
        drawPalettes()

        checkForQuit()
        for event in pygame.event.get(): # event handling loop
            if event.type == MOUSEBUTTONUP:
                mousex, mousey = event.pos
                # Settings button occupies the window's bottom-right corner.
                if pygame.Rect(WINDOWWIDTH - SETTINGSBUTTONIMAGE.get_width(),
                               WINDOWHEIGHT - SETTINGSBUTTONIMAGE.get_height(),
                               SETTINGSBUTTONIMAGE.get_width(),
                               SETTINGSBUTTONIMAGE.get_height()).collidepoint(mousex, mousey):
                    resetGame = showSettingsScreen() # clicked on Settings button
                elif pygame.Rect(WINDOWWIDTH - RESETBUTTONIMAGE.get_width(),
                                 WINDOWHEIGHT - SETTINGSBUTTONIMAGE.get_height() - RESETBUTTONIMAGE.get_height(),
                                 RESETBUTTONIMAGE.get_width(),
                                 RESETBUTTONIMAGE.get_height()).collidepoint(mousex, mousey):
                    resetGame = True # clicked on Reset button
                else:
                    # check if a palette button was clicked
                    paletteClicked = getColorOfPaletteAt(mousex, mousey)

        if paletteClicked != None and paletteClicked != lastPaletteClicked:
            # a palette button was clicked that is different from the
            # last palette button clicked (this check prevents the player
            # from accidentally clicking the same palette twice)
            lastPaletteClicked = paletteClicked
            floodAnimation(mainBoard, paletteClicked)
            life -= 1

            resetGame = False
            if hasWon(mainBoard):
                for i in range(4): # flash border 4 times
                    flashBorderAnimation(WHITE, mainBoard)
                resetGame = True
                pygame.time.wait(2000) # pause so the player can bask in victory
            elif life == 0:
                # life is zero, so player has lost
                drawLifeMeter(0)
                pygame.display.update()
                pygame.time.wait(400)
                for i in range(4):
                    flashBorderAnimation(BLACK, mainBoard)
                resetGame = True
                pygame.time.wait(2000) # pause so the player can suffer in their defeat

        if resetGame:
            # start a new game
            mainBoard = generateRandomBoard(boardWidth, boardHeight, difficulty)
            life = maxLife
            lastPaletteClicked = None

        pygame.display.update()
        FPSCLOCK.tick(FPS)
def checkForQuit():
    """Terminate the program on any QUIT event or an Escape keypress.

    Other KEYUP events are re-posted so later event handling still sees
    them in their original order.
    """
    if pygame.event.get(QUIT):
        # At least one QUIT event was queued; terminate immediately.
        pygame.quit()
        sys.exit()
    for event in pygame.event.get(KEYUP):
        if event.key == K_ESCAPE:
            pygame.quit()  # terminate if the KEYUP event was for the Esc key
            sys.exit()
        pygame.event.post(event)  # put the other KEYUP event objects back
def hasWon(board):
    """Return True when every box on the board matches the top-left box."""
    target = board[0][0]
    return all(board[x][y] == target
               for x in range(boardWidth)
               for y in range(boardHeight))
def showSettingsScreen():
    """Run the Settings screen's event loop.

    Returns True when the player changed the difficulty or board size
    (the caller then starts a new game), False otherwise.
    """
    global difficulty, boxSize, boardWidth, boardHeight, maxLife, paletteColors, bgColor

    # The pixel coordinates in this function were obtained by loading
    # the inkspillsettings.png image into a graphics editor and reading
    # the pixel coordinates from there. Handy trick.

    origDifficulty = difficulty
    origBoxSize = boxSize
    screenNeedsRedraw = True

    while True:
        if screenNeedsRedraw:
            DISPLAYSURF.fill(bgColor)
            DISPLAYSURF.blit(SETTINGSIMAGE, (0, 0))

            # place the ink spot marker next to the selected difficulty
            if difficulty == EASY:
                DISPLAYSURF.blit(SPOTIMAGE, (30, 4))
            if difficulty == MEDIUM:
                DISPLAYSURF.blit(SPOTIMAGE, (8, 41))
            if difficulty == HARD:
                DISPLAYSURF.blit(SPOTIMAGE, (30, 76))

            # place the ink spot marker next to the selected size
            if boxSize == SMALLBOXSIZE:
                DISPLAYSURF.blit(SPOTIMAGE, (22, 150))
            if boxSize == MEDIUMBOXSIZE:
                DISPLAYSURF.blit(SPOTIMAGE, (11, 185))
            if boxSize == LARGEBOXSIZE:
                DISPLAYSURF.blit(SPOTIMAGE, (24, 220))

            for i in range(len(COLORSCHEMES)):
                drawColorSchemeBoxes(500, i * 60 + 30, i)

            pygame.display.update()

        screenNeedsRedraw = False # by default, don't redraw the screen
        for event in pygame.event.get(): # event handling loop
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == KEYUP:
                if event.key == K_ESCAPE:
                    # Esc key on settings screen goes back to game
                    return not (origDifficulty == difficulty and origBoxSize == boxSize)
            elif event.type == MOUSEBUTTONUP:
                screenNeedsRedraw = True # screen should be redrawn
                mousex, mousey = event.pos # syntactic sugar

                # check for clicks on the difficulty buttons
                if pygame.Rect(74, 16, 111, 30).collidepoint(mousex, mousey):
                    difficulty = EASY
                elif pygame.Rect(53, 50, 104, 29).collidepoint(mousex, mousey):
                    difficulty = MEDIUM
                elif pygame.Rect(72, 85, 65, 31).collidepoint(mousex, mousey):
                    difficulty = HARD

                # check for clicks on the size buttons
                elif pygame.Rect(63, 156, 84, 31).collidepoint(mousex, mousey):
                    # small board size setting:
                    boxSize = SMALLBOXSIZE
                    boardWidth = SMALLBOARDSIZE
                    boardHeight = SMALLBOARDSIZE
                    maxLife = SMALLMAXLIFE
                elif pygame.Rect(52, 192, 106, 32).collidepoint(mousex, mousey):
                    # medium board size setting:
                    boxSize = MEDIUMBOXSIZE
                    boardWidth = MEDIUMBOARDSIZE
                    boardHeight = MEDIUMBOARDSIZE
                    maxLife = MEDIUMMAXLIFE
                elif pygame.Rect(67, 228, 58, 37).collidepoint(mousex, mousey):
                    # large board size setting:
                    boxSize = LARGEBOXSIZE
                    boardWidth = LARGEBOARDSIZE
                    boardHeight = LARGEBOARDSIZE
                    maxLife = LARGEMAXLIFE
                elif pygame.Rect(14, 299, 371, 97).collidepoint(mousex, mousey):
                    # clicked on the "learn programming" ad
                    webbrowser.open('http://inventwithpython.com') # opens a web browser
                elif pygame.Rect(178, 418, 215, 34).collidepoint(mousex, mousey):
                    # clicked on the "back to game" button
                    return not (origDifficulty == difficulty and origBoxSize == boxSize)

            # NOTE(review): this loop runs for every event type and reads
            # mousex/mousey, which are only set on MOUSEBUTTONUP — a KEYUP
            # arriving before any click would raise NameError. Confirm intent.
            for i in range(len(COLORSCHEMES)):
                # clicked on a color scheme button
                if pygame.Rect(500, 30 + i * 60, MEDIUMBOXSIZE * 3, MEDIUMBOXSIZE * 2).collidepoint(mousex, mousey):
                    bgColor = COLORSCHEMES[i][0]
                    paletteColors = COLORSCHEMES[i][1:]
def drawColorSchemeBoxes(x, y, schemeNum):
    """Draw one 3x2 color-scheme preview on the Settings screen.

    Blits the ink-spot marker to the left of the preview when that scheme's
    palette is the one currently selected.
    """
    scheme = COLORSCHEMES[schemeNum]
    for i in range(6):
        col = i % 3
        row = i // 3
        # scheme[0] is the background color; entries 1..6 are the palette.
        pygame.draw.rect(DISPLAYSURF, scheme[i + 1],
                         (x + MEDIUMBOXSIZE * col, y + MEDIUMBOXSIZE * row,
                          MEDIUMBOXSIZE, MEDIUMBOXSIZE))
    if paletteColors == scheme[1:]:
        # put the ink spot next to the selected color scheme
        DISPLAYSURF.blit(SPOTIMAGE, (x - 50, y))
def flashBorderAnimation(color, board, animationSpeed=30):
    """Flash the window in *color* (fade in, then out) with the board on top."""
    origSurf = DISPLAYSURF.copy()
    flashSurf = pygame.Surface(DISPLAYSURF.get_size())
    flashSurf = flashSurf.convert_alpha()
    for start, end, step in ((0, 256, 1), (255, 0, -1)):
        # the first iteration on the outer loop will set the inner loop
        # to have transparency go from 0 to 255, the second iteration will
        # have it go from 255 to 0. This is the "flash".
        for transparency in range(start, end, animationSpeed * step):
            DISPLAYSURF.blit(origSurf, (0, 0))
            r, g, b = color
            flashSurf.fill((r, g, b, transparency))
            DISPLAYSURF.blit(flashSurf, (0, 0))
            drawBoard(board) # draw board ON TOP OF the transparency layer
            pygame.display.update()
            FPSCLOCK.tick(FPS)
    DISPLAYSURF.blit(origSurf, (0, 0)) # redraw the original surface
def floodAnimation(board, paletteClicked, animationSpeed=25):
    """Apply the flood fill to *board* and fade the recolored board in.

    Mutates *board* in place via floodFill, then cross-fades from a deep
    copy of the pre-fill board to the new one.
    """
    origBoard = copy.deepcopy(board)
    floodFill(board, board[0][0], paletteClicked, 0, 0)
    for transparency in range(0, 255, animationSpeed):
        # The "new" board slowly become opaque over the original board.
        drawBoard(origBoard)
        drawBoard(board, transparency)
        pygame.display.update()
        FPSCLOCK.tick(FPS)
def generateRandomBoard(width, height, difficulty=MEDIUM):
    """Return a width x height board of random palette-color indexes.

    Each board[x][y] holds an index into paletteColors. On EASY/MEDIUM
    difficulty a number of interior boxes are overwritten with a
    neighbor's color so the board starts with larger single-color
    regions (easier to flood).
    """
    board = []
    for x in range(width):
        column = []
        # BUGFIX: the inner loop was missing, so each column held only one
        # box and board[x][y] raised "IndexError: list index out of range".
        for y in range(height):
            column.append(random.randint(0, len(paletteColors) - 1))
        board.append(column)

    # Make board easier by setting some boxes to same color as a neighbor.
    # Determine how many boxes to change.
    if difficulty == EASY:
        if boxSize == SMALLBOXSIZE:
            boxesToChange = 100
        else:
            boxesToChange = 1500
    elif difficulty == MEDIUM:
        if boxSize == SMALLBOXSIZE:
            boxesToChange = 5
        else:
            boxesToChange = 200
    else:
        boxesToChange = 0

    # Change neighbors' colors:
    for i in range(boxesToChange):
        # Randomly choose a box whose color to copy
        x = random.randint(1, width - 2)
        y = random.randint(1, height - 2)

        # Randomly choose neighbors to change.
        # BUGFIX: these lines used '==' (no-op comparisons) instead of '='
        # assignments, so the difficulty adjustment never took effect.
        direction = random.randint(0, 3)
        if direction == 0: # change left and up neighbor
            board[x - 1][y] = board[x][y]
            board[x][y - 1] = board[x][y]
        elif direction == 1: # change right and down neighbor
            board[x + 1][y] = board[x][y]
            board[x][y + 1] = board[x][y]
        elif direction == 2: # change right and up neighbor
            board[x][y - 1] = board[x][y]
            board[x + 1][y] = board[x][y]
        else: # change left and down neighbor
            board[x][y + 1] = board[x][y]
            board[x - 1][y] = board[x][y]
    return board
def drawLogoAndButtons():
    """Blit the Ink Spill logo (top-right) and the Settings/Reset buttons
    (stacked in the bottom-right corner)."""
    # draw the Ink Spill logo and Settings and Reset buttons.
    DISPLAYSURF.blit(LOGOIMAGE, (WINDOWWIDTH - LOGOIMAGE.get_width(), 0))
    DISPLAYSURF.blit(SETTINGSBUTTONIMAGE, (WINDOWWIDTH - SETTINGSBUTTONIMAGE.get_width(), WINDOWHEIGHT - SETTINGSBUTTONIMAGE.get_height()))
    DISPLAYSURF.blit(RESETBUTTONIMAGE, (WINDOWWIDTH - RESETBUTTONIMAGE.get_width(), WINDOWHEIGHT - SETTINGSBUTTONIMAGE.get_height() - RESETBUTTONIMAGE.get_height()))
def drawBoard(board, transparency=255):
    """Draw the board's colored boxes (optionally translucent) plus a
    one-pixel black outline around the whole board."""
    # The colored squares are drawn to a temporary surface which is then
    # drawn to the DISPLAYSURF surface. This is done so we can draw the
    # squares with transparency on top of DISPLAYSURF as it currently is.
    tempSurf = pygame.Surface(DISPLAYSURF.get_size())
    tempSurf = tempSurf.convert_alpha()
    tempSurf.fill((0, 0, 0, 0))  # fully transparent base
    for x in range(boardWidth):
        for y in range(boardHeight):
            left, top = leftTopPixelCoordOfBox(x, y)
            r, g, b = paletteColors[board[x][y]]
            pygame.draw.rect(tempSurf, (r, g, b, transparency), (left, top, boxSize, boxSize))
    left, top = leftTopPixelCoordOfBox(0, 0)
    pygame.draw.rect(tempSurf, BLACK, (left-1, top-1, boxSize * boardWidth + 1, boxSize * boardHeight + 1), 1)
    DISPLAYSURF.blit(tempSurf, (0, 0))
def drawPalettes():
    """Draw the row of clickable color palettes along the bottom."""
    # Draws the six color palettes at the bottom of the screen.
    numColors = len(paletteColors)
    # Center the row of palettes horizontally.
    xmargin = int((WINDOWWIDTH - ((PALETTESIZE * numColors) + (PALETTEGAPSIZE * (numColors - 1)))) / 2)
    for i in range(numColors):
        left = xmargin + (i * PALETTESIZE) + (i * PALETTEGAPSIZE)
        top = WINDOWHEIGHT - PALETTESIZE - 10
        pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top, PALETTESIZE, PALETTESIZE))
        # Inner border in the background color frames each palette.
        pygame.draw.rect(DISPLAYSURF, bgColor, (left + 2, top + 2, PALETTESIZE - 4, PALETTESIZE - 4), 2)
def drawLifeMeter(currentLife):
    """Draw the vertical life meter near the window's left edge."""
    lifeBoxSize = int((WINDOWHEIGHT - 40) / maxLife)
    # Background of the meter.
    pygame.draw.rect(DISPLAYSURF, bgColor,
                     (20, 20, 20, 20 + (maxLife * lifeBoxSize)))
    for i in range(maxLife):
        boxTop = 20 + (i * lifeBoxSize)
        if (maxLife - i) <= currentLife:
            # This segment of life remains: fill it in solid red.
            pygame.draw.rect(DISPLAYSURF, RED, (20, boxTop, 20, lifeBoxSize))
        # Every segment gets a white outline, filled or not.
        pygame.draw.rect(DISPLAYSURF, WHITE, (20, boxTop, 20, lifeBoxSize), 1)
def getColorOfPaletteAt(x, y):
    """Return the index into paletteColors of the palette under pixel
    (x, y), or None when the point is not over any palette."""
    numColors = len(paletteColors)
    totalWidth = (PALETTESIZE * numColors) + (PALETTEGAPSIZE * (numColors - 1))
    xmargin = int((WINDOWWIDTH - totalWidth) / 2)
    top = WINDOWHEIGHT - PALETTESIZE - 10
    for i in range(numColors):
        left = xmargin + i * (PALETTESIZE + PALETTEGAPSIZE)
        if pygame.Rect(left, top, PALETTESIZE, PALETTESIZE).collidepoint(x, y):
            return i
    return None  # no palette exists at these x, y coordinates
def floodFill(board, oldColor, newColor, x, y):
    """Recolor the connected region of oldColor boxes containing (x, y).

    Classic recursive flood fill; does nothing when the colors match or
    the starting box is not oldColor.
    """
    if oldColor == newColor or board[x][y] != oldColor:
        return
    board[x][y] = newColor
    # Recurse into the four orthogonal neighbors that lie on the board.
    for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
        if 0 <= nx < boardWidth and 0 <= ny < boardHeight:
            floodFill(board, oldColor, newColor, nx, ny)
def leftTopPixelCoordOfBox(boxx, boxy):
    """Convert board coordinates (boxx, boxy) into the pixel coordinates
    of that box's top-left corner, with the board centered on screen."""
    boardPixelWidth = boardWidth * boxSize
    boardPixelHeight = boardHeight * boxSize
    xmargin = int((WINDOWWIDTH - boardPixelWidth) / 2)
    ymargin = int((WINDOWHEIGHT - boardPixelHeight) / 2)
    left = boxx * boxSize + xmargin
    top = boxy * boxSize + ymargin
    return (left, top)
if __name__ == '__main__':
    main()  # start the game only when executed as a script, not on import
| [
"[email protected]"
]
| |
774941c1e4550ae6504fd4d14f90db5cb0b4ea86 | a71fbf421c43fcb34fe7c8000eb807677821683c | /python_import/p42_from_module.py | 6adea0acaab4069110e5384b1022f70212ec8b84 | []
| no_license | leekyunghun/bit_seoul | ccd96dca3774f259e04b8388e134d6183b974268 | b76a3d5f83b77f5345d61cf3baa68aaefc25cd2a | refs/heads/master | 2023-02-06T08:12:17.768076 | 2020-12-22T13:35:19 | 2020-12-22T13:35:19 | 311,286,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from machine.car import drive
from machine.tv import watch
drive()
watch()
from machine import car
from machine import tv
car.drive()
tv.watch()
| [
"[email protected]"
]
| |
c98dfb752acee87d50afcc78ce6a2ff51cfc674f | 89f3169a2393bff8880f657d9bb4c12b40729e9a | /2020-06/abc051_b.py | dbd393e5b7ceda1a54913e15b23019463161141a | []
| no_license | YutaGoto/daily_atcoder | c087adbb7fa03f0cdc4291c806f21b1b93130d86 | 113d4e25f1d3bb0e665f9154bc0afaecae5ea7bf | refs/heads/main | 2023-06-19T00:56:12.359473 | 2021-07-16T12:33:06 | 2021-07-16T12:33:06 | 273,282,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | k, s = map(int, input().split())
a = 0
for x in range(0, k+1):
for y in range(0, k+1):
z = s - x - y
if z >= 0 and z <= k:
a += 1
print(a)
| [
"[email protected]"
]
| |
90168e13f27d0f16c51ca125a2be4b1e7e075cfc | 4fc21c3f8dca563ce8fe0975b5d60f68d882768d | /Darlington/phase1/python Basic 1/day 13 solution/qtn2.py | d0cbe7f2d2874086c5319bbe0c7055094a0354db | [
"MIT"
]
| permissive | Uche-Clare/python-challenge-solutions | 17e53dbedbff2f33e242cf8011696b3059cd96e9 | 49ede6204ee0a82d5507a19fbc7590a1ae10f058 | refs/heads/master | 2022-11-13T15:06:52.846937 | 2020-07-10T20:59:37 | 2020-07-10T20:59:37 | 266,404,840 | 1 | 0 | MIT | 2020-05-23T19:24:56 | 2020-05-23T19:24:55 | null | UTF-8 | Python | false | false | 208 | py | #program to remove the first item from a specified list.
names = ["peter", "winner", "Austin", "ND", "Darlington"]
print("\nFirst Name: ",names)
del names[0]
print("After removing the first : ",names)
print() | [
"[email protected]"
]
| |
ed12611516370eb81804ac8f6bdfcdcbd60a9752 | 4c984a318ccf26e765f902669399da66497e194d | /pollexe/settings/summernote/conf.py | 392425c7e9f0d2983aec17391359a06fce26c55d | []
| no_license | sajalmia381/pollexe | 914af663bad6becb4308c738a16240028f37f99b | 3ead47fee43855aba1ee0f4c2b3f222cac6a9a68 | refs/heads/master | 2020-04-21T12:42:49.283843 | 2019-02-07T13:43:40 | 2019-02-07T13:43:40 | 169,572,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,637 | py | # SUMMERNOTE_THEME = 'bs4'
SUMMERNOTE_CONFIG = {
# Using SummernoteWidget - iframe mode, default
'iframe': True,
# Or, you can set it as False to use SummernoteInplaceWidget by default - no iframe mode
# In this case, you have to load Bootstrap/jQuery stuff by manually.
# Use this when you're already using Bootstraip/jQuery based themes.
'iframe': False,
# You can put custom Summernote settings
'summernote': {
# As an example, using Summernote Air-mode
'airMode': False,
# Change editor size
'width': '100%',
'height': '480',
# Use proper language setting automatically (default)
'lang': None,
# Or, set editor language/locale forcely
'lang': 'ko-KR',
# ...
# You can also add custom settings for external plugins
'print': {
'stylesheetUrl': '/some_static_folder/printable.css',
},
},
# Need authentication while uploading attachments.
'attachment_require_authentication': True,
# Set `upload_to` function for attachments.
'attachment_upload_to': '/media/',
# Set custom storage class for attachments.
'attachment_storage_class': 'my.custom.storage.class.name',
# Set custom model for attachments (default: 'django_summernote.Attachment')
'attachment_model': 'my.custom.attachment.model', # must inherit 'django_summernote.AbstractAttachment'
# You can disable attachment feature.
'disable_attachment': False,
# You can add custom css/js for SummernoteWidget.
'css': (
),
'js': (
),
# You can also add custom css/js for SummernoteInplaceWidget.
# !!! Be sure to put {{ form.media }} in template before initiate summernote.
'css_for_inplace': (
),
'js_for_inplace': (
),
# Codemirror as codeview
# If any codemirror settings are defined, it will include codemirror files automatically.
'css': {
'//cdnjs.cloudflare.com/ajax/libs/codemirror/5.29.0/theme/monokai.min.css',
},
'codemirror': {
'mode': 'htmlmixed',
'lineNumbers': 'true',
# You have to include theme file in 'css' or 'css_for_inplace' before using it.
'theme': 'monokai',
},
# Lazy initialize
# If you want to initialize summernote at the bottom of page, set this as True
# and call `initSummernote()` on your page.
'lazy': True,
# To use external plugins,
# Include them within `css` and `js`.
'js': {
'/some_static_folder/summernote-ext-print.js',
'//somewhere_in_internet/summernote-plugin-name.js',
},
} | [
"[email protected]"
]
| |
bb6034702ab10541abffa775201702a7d77dc308 | 41249d7d4ca9950b9c6fee89bf7e2c1929629767 | /results/rabi_and_lmg_optimizations_different_constraints_20190228/script_rabi_bangramp_neldermead_bounds12.py | a389f358b918172fdab3458cc34595ac1a335274 | [
"MIT"
]
| permissive | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | f739b3baad1d2aadda576303bb0bbe9d48ec204a | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | refs/heads/master | 2022-11-22T00:44:09.998199 | 2020-07-21T08:35:28 | 2020-07-21T08:35:28 | 281,237,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | import os
import sys
import numpy as np
import pandas as pd
import logging
# Make the project root importable when this script is run from its own dir.
if '../../' not in sys.path:
    sys.path.append('../../')
import src.optimization as optimization

# ------ problem / optimizer configuration
model = 'rabi'
model_parameters = dict(N=100, Omega=100, omega_0=1.)
protocol = 'bangramp'
optimization_method = 'Nelder-Mead'
parameters_constraints = [-12, 12]

# ------ build and check name for output file
additional_file_name_qualifiers = None
output_file_name = (model + '_' + protocol + '_' +
                    optimization_method.replace('-', '').lower())
if additional_file_name_qualifiers is not None:
    output_file_name += '_' + additional_file_name_qualifiers
# Append a "(NN)" suffix until the name does not collide with an existing
# .csv file, so previous results are never overwritten.
filenum = 1
_output_file_name = output_file_name
while os.path.isfile(_output_file_name + '.csv'):
    _output_file_name = output_file_name + '({:02})'.format(filenum)
    filenum += 1
output_file_name = _output_file_name + '.csv'

# ------ set up logger
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s]"
                                 "[%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
# consoleHandler = logging.StreamHandler()
# consoleHandler.setFormatter(logFormatter)
# rootLogger.addHandler(consoleHandler)
# Log to a file named after the output file (same stem, .log extension).
fileHandler = logging.FileHandler(output_file_name[:-4] + '.log')
fileHandler.setFormatter(logFormatter)
fileHandler.setLevel(logging.DEBUG)
rootLogger.addHandler(fileHandler)

logging.info('Output file name will be "{}"'.format(output_file_name))

# ------ start optimization
results = optimization.find_best_protocol(
    problem_specification=dict(
        model=model,
        model_parameters=model_parameters,
        task='critical point state generation'
    ),
    optimization_specs=dict(
        protocol=protocol,
        optimization_method=optimization_method,
        parameters_constraints=parameters_constraints
    ),
    other_options=dict(
        scan_times=np.linspace(0.1, 4, 100)
    )
)

# ------ save results to file
results.to_csv(output_file_name)
| [
"[email protected]"
]
| |
941761be2cc2b83e4fb14b8e9bd41be81dd7e935 | 3330090c2b3608eedbce99e55a2a8a5d87cc163f | /lib/actions/drivers_test.py | f78eb2b1553a49633af7f65761e717c7c87f933c | [
"Apache-2.0"
]
| permissive | Venseer/glazier | ea935af0afff001a26538972d24622c69598628f | db24121e5b79bd377c721ca8d9c391db56841a4f | refs/heads/master | 2021-01-11T05:22:52.400113 | 2018-06-25T10:52:19 | 2018-06-25T10:52:19 | 79,860,410 | 0 | 0 | Apache-2.0 | 2018-06-25T10:52:20 | 2017-01-23T23:42:37 | Python | UTF-8 | Python | false | false | 3,986 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for glazier.lib.actions.drivers."""
from glazier.lib.actions import drivers
from glazier.lib.buildinfo import BuildInfo
import mock
from google.apputils import basetest
class DriversTest(basetest.TestCase):
  """Unit tests for the driver-related Glazier build actions."""

  @mock.patch.object(BuildInfo, 'ReleasePath')
  @mock.patch('glazier.lib.download.Download.VerifyShaHash', autospec=True)
  @mock.patch('glazier.lib.download.Download.DownloadFile', autospec=True)
  @mock.patch.object(drivers, 'Execute', autospec=True)
  @mock.patch.object(drivers.file_util, 'CreateDirectories', autospec=True)
  def testDriverWIM(self, mkdir, exe, dl, sha, rpath):
    # Mock args are injected bottom-up from the decorator stack:
    # CreateDirectories -> mkdir, Execute -> exe, DownloadFile -> dl, etc.
    # Exercises the success path (download, hash check, dism calls) and
    # the failure paths of each external command.
    bi = BuildInfo()
    # Setup
    remote = '@Drivers/Lenovo/W54x-Win10-Storage.wim'
    local = r'c:\W54x-Win10-Storage.wim'
    sha_256 = (
        'D30F9DB0698C87901DF6824D11203BDC2D6DAAF0CE14ABD7C0A7B75974936748')
    conf = {
        'data': {
            'driver': [[remote, local, sha_256]]
        },
        'path': ['/autobuild']
    }
    rpath.return_value = '/'
    # Success
    dw = drivers.DriverWIM(conf['data']['driver'], bi)
    dw.Run()
    dl.assert_called_with(
        mock.ANY, ('https://glazier-server.example.com/'
                   'bin/Drivers/Lenovo/W54x-Win10-Storage.wim'),
        local,
        show_progress=True)
    sha.assert_called_with(mock.ANY, local, sha_256)
    cache = drivers.constants.SYS_CACHE
    exe.assert_called_with([[('X:\\Windows\\System32\\dism.exe /Unmount-Image '
                              '/MountDir:%s\\Drivers\\ /Discard' % cache)]],
                           mock.ANY)
    mkdir.assert_called_with('%s\\Drivers\\' % cache)
    # Invalid format
    conf['data']['driver'][0][1] = 'C:\\W54x-Win10-Storage.zip'
    dw = drivers.DriverWIM(conf['data']['driver'], bi)
    self.assertRaises(drivers.ActionError, dw.Run)
    conf['data']['driver'][0][1] = 'C:\\W54x-Win10-Storage.wim'
    # Mount Fail
    exe.return_value.Run.side_effect = drivers.ActionError()
    self.assertRaises(drivers.ActionError, dw.Run)
    # Dism Fail: first Execute succeeds, second raises.
    exe.return_value.Run.side_effect = iter([0, drivers.ActionError()])
    self.assertRaises(drivers.ActionError, dw.Run)
    # Unmount Fail: first two Executes succeed, third raises.
    exe.return_value.Run.side_effect = iter([0, 0, drivers.ActionError()])
    self.assertRaises(drivers.ActionError, dw.Run)

  def testDriverWIMValidate(self):
    # Validate() must reject non-list configs, non-string entries,
    # non-.wim paths and extra trailing fields; 2- and 3-element entries
    # with .wim paths are accepted.
    g = drivers.DriverWIM('String', None)
    self.assertRaises(drivers.ValidationError, g.Validate)
    g = drivers.DriverWIM([[1, 2, 3]], None)
    self.assertRaises(drivers.ValidationError, g.Validate)
    g = drivers.DriverWIM([[1, '/tmp/out/path']], None)
    self.assertRaises(drivers.ValidationError, g.Validate)
    g = drivers.DriverWIM([['/tmp/src.zip', 2]], None)
    self.assertRaises(drivers.ValidationError, g.Validate)
    g = drivers.DriverWIM([['https://glazier/bin/src.wim', '/tmp/out/src.zip']],
                          None)
    self.assertRaises(drivers.ValidationError, g.Validate)
    g = drivers.DriverWIM([['https://glazier/bin/src.wim', '/tmp/out/src.wim']],
                          None)
    g.Validate()
    g = drivers.DriverWIM(
        [['https://glazier/bin/src.wim', '/tmp/out/src.wim', '12345']], None)
    g.Validate()
    g = drivers.DriverWIM(
        [['https://glazier/bin/src.zip', '/tmp/out/src.zip', '12345', '67890']],
        None)
    self.assertRaises(drivers.ValidationError, g.Validate)
None)
self.assertRaises(drivers.ValidationError, g.Validate)
if __name__ == '__main__':
  basetest.main()  # run the test suite when executed directly
| [
"[email protected]"
]
| |
b4d48deeab90710e4f81fb5ff97b545bb3a77179 | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/incremental-memory-leak.py | 1705ea35d0108812a35a7859a8fdf28edbb2a50f | [
"MIT"
]
| permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 1,064 | py | # Time: O(1)
# Space: O(1)
# Same problem from https://codingcompetitions.withgoogle.com/codejam/round/000000000019ffb9/00000000003384ea
class Solution(object):
def memLeak(self, memory1, memory2):
"""
:type memory1: int
:type memory2: int
:rtype: List[int]
"""
def s(a, d, n):
return (2*a + (n-1)*d)*n//2
def f(a, d, x):
r = int((-(2*a-d)+((2*a-d)**2+8*d*x)**0.5)/(2*d))
if s(a, d, r) > x: # adjust float accuracy
r -= 1
return r
is_swapped = False
if memory1 < memory2:
memory1, memory2 = memory2, memory1
is_swapped = True
n = f(1, 1, memory1-memory2)
memory1 -= s(1, 1, n)
if memory1 == memory2:
is_swapped = False
l = f(n+1, 2, memory1)
r = f(n+2, 2, memory2)
memory1 -= s(n+1, 2, l)
memory2 -= s(n+2, 2, r)
if is_swapped:
memory1, memory2 = memory2, memory1
return [n+l+r+1, memory1, memory2]
| [
"[email protected]"
]
| |
1204ca4653c8968bf60985caa1ab1428e568e339 | d9cb81209d452b7c3180cd2e2b3e1b00279b469c | /proc_TED_2_NOV20161_adjusted.py | 623ca2a355b2c5efeb7490545a9385aca30410f3 | [
"CC0-1.0"
]
| permissive | riceissa/total-economy-database | 3857a0b9bc1de393fc681b43914b26c0adf2c8bc | 0052bb2202458a7e908203b222d404b266ee1c0d | refs/heads/master | 2022-08-12T16:10:30.891745 | 2022-07-23T05:48:34 | 2022-07-23T05:48:34 | 107,448,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | #!/usr/bin/env python3
import csv
import sys
import re
from devec_sql_common import *
insert_line = "insert into data(region, odate, database_url, data_retrieval_method, metric, units, value, notes) values"
count = 0
first = True
with open("../total-economy-database-data/TED_2_NOV20161_adjusted.csv", newline='') as f:
reader = csv.DictReader(f)
for row in reader:
for year in range(1995, 2016):
y = str(year)
if row[y]:
if first:
print(insert_line)
print(" " + ("" if first else ",") + "(" + uniq_join([
mysql_quote(region_normalized(row['COUNTRY'])), # region
mysql_string_date(y), # odate
mysql_quote("https://www.conference-board.org/retrievefile.cfm?filename=TED_2_NOV20161.xlsx&type=subsite"), # database_url
mysql_quote(""), # data_retrieval_method
mysql_quote(row['INDICATOR'] + " (adjusted)"), # metric
mysql_quote(row['MEASURE']), # units
mysql_float(row[y]), # value
mysql_quote(""), # notes
]) + ")")
first = False
count += 1
if count > 5000:
count = 0
first = True
print(";")
if not first:
print(";")
| [
"[email protected]"
]
| |
17a0ccae6225c5831dfd34cfb3a9e83af48d05fc | ffdc77394c5b5532b243cf3c33bd584cbdc65cb7 | /tests/st/networks/models/bert/src/__init__.py | 4f4584a4b483b32d6ddacc12923d127d5a549061 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
]
| permissive | mindspore-ai/mindspore | ca7d5bb51a3451c2705ff2e583a740589d80393b | 54acb15d435533c815ee1bd9f6dc0b56b4d4cf83 | refs/heads/master | 2023-07-29T09:17:11.051569 | 2023-07-17T13:14:15 | 2023-07-17T13:14:15 | 239,714,835 | 4,178 | 768 | Apache-2.0 | 2023-07-26T22:31:11 | 2020-02-11T08:43:48 | C++ | UTF-8 | Python | false | false | 1,653 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Bert Init."""
from .bert_for_pre_training import BertNetworkWithLoss, BertPreTraining, \
BertPretrainingLoss, GetMaskedLMOutput, GetNextSentenceOutput, \
BertTrainOneStepCell, BertTrainOneStepWithLossScaleCell
from .bert_model import BertAttention, BertConfig, BertEncoderCell, BertModel, \
BertOutput, BertSelfAttention, BertTransformer, EmbeddingLookup, \
EmbeddingPostprocessor, RelaPosEmbeddingsGenerator, RelaPosMatrixGenerator, \
SaturateCast, CreateAttentionMaskFromInputMask
__all__ = [
"BertNetworkWithLoss", "BertPreTraining", "BertPretrainingLoss",
"GetMaskedLMOutput", "GetNextSentenceOutput", "BertTrainOneStepCell", "BertTrainOneStepWithLossScaleCell",
"BertAttention", "BertConfig", "BertEncoderCell", "BertModel", "BertOutput",
"BertSelfAttention", "BertTransformer", "EmbeddingLookup",
"EmbeddingPostprocessor", "RelaPosEmbeddingsGenerator",
"RelaPosMatrixGenerator", "SaturateCast", "CreateAttentionMaskFromInputMask"
]
| [
"[email protected]"
]
| |
3dfe5bdfaef0f2bb0484a3cedcb49aace1286dab | 496a63f41fa32e2bb3ecce0d35ff4374f1c02ad5 | /src/data/handlers/options.py | 1e2f69ca64a2a2b696cedf6bff9de1e3eb46b311 | [
"BSD-3-Clause"
]
| permissive | vincent-lg/avenew.one | bbfa8d44e68db943b8825e9d4a32a43e985778fe | fb7f98d331e47e2032ee1e51bf3e4b2592807fdf | refs/heads/main | 2023-02-14T00:28:53.511552 | 2021-01-13T11:13:07 | 2021-01-13T11:13:07 | 330,207,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,209 | py | # Copyright (c) 2020-20201, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""Option handler, to work with an inline dictionary.
Contrary to most handlers, the OptionHandler requires a
`binary_options` field, set as a required byte string on the
entity it modifies.
"""
import pickle
from collections.abc import MutableMapping
class OptionHandler(MutableMapping):
    """Dictionary-like access to options pickled onto an entity.

    The options are stored as a pickled byte string in one of the owner
    entity's fields (``binary_options`` by default).  Every mutation is
    immediately serialized back onto the owner, so the stored bytes always
    reflect the current options.

    >>> session.options["username"] = "someone"
    >>> session.options["username"]
    'someone'
    >>> len(session.options)
    1
    >>> del session.options["username"]
    >>> sesession.options.get("username", "")
    ''
    >>> # ...
    """

    __slots__ = ("__entity", "__field", "__cache")

    def __init__(self, owner, binary_field="binary_options"):
        self.__entity = owner
        self.__field = binary_field
        self.__cache = pickle.loads(getattr(owner, binary_field))

    def __persist(self):
        # Serialize the current options and write them back on the owner.
        setattr(self.__entity, self.__field, pickle.dumps(self.__cache))

    def __len__(self):
        return len(self.__cache)

    def __iter__(self):
        return iter(self.__cache)

    def __getitem__(self, key):
        return self.__cache[key]

    def __setitem__(self, key, value):
        self.__cache[key] = value
        self.__persist()

    def __delitem__(self, key):
        del self.__cache[key]
        self.__persist()
| [
"[email protected]"
]
| |
7fb0fcce7a07d40707030a7b6d5b1f5de8882482 | 1a1c372244ef0e64da4629496bb3eb1b00cb47fc | /configs/guided_anchoring/ga_faster_x101_32x4d_fpn_1x.py | dabdf6c9864e1a9dad1e759165d76472618e78b0 | [
"Apache-2.0"
]
| permissive | speedinghzl/mmdetection | 2ab7926251ed3ee8f86dcba6f0b85081eac0ef53 | 339f37a21b6e4001e90734f6fce1559843e83487 | refs/heads/master | 2020-05-30T09:54:02.302563 | 2019-06-02T03:41:45 | 2019-06-02T03:41:45 | 189,658,179 | 4 | 0 | Apache-2.0 | 2019-05-31T21:03:41 | 2019-05-31T21:03:40 | null | UTF-8 | Python | false | false | 5,721 | py | # model settings
model = dict(
type='FasterRCNN',
pretrained='open-mmlab://resnext101_32x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='GARPNHead',
in_channels=256,
feat_channels=256,
octave_base_scale=8,
scales_per_octave=3,
octave_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
anchor_base_sizes=None,
anchoring_means=[.0, .0, .0, .0],
anchoring_stds=[0.07, 0.07, 0.14, 0.14],
target_means=(.0, .0, .0, .0),
target_stds=[0.07, 0.07, 0.11, 0.11],
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(
type='IoULoss', style='bounded', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
    rpn=dict(
        # Guided Anchoring trains the anchor branch with its own
        # assigner/sampler (ga_*), separate from the RPN cls/reg branch.
        ga_assigner=dict(
            type='ApproxMaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        ga_sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=-1,
        pos_weight=-1,
        center_ratio=0.2,
        ignore_ratio=0.5,
        debug=False),
    rpn_proposal=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=300,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.6,
            neg_iou_thr=0.6,
            min_pos_iou=0.6,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True),
        pos_weight=-1,
        debug=False))
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=1000,
        nms_post=1000,
        max_num=300,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=1e-3, nms=dict(type='nms', iou_thr=0.5), max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Per-channel normalization values (presumably the usual ImageNet RGB
# mean/std — confirm against the pretrained backbone's preprocessing).
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        img_scale=(1333, 800),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0.5,
        with_mask=False,
        with_crowd=True,
        with_label=True),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        img_scale=(1333, 800),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=False,
        with_crowd=True,
        with_label=True),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        img_scale=(1333, 800),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=False,
        with_label=False,
        test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ga_faster_rcnn_x101_32x4d_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
"[email protected]"
]
| |
4b2110ebacae297132391eab34bb7b1f2d33b59d | 010279e2ba272d09e9d2c4e903722e5faba2cf7a | /contrib/python/plotly/py2/plotly/graph_objs/layout/template/__init__.py | d79d09c5515911865948b620ebebfb9ac2777926 | [
"MIT",
"Apache-2.0"
]
| permissive | catboost/catboost | 854c1a1f439a96f1ae6b48e16644be20aa04dba2 | f5042e35b945aded77b23470ead62d7eacefde92 | refs/heads/master | 2023-09-01T12:14:14.174108 | 2023-09-01T10:01:01 | 2023-09-01T10:22:12 | 97,556,265 | 8,012 | 1,425 | Apache-2.0 | 2023-09-11T03:32:32 | 2017-07-18T05:29:04 | Python | UTF-8 | Python | false | false | 56,166 | py | from plotly.graph_objs import Layout
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Data(_BaseLayoutHierarchyType):
# area
# ----
@property
def area(self):
"""
The 'area' property is a tuple of instances of
Area that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Area
- A list or tuple of dicts of string/value properties that
will be passed to the Area constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Area]
"""
return self["area"]
@area.setter
def area(self, val):
self["area"] = val
# barpolar
# --------
@property
def barpolar(self):
"""
The 'barpolar' property is a tuple of instances of
Barpolar that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Barpolar
- A list or tuple of dicts of string/value properties that
will be passed to the Barpolar constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Barpolar]
"""
return self["barpolar"]
@barpolar.setter
def barpolar(self, val):
self["barpolar"] = val
# bar
# ---
@property
def bar(self):
"""
The 'bar' property is a tuple of instances of
Bar that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Bar
- A list or tuple of dicts of string/value properties that
will be passed to the Bar constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Bar]
"""
return self["bar"]
@bar.setter
def bar(self, val):
self["bar"] = val
# box
# ---
@property
def box(self):
"""
The 'box' property is a tuple of instances of
Box that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Box
- A list or tuple of dicts of string/value properties that
will be passed to the Box constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Box]
"""
return self["box"]
@box.setter
def box(self, val):
self["box"] = val
# candlestick
# -----------
@property
def candlestick(self):
"""
The 'candlestick' property is a tuple of instances of
Candlestick that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Candlestick
- A list or tuple of dicts of string/value properties that
will be passed to the Candlestick constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Candlestick]
"""
return self["candlestick"]
@candlestick.setter
def candlestick(self, val):
self["candlestick"] = val
# carpet
# ------
@property
def carpet(self):
"""
The 'carpet' property is a tuple of instances of
Carpet that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Carpet
- A list or tuple of dicts of string/value properties that
will be passed to the Carpet constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Carpet]
"""
return self["carpet"]
@carpet.setter
def carpet(self, val):
self["carpet"] = val
# choroplethmapbox
# ----------------
@property
def choroplethmapbox(self):
"""
The 'choroplethmapbox' property is a tuple of instances of
Choroplethmapbox that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Choroplethmapbox
- A list or tuple of dicts of string/value properties that
will be passed to the Choroplethmapbox constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Choroplethmapbox]
"""
return self["choroplethmapbox"]
@choroplethmapbox.setter
def choroplethmapbox(self, val):
self["choroplethmapbox"] = val
# choropleth
# ----------
@property
def choropleth(self):
"""
The 'choropleth' property is a tuple of instances of
Choropleth that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Choropleth
- A list or tuple of dicts of string/value properties that
will be passed to the Choropleth constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Choropleth]
"""
return self["choropleth"]
@choropleth.setter
def choropleth(self, val):
self["choropleth"] = val
# cone
# ----
@property
def cone(self):
"""
The 'cone' property is a tuple of instances of
Cone that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Cone
- A list or tuple of dicts of string/value properties that
will be passed to the Cone constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Cone]
"""
return self["cone"]
@cone.setter
def cone(self, val):
self["cone"] = val
# contourcarpet
# -------------
@property
def contourcarpet(self):
"""
The 'contourcarpet' property is a tuple of instances of
Contourcarpet that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Contourcarpet
- A list or tuple of dicts of string/value properties that
will be passed to the Contourcarpet constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Contourcarpet]
"""
return self["contourcarpet"]
@contourcarpet.setter
def contourcarpet(self, val):
self["contourcarpet"] = val
# contour
# -------
@property
def contour(self):
"""
The 'contour' property is a tuple of instances of
Contour that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Contour
- A list or tuple of dicts of string/value properties that
will be passed to the Contour constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Contour]
"""
return self["contour"]
@contour.setter
def contour(self, val):
self["contour"] = val
# densitymapbox
# -------------
@property
def densitymapbox(self):
"""
The 'densitymapbox' property is a tuple of instances of
Densitymapbox that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Densitymapbox
- A list or tuple of dicts of string/value properties that
will be passed to the Densitymapbox constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Densitymapbox]
"""
return self["densitymapbox"]
@densitymapbox.setter
def densitymapbox(self, val):
self["densitymapbox"] = val
# funnelarea
# ----------
@property
def funnelarea(self):
"""
The 'funnelarea' property is a tuple of instances of
Funnelarea that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Funnelarea
- A list or tuple of dicts of string/value properties that
will be passed to the Funnelarea constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Funnelarea]
"""
return self["funnelarea"]
@funnelarea.setter
def funnelarea(self, val):
self["funnelarea"] = val
# funnel
# ------
@property
def funnel(self):
"""
The 'funnel' property is a tuple of instances of
Funnel that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Funnel
- A list or tuple of dicts of string/value properties that
will be passed to the Funnel constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Funnel]
"""
return self["funnel"]
@funnel.setter
def funnel(self, val):
self["funnel"] = val
# heatmapgl
# ---------
@property
def heatmapgl(self):
"""
The 'heatmapgl' property is a tuple of instances of
Heatmapgl that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Heatmapgl
- A list or tuple of dicts of string/value properties that
will be passed to the Heatmapgl constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Heatmapgl]
"""
return self["heatmapgl"]
@heatmapgl.setter
def heatmapgl(self, val):
self["heatmapgl"] = val
# heatmap
# -------
@property
def heatmap(self):
"""
The 'heatmap' property is a tuple of instances of
Heatmap that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Heatmap
- A list or tuple of dicts of string/value properties that
will be passed to the Heatmap constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Heatmap]
"""
return self["heatmap"]
@heatmap.setter
def heatmap(self, val):
self["heatmap"] = val
# histogram2dcontour
# ------------------
@property
def histogram2dcontour(self):
"""
The 'histogram2dcontour' property is a tuple of instances of
Histogram2dContour that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Histogram2dContour
- A list or tuple of dicts of string/value properties that
will be passed to the Histogram2dContour constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Histogram2dContour]
"""
return self["histogram2dcontour"]
@histogram2dcontour.setter
def histogram2dcontour(self, val):
self["histogram2dcontour"] = val
# histogram2d
# -----------
@property
def histogram2d(self):
"""
The 'histogram2d' property is a tuple of instances of
Histogram2d that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Histogram2d
- A list or tuple of dicts of string/value properties that
will be passed to the Histogram2d constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Histogram2d]
"""
return self["histogram2d"]
@histogram2d.setter
def histogram2d(self, val):
self["histogram2d"] = val
# histogram
# ---------
@property
def histogram(self):
"""
The 'histogram' property is a tuple of instances of
Histogram that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Histogram
- A list or tuple of dicts of string/value properties that
will be passed to the Histogram constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Histogram]
"""
return self["histogram"]
@histogram.setter
def histogram(self, val):
self["histogram"] = val
# image
# -----
@property
def image(self):
"""
The 'image' property is a tuple of instances of
Image that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Image
- A list or tuple of dicts of string/value properties that
will be passed to the Image constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Image]
"""
return self["image"]
@image.setter
def image(self, val):
self["image"] = val
# indicator
# ---------
@property
def indicator(self):
"""
The 'indicator' property is a tuple of instances of
Indicator that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Indicator
- A list or tuple of dicts of string/value properties that
will be passed to the Indicator constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Indicator]
"""
return self["indicator"]
@indicator.setter
def indicator(self, val):
self["indicator"] = val
# isosurface
# ----------
@property
def isosurface(self):
"""
The 'isosurface' property is a tuple of instances of
Isosurface that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Isosurface
- A list or tuple of dicts of string/value properties that
will be passed to the Isosurface constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Isosurface]
"""
return self["isosurface"]
@isosurface.setter
def isosurface(self, val):
self["isosurface"] = val
# mesh3d
# ------
@property
def mesh3d(self):
"""
The 'mesh3d' property is a tuple of instances of
Mesh3d that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Mesh3d
- A list or tuple of dicts of string/value properties that
will be passed to the Mesh3d constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Mesh3d]
"""
return self["mesh3d"]
@mesh3d.setter
def mesh3d(self, val):
self["mesh3d"] = val
# ohlc
# ----
@property
def ohlc(self):
"""
The 'ohlc' property is a tuple of instances of
Ohlc that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Ohlc
- A list or tuple of dicts of string/value properties that
will be passed to the Ohlc constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Ohlc]
"""
return self["ohlc"]
@ohlc.setter
def ohlc(self, val):
self["ohlc"] = val
# parcats
# -------
@property
def parcats(self):
"""
The 'parcats' property is a tuple of instances of
Parcats that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Parcats
- A list or tuple of dicts of string/value properties that
will be passed to the Parcats constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Parcats]
"""
return self["parcats"]
@parcats.setter
def parcats(self, val):
self["parcats"] = val
# parcoords
# ---------
@property
def parcoords(self):
"""
The 'parcoords' property is a tuple of instances of
Parcoords that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Parcoords
- A list or tuple of dicts of string/value properties that
will be passed to the Parcoords constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Parcoords]
"""
return self["parcoords"]
@parcoords.setter
def parcoords(self, val):
self["parcoords"] = val
# pie
# ---
@property
def pie(self):
"""
The 'pie' property is a tuple of instances of
Pie that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Pie
- A list or tuple of dicts of string/value properties that
will be passed to the Pie constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Pie]
"""
return self["pie"]
@pie.setter
def pie(self, val):
self["pie"] = val
# pointcloud
# ----------
@property
def pointcloud(self):
"""
The 'pointcloud' property is a tuple of instances of
Pointcloud that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Pointcloud
- A list or tuple of dicts of string/value properties that
will be passed to the Pointcloud constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Pointcloud]
"""
return self["pointcloud"]
@pointcloud.setter
def pointcloud(self, val):
self["pointcloud"] = val
# sankey
# ------
@property
def sankey(self):
"""
The 'sankey' property is a tuple of instances of
Sankey that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Sankey
- A list or tuple of dicts of string/value properties that
will be passed to the Sankey constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Sankey]
"""
return self["sankey"]
@sankey.setter
def sankey(self, val):
self["sankey"] = val
# scatter3d
# ---------
@property
def scatter3d(self):
"""
The 'scatter3d' property is a tuple of instances of
Scatter3d that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatter3d
- A list or tuple of dicts of string/value properties that
will be passed to the Scatter3d constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatter3d]
"""
return self["scatter3d"]
@scatter3d.setter
def scatter3d(self, val):
self["scatter3d"] = val
# scattercarpet
# -------------
@property
def scattercarpet(self):
"""
The 'scattercarpet' property is a tuple of instances of
Scattercarpet that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattercarpet
- A list or tuple of dicts of string/value properties that
will be passed to the Scattercarpet constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattercarpet]
"""
return self["scattercarpet"]
@scattercarpet.setter
def scattercarpet(self, val):
self["scattercarpet"] = val
# scattergeo
# ----------
@property
def scattergeo(self):
"""
The 'scattergeo' property is a tuple of instances of
Scattergeo that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattergeo
- A list or tuple of dicts of string/value properties that
will be passed to the Scattergeo constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattergeo]
"""
return self["scattergeo"]
@scattergeo.setter
def scattergeo(self, val):
self["scattergeo"] = val
# scattergl
# ---------
@property
def scattergl(self):
"""
The 'scattergl' property is a tuple of instances of
Scattergl that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattergl
- A list or tuple of dicts of string/value properties that
will be passed to the Scattergl constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattergl]
"""
return self["scattergl"]
@scattergl.setter
def scattergl(self, val):
self["scattergl"] = val
# scattermapbox
# -------------
@property
def scattermapbox(self):
"""
The 'scattermapbox' property is a tuple of instances of
Scattermapbox that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scattermapbox
- A list or tuple of dicts of string/value properties that
will be passed to the Scattermapbox constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scattermapbox]
"""
return self["scattermapbox"]
@scattermapbox.setter
def scattermapbox(self, val):
self["scattermapbox"] = val
# scatterpolargl
# --------------
@property
def scatterpolargl(self):
"""
The 'scatterpolargl' property is a tuple of instances of
Scatterpolargl that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatterpolargl
- A list or tuple of dicts of string/value properties that
will be passed to the Scatterpolargl constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatterpolargl]
"""
return self["scatterpolargl"]
@scatterpolargl.setter
def scatterpolargl(self, val):
self["scatterpolargl"] = val
# scatterpolar
# ------------
@property
def scatterpolar(self):
"""
The 'scatterpolar' property is a tuple of instances of
Scatterpolar that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatterpolar
- A list or tuple of dicts of string/value properties that
will be passed to the Scatterpolar constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatterpolar]
"""
return self["scatterpolar"]
@scatterpolar.setter
def scatterpolar(self, val):
self["scatterpolar"] = val
# scatter
# -------
@property
def scatter(self):
"""
The 'scatter' property is a tuple of instances of
Scatter that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatter
- A list or tuple of dicts of string/value properties that
will be passed to the Scatter constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatter]
"""
return self["scatter"]
@scatter.setter
def scatter(self, val):
self["scatter"] = val
# scatterternary
# --------------
@property
def scatterternary(self):
"""
The 'scatterternary' property is a tuple of instances of
Scatterternary that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Scatterternary
- A list or tuple of dicts of string/value properties that
will be passed to the Scatterternary constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Scatterternary]
"""
return self["scatterternary"]
@scatterternary.setter
def scatterternary(self, val):
self["scatterternary"] = val
# splom
# -----
@property
def splom(self):
"""
The 'splom' property is a tuple of instances of
Splom that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Splom
- A list or tuple of dicts of string/value properties that
will be passed to the Splom constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Splom]
"""
return self["splom"]
@splom.setter
def splom(self, val):
self["splom"] = val
# streamtube
# ----------
@property
def streamtube(self):
"""
The 'streamtube' property is a tuple of instances of
Streamtube that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Streamtube
- A list or tuple of dicts of string/value properties that
will be passed to the Streamtube constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Streamtube]
"""
return self["streamtube"]
@streamtube.setter
def streamtube(self, val):
self["streamtube"] = val
# sunburst
# --------
@property
def sunburst(self):
"""
The 'sunburst' property is a tuple of instances of
Sunburst that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Sunburst
- A list or tuple of dicts of string/value properties that
will be passed to the Sunburst constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Sunburst]
"""
return self["sunburst"]
@sunburst.setter
def sunburst(self, val):
self["sunburst"] = val
# surface
# -------
@property
def surface(self):
"""
The 'surface' property is a tuple of instances of
Surface that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Surface
- A list or tuple of dicts of string/value properties that
will be passed to the Surface constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Surface]
"""
return self["surface"]
@surface.setter
def surface(self, val):
self["surface"] = val
# table
# -----
@property
def table(self):
"""
The 'table' property is a tuple of instances of
Table that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Table
- A list or tuple of dicts of string/value properties that
will be passed to the Table constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Table]
"""
return self["table"]
@table.setter
def table(self, val):
self["table"] = val
# treemap
# -------
@property
def treemap(self):
"""
The 'treemap' property is a tuple of instances of
Treemap that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Treemap
- A list or tuple of dicts of string/value properties that
will be passed to the Treemap constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Treemap]
"""
return self["treemap"]
@treemap.setter
def treemap(self, val):
self["treemap"] = val
# violin
# ------
@property
def violin(self):
"""
The 'violin' property is a tuple of instances of
Violin that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Violin
- A list or tuple of dicts of string/value properties that
will be passed to the Violin constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Violin]
"""
return self["violin"]
@violin.setter
def violin(self, val):
self["violin"] = val
# volume
# ------
@property
def volume(self):
"""
The 'volume' property is a tuple of instances of
Volume that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Volume
- A list or tuple of dicts of string/value properties that
will be passed to the Volume constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Volume]
"""
return self["volume"]
@volume.setter
def volume(self, val):
self["volume"] = val
# waterfall
# ---------
@property
def waterfall(self):
"""
The 'waterfall' property is a tuple of instances of
Waterfall that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.template.data.Waterfall
- A list or tuple of dicts of string/value properties that
will be passed to the Waterfall constructor
Supported dict properties:
Returns
-------
tuple[plotly.graph_objs.layout.template.data.Waterfall]
"""
return self["waterfall"]
@waterfall.setter
def waterfall(self, val):
self["waterfall"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "layout.template"
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # NOTE: the returned text is consumed verbatim when building the
        # generated constructor docstring below; keep it byte-identical
        # to the code generator's output.
        return """\
        area
            A tuple of :class:`plotly.graph_objects.Area` instances
            or dicts with compatible properties
        barpolar
            A tuple of :class:`plotly.graph_objects.Barpolar`
            instances or dicts with compatible properties
        bar
            A tuple of :class:`plotly.graph_objects.Bar` instances
            or dicts with compatible properties
        box
            A tuple of :class:`plotly.graph_objects.Box` instances
            or dicts with compatible properties
        candlestick
            A tuple of :class:`plotly.graph_objects.Candlestick`
            instances or dicts with compatible properties
        carpet
            A tuple of :class:`plotly.graph_objects.Carpet`
            instances or dicts with compatible properties
        choroplethmapbox
            A tuple of
            :class:`plotly.graph_objects.Choroplethmapbox`
            instances or dicts with compatible properties
        choropleth
            A tuple of :class:`plotly.graph_objects.Choropleth`
            instances or dicts with compatible properties
        cone
            A tuple of :class:`plotly.graph_objects.Cone` instances
            or dicts with compatible properties
        contourcarpet
            A tuple of :class:`plotly.graph_objects.Contourcarpet`
            instances or dicts with compatible properties
        contour
            A tuple of :class:`plotly.graph_objects.Contour`
            instances or dicts with compatible properties
        densitymapbox
            A tuple of :class:`plotly.graph_objects.Densitymapbox`
            instances or dicts with compatible properties
        funnelarea
            A tuple of :class:`plotly.graph_objects.Funnelarea`
            instances or dicts with compatible properties
        funnel
            A tuple of :class:`plotly.graph_objects.Funnel`
            instances or dicts with compatible properties
        heatmapgl
            A tuple of :class:`plotly.graph_objects.Heatmapgl`
            instances or dicts with compatible properties
        heatmap
            A tuple of :class:`plotly.graph_objects.Heatmap`
            instances or dicts with compatible properties
        histogram2dcontour
            A tuple of
            :class:`plotly.graph_objects.Histogram2dContour`
            instances or dicts with compatible properties
        histogram2d
            A tuple of :class:`plotly.graph_objects.Histogram2d`
            instances or dicts with compatible properties
        histogram
            A tuple of :class:`plotly.graph_objects.Histogram`
            instances or dicts with compatible properties
        image
            A tuple of :class:`plotly.graph_objects.Image`
            instances or dicts with compatible properties
        indicator
            A tuple of :class:`plotly.graph_objects.Indicator`
            instances or dicts with compatible properties
        isosurface
            A tuple of :class:`plotly.graph_objects.Isosurface`
            instances or dicts with compatible properties
        mesh3d
            A tuple of :class:`plotly.graph_objects.Mesh3d`
            instances or dicts with compatible properties
        ohlc
            A tuple of :class:`plotly.graph_objects.Ohlc` instances
            or dicts with compatible properties
        parcats
            A tuple of :class:`plotly.graph_objects.Parcats`
            instances or dicts with compatible properties
        parcoords
            A tuple of :class:`plotly.graph_objects.Parcoords`
            instances or dicts with compatible properties
        pie
            A tuple of :class:`plotly.graph_objects.Pie` instances
            or dicts with compatible properties
        pointcloud
            A tuple of :class:`plotly.graph_objects.Pointcloud`
            instances or dicts with compatible properties
        sankey
            A tuple of :class:`plotly.graph_objects.Sankey`
            instances or dicts with compatible properties
        scatter3d
            A tuple of :class:`plotly.graph_objects.Scatter3d`
            instances or dicts with compatible properties
        scattercarpet
            A tuple of :class:`plotly.graph_objects.Scattercarpet`
            instances or dicts with compatible properties
        scattergeo
            A tuple of :class:`plotly.graph_objects.Scattergeo`
            instances or dicts with compatible properties
        scattergl
            A tuple of :class:`plotly.graph_objects.Scattergl`
            instances or dicts with compatible properties
        scattermapbox
            A tuple of :class:`plotly.graph_objects.Scattermapbox`
            instances or dicts with compatible properties
        scatterpolargl
            A tuple of :class:`plotly.graph_objects.Scatterpolargl`
            instances or dicts with compatible properties
        scatterpolar
            A tuple of :class:`plotly.graph_objects.Scatterpolar`
            instances or dicts with compatible properties
        scatter
            A tuple of :class:`plotly.graph_objects.Scatter`
            instances or dicts with compatible properties
        scatterternary
            A tuple of :class:`plotly.graph_objects.Scatterternary`
            instances or dicts with compatible properties
        splom
            A tuple of :class:`plotly.graph_objects.Splom`
            instances or dicts with compatible properties
        streamtube
            A tuple of :class:`plotly.graph_objects.Streamtube`
            instances or dicts with compatible properties
        sunburst
            A tuple of :class:`plotly.graph_objects.Sunburst`
            instances or dicts with compatible properties
        surface
            A tuple of :class:`plotly.graph_objects.Surface`
            instances or dicts with compatible properties
        table
            A tuple of :class:`plotly.graph_objects.Table`
            instances or dicts with compatible properties
        treemap
            A tuple of :class:`plotly.graph_objects.Treemap`
            instances or dicts with compatible properties
        violin
            A tuple of :class:`plotly.graph_objects.Violin`
            instances or dicts with compatible properties
        volume
            A tuple of :class:`plotly.graph_objects.Volume`
            instances or dicts with compatible properties
        waterfall
            A tuple of :class:`plotly.graph_objects.Waterfall`
            instances or dicts with compatible properties
        """
def __init__(
self,
arg=None,
area=None,
barpolar=None,
bar=None,
box=None,
candlestick=None,
carpet=None,
choroplethmapbox=None,
choropleth=None,
cone=None,
contourcarpet=None,
contour=None,
densitymapbox=None,
funnelarea=None,
funnel=None,
heatmapgl=None,
heatmap=None,
histogram2dcontour=None,
histogram2d=None,
histogram=None,
image=None,
indicator=None,
isosurface=None,
mesh3d=None,
ohlc=None,
parcats=None,
parcoords=None,
pie=None,
pointcloud=None,
sankey=None,
scatter3d=None,
scattercarpet=None,
scattergeo=None,
scattergl=None,
scattermapbox=None,
scatterpolargl=None,
scatterpolar=None,
scatter=None,
scatterternary=None,
splom=None,
streamtube=None,
sunburst=None,
surface=None,
table=None,
treemap=None,
violin=None,
volume=None,
waterfall=None,
**kwargs
):
"""
Construct a new Data object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.template.Data`
area
A tuple of :class:`plotly.graph_objects.Area` instances
or dicts with compatible properties
barpolar
A tuple of :class:`plotly.graph_objects.Barpolar`
instances or dicts with compatible properties
bar
A tuple of :class:`plotly.graph_objects.Bar` instances
or dicts with compatible properties
box
A tuple of :class:`plotly.graph_objects.Box` instances
or dicts with compatible properties
candlestick
A tuple of :class:`plotly.graph_objects.Candlestick`
instances or dicts with compatible properties
carpet
A tuple of :class:`plotly.graph_objects.Carpet`
instances or dicts with compatible properties
choroplethmapbox
A tuple of
:class:`plotly.graph_objects.Choroplethmapbox`
instances or dicts with compatible properties
choropleth
A tuple of :class:`plotly.graph_objects.Choropleth`
instances or dicts with compatible properties
cone
A tuple of :class:`plotly.graph_objects.Cone` instances
or dicts with compatible properties
contourcarpet
A tuple of :class:`plotly.graph_objects.Contourcarpet`
instances or dicts with compatible properties
contour
A tuple of :class:`plotly.graph_objects.Contour`
instances or dicts with compatible properties
densitymapbox
A tuple of :class:`plotly.graph_objects.Densitymapbox`
instances or dicts with compatible properties
funnelarea
A tuple of :class:`plotly.graph_objects.Funnelarea`
instances or dicts with compatible properties
funnel
A tuple of :class:`plotly.graph_objects.Funnel`
instances or dicts with compatible properties
heatmapgl
A tuple of :class:`plotly.graph_objects.Heatmapgl`
instances or dicts with compatible properties
heatmap
A tuple of :class:`plotly.graph_objects.Heatmap`
instances or dicts with compatible properties
histogram2dcontour
A tuple of
:class:`plotly.graph_objects.Histogram2dContour`
instances or dicts with compatible properties
histogram2d
A tuple of :class:`plotly.graph_objects.Histogram2d`
instances or dicts with compatible properties
histogram
A tuple of :class:`plotly.graph_objects.Histogram`
instances or dicts with compatible properties
image
A tuple of :class:`plotly.graph_objects.Image`
instances or dicts with compatible properties
indicator
A tuple of :class:`plotly.graph_objects.Indicator`
instances or dicts with compatible properties
isosurface
A tuple of :class:`plotly.graph_objects.Isosurface`
instances or dicts with compatible properties
mesh3d
A tuple of :class:`plotly.graph_objects.Mesh3d`
instances or dicts with compatible properties
ohlc
A tuple of :class:`plotly.graph_objects.Ohlc` instances
or dicts with compatible properties
parcats
A tuple of :class:`plotly.graph_objects.Parcats`
instances or dicts with compatible properties
parcoords
A tuple of :class:`plotly.graph_objects.Parcoords`
instances or dicts with compatible properties
pie
A tuple of :class:`plotly.graph_objects.Pie` instances
or dicts with compatible properties
pointcloud
A tuple of :class:`plotly.graph_objects.Pointcloud`
instances or dicts with compatible properties
sankey
A tuple of :class:`plotly.graph_objects.Sankey`
instances or dicts with compatible properties
scatter3d
A tuple of :class:`plotly.graph_objects.Scatter3d`
instances or dicts with compatible properties
scattercarpet
A tuple of :class:`plotly.graph_objects.Scattercarpet`
instances or dicts with compatible properties
scattergeo
A tuple of :class:`plotly.graph_objects.Scattergeo`
instances or dicts with compatible properties
scattergl
A tuple of :class:`plotly.graph_objects.Scattergl`
instances or dicts with compatible properties
scattermapbox
A tuple of :class:`plotly.graph_objects.Scattermapbox`
instances or dicts with compatible properties
scatterpolargl
A tuple of :class:`plotly.graph_objects.Scatterpolargl`
instances or dicts with compatible properties
scatterpolar
A tuple of :class:`plotly.graph_objects.Scatterpolar`
instances or dicts with compatible properties
scatter
A tuple of :class:`plotly.graph_objects.Scatter`
instances or dicts with compatible properties
scatterternary
A tuple of :class:`plotly.graph_objects.Scatterternary`
instances or dicts with compatible properties
splom
A tuple of :class:`plotly.graph_objects.Splom`
instances or dicts with compatible properties
streamtube
A tuple of :class:`plotly.graph_objects.Streamtube`
instances or dicts with compatible properties
sunburst
A tuple of :class:`plotly.graph_objects.Sunburst`
instances or dicts with compatible properties
surface
A tuple of :class:`plotly.graph_objects.Surface`
instances or dicts with compatible properties
table
A tuple of :class:`plotly.graph_objects.Table`
instances or dicts with compatible properties
treemap
A tuple of :class:`plotly.graph_objects.Treemap`
instances or dicts with compatible properties
violin
A tuple of :class:`plotly.graph_objects.Violin`
instances or dicts with compatible properties
volume
A tuple of :class:`plotly.graph_objects.Volume`
instances or dicts with compatible properties
waterfall
A tuple of :class:`plotly.graph_objects.Waterfall`
instances or dicts with compatible properties
Returns
-------
Data
"""
super(Data, self).__init__("data")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.template.Data
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.template.Data`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.layout.template import data as v_data
# Initialize validators
# ---------------------
self._validators["area"] = v_data.AreasValidator()
self._validators["barpolar"] = v_data.BarpolarsValidator()
self._validators["bar"] = v_data.BarsValidator()
self._validators["box"] = v_data.BoxsValidator()
self._validators["candlestick"] = v_data.CandlesticksValidator()
self._validators["carpet"] = v_data.CarpetsValidator()
self._validators["choroplethmapbox"] = v_data.ChoroplethmapboxsValidator()
self._validators["choropleth"] = v_data.ChoroplethsValidator()
self._validators["cone"] = v_data.ConesValidator()
self._validators["contourcarpet"] = v_data.ContourcarpetsValidator()
self._validators["contour"] = v_data.ContoursValidator()
self._validators["densitymapbox"] = v_data.DensitymapboxsValidator()
self._validators["funnelarea"] = v_data.FunnelareasValidator()
self._validators["funnel"] = v_data.FunnelsValidator()
self._validators["heatmapgl"] = v_data.HeatmapglsValidator()
self._validators["heatmap"] = v_data.HeatmapsValidator()
self._validators["histogram2dcontour"] = v_data.Histogram2dContoursValidator()
self._validators["histogram2d"] = v_data.Histogram2dsValidator()
self._validators["histogram"] = v_data.HistogramsValidator()
self._validators["image"] = v_data.ImagesValidator()
self._validators["indicator"] = v_data.IndicatorsValidator()
self._validators["isosurface"] = v_data.IsosurfacesValidator()
self._validators["mesh3d"] = v_data.Mesh3dsValidator()
self._validators["ohlc"] = v_data.OhlcsValidator()
self._validators["parcats"] = v_data.ParcatssValidator()
self._validators["parcoords"] = v_data.ParcoordssValidator()
self._validators["pie"] = v_data.PiesValidator()
self._validators["pointcloud"] = v_data.PointcloudsValidator()
self._validators["sankey"] = v_data.SankeysValidator()
self._validators["scatter3d"] = v_data.Scatter3dsValidator()
self._validators["scattercarpet"] = v_data.ScattercarpetsValidator()
self._validators["scattergeo"] = v_data.ScattergeosValidator()
self._validators["scattergl"] = v_data.ScatterglsValidator()
self._validators["scattermapbox"] = v_data.ScattermapboxsValidator()
self._validators["scatterpolargl"] = v_data.ScatterpolarglsValidator()
self._validators["scatterpolar"] = v_data.ScatterpolarsValidator()
self._validators["scatter"] = v_data.ScattersValidator()
self._validators["scatterternary"] = v_data.ScatterternarysValidator()
self._validators["splom"] = v_data.SplomsValidator()
self._validators["streamtube"] = v_data.StreamtubesValidator()
self._validators["sunburst"] = v_data.SunburstsValidator()
self._validators["surface"] = v_data.SurfacesValidator()
self._validators["table"] = v_data.TablesValidator()
self._validators["treemap"] = v_data.TreemapsValidator()
self._validators["violin"] = v_data.ViolinsValidator()
self._validators["volume"] = v_data.VolumesValidator()
self._validators["waterfall"] = v_data.WaterfallsValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("area", None)
self["area"] = area if area is not None else _v
_v = arg.pop("barpolar", None)
self["barpolar"] = barpolar if barpolar is not None else _v
_v = arg.pop("bar", None)
self["bar"] = bar if bar is not None else _v
_v = arg.pop("box", None)
self["box"] = box if box is not None else _v
_v = arg.pop("candlestick", None)
self["candlestick"] = candlestick if candlestick is not None else _v
_v = arg.pop("carpet", None)
self["carpet"] = carpet if carpet is not None else _v
_v = arg.pop("choroplethmapbox", None)
self["choroplethmapbox"] = (
choroplethmapbox if choroplethmapbox is not None else _v
)
_v = arg.pop("choropleth", None)
self["choropleth"] = choropleth if choropleth is not None else _v
_v = arg.pop("cone", None)
self["cone"] = cone if cone is not None else _v
_v = arg.pop("contourcarpet", None)
self["contourcarpet"] = contourcarpet if contourcarpet is not None else _v
_v = arg.pop("contour", None)
self["contour"] = contour if contour is not None else _v
_v = arg.pop("densitymapbox", None)
self["densitymapbox"] = densitymapbox if densitymapbox is not None else _v
_v = arg.pop("funnelarea", None)
self["funnelarea"] = funnelarea if funnelarea is not None else _v
_v = arg.pop("funnel", None)
self["funnel"] = funnel if funnel is not None else _v
_v = arg.pop("heatmapgl", None)
self["heatmapgl"] = heatmapgl if heatmapgl is not None else _v
_v = arg.pop("heatmap", None)
self["heatmap"] = heatmap if heatmap is not None else _v
_v = arg.pop("histogram2dcontour", None)
self["histogram2dcontour"] = (
histogram2dcontour if histogram2dcontour is not None else _v
)
_v = arg.pop("histogram2d", None)
self["histogram2d"] = histogram2d if histogram2d is not None else _v
_v = arg.pop("histogram", None)
self["histogram"] = histogram if histogram is not None else _v
_v = arg.pop("image", None)
self["image"] = image if image is not None else _v
_v = arg.pop("indicator", None)
self["indicator"] = indicator if indicator is not None else _v
_v = arg.pop("isosurface", None)
self["isosurface"] = isosurface if isosurface is not None else _v
_v = arg.pop("mesh3d", None)
self["mesh3d"] = mesh3d if mesh3d is not None else _v
_v = arg.pop("ohlc", None)
self["ohlc"] = ohlc if ohlc is not None else _v
_v = arg.pop("parcats", None)
self["parcats"] = parcats if parcats is not None else _v
_v = arg.pop("parcoords", None)
self["parcoords"] = parcoords if parcoords is not None else _v
_v = arg.pop("pie", None)
self["pie"] = pie if pie is not None else _v
_v = arg.pop("pointcloud", None)
self["pointcloud"] = pointcloud if pointcloud is not None else _v
_v = arg.pop("sankey", None)
self["sankey"] = sankey if sankey is not None else _v
_v = arg.pop("scatter3d", None)
self["scatter3d"] = scatter3d if scatter3d is not None else _v
_v = arg.pop("scattercarpet", None)
self["scattercarpet"] = scattercarpet if scattercarpet is not None else _v
_v = arg.pop("scattergeo", None)
self["scattergeo"] = scattergeo if scattergeo is not None else _v
_v = arg.pop("scattergl", None)
self["scattergl"] = scattergl if scattergl is not None else _v
_v = arg.pop("scattermapbox", None)
self["scattermapbox"] = scattermapbox if scattermapbox is not None else _v
_v = arg.pop("scatterpolargl", None)
self["scatterpolargl"] = scatterpolargl if scatterpolargl is not None else _v
_v = arg.pop("scatterpolar", None)
self["scatterpolar"] = scatterpolar if scatterpolar is not None else _v
_v = arg.pop("scatter", None)
self["scatter"] = scatter if scatter is not None else _v
_v = arg.pop("scatterternary", None)
self["scatterternary"] = scatterternary if scatterternary is not None else _v
_v = arg.pop("splom", None)
self["splom"] = splom if splom is not None else _v
_v = arg.pop("streamtube", None)
self["streamtube"] = streamtube if streamtube is not None else _v
_v = arg.pop("sunburst", None)
self["sunburst"] = sunburst if sunburst is not None else _v
_v = arg.pop("surface", None)
self["surface"] = surface if surface is not None else _v
_v = arg.pop("table", None)
self["table"] = table if table is not None else _v
_v = arg.pop("treemap", None)
self["treemap"] = treemap if treemap is not None else _v
_v = arg.pop("violin", None)
self["violin"] = violin if violin is not None else _v
_v = arg.pop("volume", None)
self["volume"] = volume if volume is not None else _v
_v = arg.pop("waterfall", None)
self["waterfall"] = waterfall if waterfall is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = ["Data", "Layout", "data"]
from plotly.graph_objs.layout.template import data
| [
"[email protected]"
]
| |
c018206b30963896b42d4d8f91602b19fbb8578c | 36afa271f080459adf1014cd23f4be9f954dfee6 | /Crawler/Requests/douban.py | 5160d23321fc240a3aeba30552e69eecea7db83c | []
| no_license | King-Of-Game/Python | b69186a7574ce1c0b7097207cfe9a2eb38a90bc0 | 643b9fd22efd78f6679735f23432943a57b5f5bb | refs/heads/master | 2023-05-25T05:35:14.473114 | 2021-10-24T12:52:21 | 2021-10-24T12:52:21 | 151,251,434 | 3 | 0 | null | 2023-05-01T20:51:50 | 2018-10-02T12:34:04 | HTML | UTF-8 | Python | false | false | 2,155 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib.request
import lxml.html
import csv
url = 'https://movie.douban.com/top250?start={}&filter='
# Fetch the given URL and return the response body as a string.
def getSource(url):
    """Download *url* and return the decoded UTF-8 response body.

    A desktop-browser User-Agent header is sent instead of urllib's default
    (presumably to avoid bot blocking by the site -- TODO confirm).
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:32.0) Gecko/20100101 Firefox/32.0"
    }
    request = urllib.request.Request(url=url, headers=headers)
    return urllib.request.urlopen(request).read().decode('utf-8')
# Parse one result page with lxml and return the movie records as a list of dicts.
def everyPage(source):
    """Parse one Douban Top-250 HTML page and return a list of movie dicts.

    Each dict carries the title, cast line, rating, quote and detail link,
    keyed by the Chinese column names used in the CSV output.
    """
    document = lxml.html.document_fromstring(source)
    showList = []
    for info in document.xpath('//div[@class="info"]'):
        # All paths below are evaluated relative to this movie's
        # <div class="info"> element.
        title = info.xpath('div[@class="hd"]/a/span[@class="title"]/text()')[0]
        otherTitle = info.xpath('div[@class="hd"]/a/span[@class="other"]/text()')[0]
        mainning = info.xpath('div[@class="bd"]/p[@class=""]/text()')[0]
        # BUG FIX: the original used absolute '//...' paths here, which lxml
        # evaluates against the whole document, so every movie received the
        # FIRST movie's rating and quote.  './/' keeps the search inside the
        # current node.
        star = info.xpath('.//div[@class="star"]/span[@class="rating_num"]/text()')[0]
        quotes = info.xpath('.//p[@class="quote"]/span/text()')
        quote = quotes[0] if quotes else ''  # a few entries have no quote line
        link = info.xpath('div[@class="hd"]/a/@href')[0]
        showList.append({
            '片名': title + otherTitle,
            '演职员': mainning,
            '评分': star,
            '名言': quote,
            '链接': link,
        })
    return showList
# Write the movie list to a CSV file.
def getCsv(movieList):
    """Write the scraped movie dicts to douban1.csv (UTF-8, header row first).

    :param movieList: list of dicts keyed by the Chinese column names
        produced by everyPage().
    """
    # 'with' guarantees the handle is flushed and closed even on error; the
    # original left the file open, which risks truncated/buffered output.
    with open('douban1.csv', 'w', encoding='utf-8', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['片名', '演职员', '评分', '名言', '链接'])
        writer.writeheader()
        for row in movieList:
            writer.writerow(row)
if __name__ == '__main__':
    # Scrape all 10 pages of the Douban Top-250 list (25 movies per page),
    # then dump everything to douban1.csv.
    movieList = []
    for i in range(10):
        nowUrl = url.format(i*25)  # URL for the i-th page (start=0, 25, 50, ...)
        print(nowUrl)
        source = getSource(nowUrl)  # download the page HTML
        movieList += everyPage(source)  # parse it and accumulate the movie dicts
    print(movieList)
    getCsv(movieList)  # write the accumulated data to the CSV file
| [
"[email protected]"
]
| |
6314341c47af973fbafa71b98b6a1e0add874c4e | 1625edfe28b4b0979fd32b4a3c5e55249a993fd5 | /baekjoon9996.py | b916bd74061aa9b0b4371a44afce755157f406d1 | []
| no_license | beOk91/baekjoon2 | b8bf504c506c6278899d4107ecfe51974ef13f5e | 39569f8effb8e32405a7d74d98bdabcab783ec56 | refs/heads/master | 2023-05-11T20:11:19.015113 | 2020-09-14T23:58:49 | 2020-09-14T23:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | n=int(input())
pattern=input().strip().split("*")
for _ in range(n):
text=input().strip()
print("DA" if text.index(pattern[0])<text.index(pattern[1]) else "NE") | [
"[email protected]"
]
| |
662b4cbedb8253d593dc611960818f152b7a1d55 | d78dfc5089717fc242bbd7097f507d811abb4260 | /USA/script.icechannel.extn.common/plugins/liveresolvers/kingofplayerscom_lrv.py | 4de6efd8dce31783d522389a9397402c3a1d5726 | []
| no_license | tustxk/AddOnRepo | 995b980a9ec737e2c25bed423fc83f710c697e40 | 6b86a06cb37e6e10b4119584dd7311ebc2318e54 | refs/heads/master | 2022-10-08T21:34:34.632346 | 2016-10-28T09:48:01 | 2016-10-28T09:48:01 | 70,684,775 | 1 | 1 | null | 2022-10-01T16:27:13 | 2016-10-12T09:31:16 | Python | UTF-8 | Python | false | false | 1,760 | py | '''
Ice Channel
'''
from entertainment.plugnplay.interfaces import LiveResolver
from entertainment.plugnplay import Plugin
from entertainment import common
class kingofplayercom(LiveResolver):
    """Live-stream resolver for pages embedding the kingofplayer.com player.

    Follows the embedded cdn.kingofplayers.com player page(s) and assembles a
    single rtmpdump-style connection string (streamer + playpath + swfUrl +
    pageUrl + fixed options).
    """
    implements = [LiveResolver]
    name = 'kingofplayer.com'
    def ResolveLive(self, content, url):
        """Resolve an RTMP stream out of *content* (the HTML fetched from *url*).

        Returns a 4-tuple (resolved, playable, content, url).  On success the
        third element is the assembled RTMP connection string; on failure the
        original *content* is returned unchanged.
        """
        import re
        # Look for an embedded player script/iframe hosted on the CDN.
        new_content = re.search('src=[\'"]{1}(http://cdn\.kingofplayers\.com/.+?\.(?:js|html))[\'"]{1}', content)
        if new_content:
            page_url = new_content.group(1)
            from entertainment.net import Net
            net = Net()
            # Fetch the player page; the Referer header appears to be required
            # (presumably an anti-hotlinking check -- TODO confirm).
            new_content = net.http_GET( page_url, headers={'Referer':url} ).content
            # First attempt: an rtmp:// or rtmpe:// URL directly in this page.
            streamer = re.search('[,\: \'"=]{1,5}((?:rtmp\://|rtmpe\://).+?[^\'"&=]+?)[\'"&]{1}', new_content)
            if not streamer:
                # The first hop was only a wrapper: follow one more embedded
                # .html player page and search that one instead.
                new_content = re.search('src=[\'"]{1}(http://cdn\.kingofplayers\.com/.+?\.html)[\'"]{1}', new_content)
                new_url = new_content.group(1)
                new_content = net.http_GET( new_url, headers={'Referer':page_url} ).content
                page_url = new_url
                streamer = re.search('[,\: \'"=]{1,5}((?:rtmp\://|rtmpe\://).+?[^\'"&=]+?)[\'"&]{1}', new_content)
            # NOTE(review): if the second search also fails, or the swf/file
            # patterns below are absent, .group(1) raises AttributeError --
            # the original code has the same behavior.
            streamer = streamer.group(1)
            # Pull the SWF player URL and the 'file' playpath out of the page.
            swf_url = re.search('[,\: \'"=]{1,5}(http\://.+?\.swf)[\'"&]{1}', new_content).group(1)
            playpath = re.search('file[,\: \'"=]*([^\'"]+?)[\'"&]{1}', new_content).group(1)
            content = streamer + ' playpath=' + playpath + ' swfUrl=' + swf_url + ' pageUrl=' + page_url + ' timeout=15 live=1'
            return (True, True, content, url)
        return (False, False, content, url)
| [
"[email protected]"
]
| |
c0b6072667963c1a241351c1acac80180630ba7e | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba2855.pngMap.py | ef2996c17bc7de399c2d09bc36844fa2d997de60 | []
| no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba2855.pngMap = [
'11111111111111111111111111111111111111111111111111110000000000000000000000101111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000000000011111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000000000011111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000000000111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000000011111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000001111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000000000111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000001111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000001111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111001111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111101111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111100011111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111110100011111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000000010111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111101000000000000000000000000001111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111101000000000000000000000000000010111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000000000000000000010111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000000000000000000010111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111000000000000000000000000000000000000111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111000000000000000000000000000001100111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111110000000000000000000000000000000001001111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111110000000000000000000001001001100001111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111110000000000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111000000010000000000000000111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111101111100000000000001011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000000001111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000000000111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000001111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000000111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000100111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000001011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111010000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000000000111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111010000000000000001111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000000101111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000000000111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000000111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100001000001011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100001100001101111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111000000000000000000000000000001111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111110000000000000000000000000000000000111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111100000000000000000000000000000000111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000011110000000110111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
]
| [
"[email protected]"
]
| |
9e68fb8d64881f8571975e3a2f5f99c37158d357 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_53/247.py | 266388f5f966aaf62ae3d3e229214be70a147baa | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | def snapper(n, k):
return (k+1) & ((1<<n) - 1) == 0
def main():
labels = ["OFF", "ON"]
try:
cases = xrange(1, int(raw_input())+1)
for case in cases:
n, k = map(int, raw_input().split())
print "Case #%d: %s" % (case, labels[snapper(n, k)])
except ValueError:
print "INVALID INPUT"
main()
| [
"[email protected]"
]
| |
9218a02e256da0f5f46dbfa773a0d6eccd56e154 | 0521afa39b2c9b64977da622779c906970af865b | /script/latent_factor_model.py | 7c5b4ec47ab25332ccedb53548d30c37e12042cc | []
| no_license | DamonHao/rec_sys | f48234f1689fb8f353a80a301647fa40bda9086d | e9272676d3794136f908eb9521a2944eefd9b38c | refs/heads/master | 2021-01-13T10:56:38.309864 | 2016-10-30T03:16:56 | 2016-10-30T03:16:56 | 72,264,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,438 | py | # -*- coding: utf-8 -*-
import random
import heapq
import math
class LatentFactorModel(object):
def __init__(self, filePath):
self._filePath = filePath
self._splitData(5, 1, 0)
def precisionAndRecall(self, N):
hit = 0
precision_all = 0
recall_all = 0
train = self._train
test = self._test
for user in train.iterkeys():
test_set_user_items = test.get(user, None)
if test_set_user_items is None:
continue
rank = self.recommend(user, N)
for item, pui in rank:
if item in test_set_user_items:
hit += 1
precision_all += N
recall_all += len(test_set_user_items)
return hit / float(precision_all), hit / float(recall_all)
def coverage(self, N):
recommend_items = set()
all_items = set()
train = self._train
num = 0
for user in train.iterkeys():
for item in train[user].iterkeys():
all_items.add(item)
rank = self.recommend(user, N)
num += len(rank)
for item, _ in rank:
recommend_items.add(item)
print "coverage", num, len(recommend_items), len(all_items)
return len(recommend_items) / float(len(all_items))
def popularity(self, N):
item_popularity = {}
popularity = 0
num = 0
for user, items in self._train.iteritems():
for item in items.iterkeys():
item_popularity.setdefault(item, 0)
item_popularity[item] += 1
for user in self._train.iterkeys():
rank = self.recommend(user, N)
for item, _ in rank:
popularity += math.log(1 + item_popularity[item])
num += 1
popularity /= float(num)
return popularity
def _splitData(self, totalSplitNum, kthAsTest, seed):
data = open(self._filePath)
random.seed(seed)
train = {} # {user : {item: score}}
test = {}
count = 0
for line in data:
user, item, score, _ = line.strip().split("::")
if random.randint(1, totalSplitNum) == kthAsTest:
test.setdefault(user, {})
test[user][item] = int(score)
else:
train.setdefault(user, {})
train[user][item] = int(score)
count += 1
if count == MAX_DATA_NUM:
break
print "data num:", count
self._train = train
self._test = test
def _sortPopularityItem(self):
itemsPopularity = {}
for items in self._train.itervalues():
for item in items:
itemsPopularity.setdefault(item, 0)
itemsPopularity[item] += 1
validNun = int(len(itemsPopularity) * 0.3)
print "total, valid num", len(itemsPopularity), validNun
assert validNun
sortedItems = heapq.nlargest(validNun, itemsPopularity.iteritems(), key=lambda e:e[1])
self._sortedItems = [item for item, _ in sortedItems]
def buildUserAction(self, negativeRatio):
self._sortPopularityItem()
sortedItems = self._sortedItems
maxSortedItemIndex = len(sortedItems)-1
usersAction = {}
for user, items in self._train.iteritems():
action = {}
# positive
for item in items:
action[item] = 1
negative_num = 0
# negative
itemsLen = len(items)
targetNegativeNum = itemsLen * negativeRatio
for i in xrange(0, itemsLen * 2):
item = sortedItems[random.randint(0, maxSortedItemIndex)]
if item in action:
continue
action[item] = 0
negative_num += 1
if negative_num >= targetNegativeNum :
break
usersAction[user] = action
self._usersAction = usersAction
def trainModel(self, itemClassNum, iterNum, learnRate, overfitParam):
self._itemClassNum = itemClassNum
self._initModel()
userToClass = self._userToClass
itemToClass = self._itemToClass
for step in xrange(iterNum):
for user, items in self._train.iteritems():
userAction = self._usersAction[user]
for item, interest in userAction.iteritems():
interestDiff = interest - self._predict(user, item)
for classIndex in xrange(itemClassNum):
userWeight = userToClass[user][classIndex]
itemWeight = itemToClass[item][classIndex]
userToClass[user][classIndex] += learnRate * (interestDiff * itemWeight - overfitParam * userWeight)
itemToClass[item][classIndex] += learnRate * (interestDiff * userWeight - overfitParam * itemWeight)
learnRate *= 0.9
def recommend(self, user, N):
has_items = self._train.get(user, None)
if not has_items:
return []
candidates = []
for item in self._itemToClass.iterkeys():
if item in has_items:
continue
interest = self._predict(user, item)
candidates.append((item, interest))
return heapq.nlargest(N, candidates, key=lambda e:e[1])
def _predict(self, user, item):
interest = 0
userToClass = self._userToClass
itemToClass = self._itemToClass
for index in xrange(self._itemClassNum):
interest += userToClass[user][index] * itemToClass[item][index]
return interest
def _initModel(self):
userToClass = {}
itemToClass = {}
epsilon = 0.1
itemClassNum = self._itemClassNum
for user, items in self._train.iteritems():
userToClass[user] = [random.uniform(0, epsilon) for i in xrange(itemClassNum)]
for item in items:
if item not in itemToClass:
itemToClass[item] = [random.uniform(0, epsilon) for i in xrange(itemClassNum)]
self._userToClass = userToClass
self._itemToClass = itemToClass
MAX_DATA_NUM = 100000
if __name__ == '__main__':
    import os
    # MovieLens-1M ratings file, resolved relative to this script's location.
    filePath = os.path.join(os.path.dirname(__file__), '../ml-1m/ratings.dat')
    lfm = LatentFactorModel(filePath)
    # One negative sample per positive; 5 latent factors, 100 SGD passes,
    # learning rate 0.02, L2 regularization 0.01.
    lfm.buildUserAction(1)
    lfm.trainModel(5, 100, 0.02, 0.01)
    # print lfm.recommend('1', 10)
    # print lfm.precisionAndRecall(10)
    print lfm.coverage(10), lfm.popularity(10)
"[email protected]"
]
| |
bc3576ab54c7b1695d2d8d1f184e0c0fde88cb45 | 90047daeb462598a924d76ddf4288e832e86417c | /ios/web/payments/DEPS | 657dd83cf44bf5d166730b417f9e65e9d8f84f50 | [
"BSD-3-Clause"
]
| permissive | massbrowser/android | 99b8c21fa4552a13c06bbedd0f9c88dd4a4ad080 | a9c4371682c9443d6e1d66005d4db61a24a9617c | refs/heads/master | 2022-11-04T21:15:50.656802 | 2017-06-08T12:31:39 | 2017-06-08T12:31:39 | 93,747,579 | 2 | 2 | BSD-3-Clause | 2022-10-31T10:34:25 | 2017-06-08T12:36:07 | null | UTF-8 | Python | false | false | 51 | include_rules = [
"+components/payments/core",
]
| [
"[email protected]"
]
| ||
faa20f2c671bf2819106d451219245da38e4ce8b | fe33bdb20436a379a17d56b83816d7064cb75d90 | /src/rocon_concert/concert_conductor/src/concert_conductor/transitions.py | f32eb0dbebdae0d4994c354a4df9ab4426789719 | []
| no_license | uml-robotics/catkin_tester | 764744614782acaff46f66f25dbd1650d0fcd5e8 | dfc8bb2026c06d0f97696a726a6773ff8b99496e | refs/heads/master | 2022-10-31T11:48:27.207535 | 2017-11-27T18:09:38 | 2017-11-27T18:09:38 | 111,495,779 | 0 | 1 | null | 2022-10-19T14:49:44 | 2017-11-21T03:45:59 | C | UTF-8 | Python | false | false | 4,511 | py | #!/usr/bin/env python
#
# License: BSD
# https://raw.github.com/robotics-in-concert/rocon_concert/license/LICENSE
#
"""
.. module:: transitions
This module does transition handling for the concert client state machine.
"""
##############################################################################
# Imports
##############################################################################
import concert_msgs.msg as concert_msgs
##############################################################################
# Aliases
##############################################################################
State = concert_msgs.ConcertClientState
##############################################################################
# Transitions
##############################################################################
class Dummy(object):
    """
    No-op transition handler, used for state changes that need no extra work.
    """
    def __init__(self, concert_client):
        """
        :param concert_client: client whose state is transitioning (stored to
            match the interface of the other transition handlers).
        """
        self.concert_client = concert_client
    def __call__(self):
        """
        Nothing to do here.
        """
        pass
class TransitionToGone(object):
    """
    Transition handler when moving from any state to the gone state. This will always
    occur if the remote gateway has disappeared from the hub's awareness (happens
    when the remote gateway shuts down) or has been missing too long. We manually
    update the fact that the gateway is no longer available in the concert client's
    data here.
    """
    def __init__(self, concert_client):
        """
        :param concert_client: the client that is transitioning to gone.
        """
        self.concert_client = concert_client
    def __call__(self, local_gateway):
        """
        Mark the remote gateway unavailable and cancel its pull requests.
        (The original docstring said "Nothing to do here", which was wrong.)

        :param local_gateway: local gateway used to cancel the pulls that were
            registered for this (now gone) concert client.
        """
        self.concert_client.msg.conn_stats.gateway_available = False
        local_gateway.request_pulls(self.concert_client.msg.gateway_name, cancel=True)
class PendingToUninvited(object):
    """
    Triggered when information about this client has been gathered.
    This information is relayed to the concert client object itself in this transition.
    """
    def __init__(self, concert_client):
        """
        :param concert_client: the client moving from pending to uninvited.
        """
        self.concert_client = concert_client
    def __call__(self, platform_info, rapps):
        """
        Store the gathered platform info and rapp list on the client's message.

        :param platform_info rocon_std_msgs/PlatformInfo: retrieved information about this client
        :param rapps rocon_app_manager_msgs/Rapp[]: list of rapps runnable by this client.
        """
        # this is legacy, and I think it's broken - I use concert alias now
        # self.msg.name = rocon_uri.parse(platform_info.uri).name.string
        self.concert_client.msg.platform_info = platform_info
        self.concert_client.msg.rapps = rapps
class AvailableToMissing(object):
    """
    Triggered when a robot is still with the concert, but has dropped its connection.
    """
    def __init__(self, concert_client):
        """
        :param concert_client: the client whose connection has dropped.
        """
        self.concert_client = concert_client
    def __call__(self):
        # Intentionally a no-op for now: the author thought an update might be
        # needed here but concluded it may not actually be necessary.
        pass
##############################################################################
# Transition Table
##############################################################################
# Maps a (from_state, to_state) pair to the handler class invoked when that
# transition occurs.  Pairs absent from the table are invalid transitions;
# commented-out entries are transitions that were deliberately disabled.
StateTransitionTable = {
    (State.PENDING, State.BAD) : Dummy, #@IgnorePep8 noqa
    # (State.PENDING, State.BLOCKING) : Dummy,
    # (State.PENDING, State.BUSY) : Dummy,
    (State.PENDING, State.UNINVITED) : PendingToUninvited,
    (State.PENDING, State.GONE) : TransitionToGone,
    (State.UNINVITED, State.BAD) : Dummy,
    (State.UNINVITED, State.BLOCKING) : Dummy,
    (State.UNINVITED, State.BUSY) : Dummy,
    (State.UNINVITED, State.JOINING) : Dummy,
    (State.UNINVITED, State.GONE) : TransitionToGone,
    # (State.JOINING, State.BAD) : Dummy,
    (State.JOINING, State.AVAILABLE) : Dummy,
    (State.JOINING, State.GONE) : TransitionToGone,
    # (State.AVAILABLE, State.BAD) : Dummy,
    (State.AVAILABLE, State.MISSING) : AvailableToMissing,
    (State.AVAILABLE, State.UNINVITED): Dummy,
    (State.AVAILABLE, State.GONE) : TransitionToGone,
    (State.MISSING, State.AVAILABLE) : Dummy,
    (State.MISSING, State.GONE) : TransitionToGone,
    (State.BUSY, State.PENDING) : Dummy,
    (State.BUSY, State.GONE) : TransitionToGone,
    (State.BLOCKING, State.GONE) : TransitionToGone,
    (State.BAD, State.GONE) : TransitionToGone,
}
"""
Table of valid transitions and their transition handlers.
"""
| [
"[email protected]"
]
| |
90a4237ca61b7f9c9261dcd9d368c2d88f4d51a1 | 2ab1aea0a5c9556b3ebc4aab3d436779e153ec03 | /repro_lap_reg/covar_results.py | a7653e5b937e9839d43d12252725bbcad6c52f82 | [
"MIT"
]
| permissive | idc9/repro_lap_reg | 8454b85df5d931dd3654dc4bdf50b3aacdaa185c | 1d3e846f8f2c3d04b4153d9ac56e0e9bd37198ca | refs/heads/main | 2023-05-31T21:57:35.379643 | 2021-07-06T19:14:46 | 2021-07-06T19:14:46 | 383,571,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | from fclsp.reshaping_utils import vec_hollow_sym
from repro_lap_reg.utils import merge_dicts
from repro_lap_reg.results_utils import compare_vecs, compare_adj_mats
def get_covar_results(est, true, zero_tol=0):
    """
    Compare an estimated covariance matrix against the ground truth.

    Parameters
    ----------
    est: an Estimator
        A fitted covariance estimator (possibly a CV wrapper around one).

    true: array-like, shape (n_features, n_features)
        The true covariance matrix.

    zero_tol: float
        Tolerance below which entries are treated as zero.

    Output
    ------
    out: dict
        Merged comparison metrics for the vectorized hollow upper triangle
        and the support (adjacency) pattern.
    """
    estimate = get_covar(est)
    # Compare the vectorized off-diagonal (hollow) upper triangles.
    vec_metrics = compare_vecs(est=vec_hollow_sym(estimate),
                               truth=vec_hollow_sym(true),
                               zero_tol=zero_tol)
    # Compare the sparsity/support patterns as adjacency matrices.
    support_metrics = compare_adj_mats(est=estimate, truth=true,
                                       zero_tol=zero_tol)
    # The two metric dicts have disjoint keys, so overlap is an error.
    return merge_dicts(vec_metrics, support_metrics, allow_key_overlap=False)
def get_covar(estimator):
    """
    Pull the fitted covariance matrix out of an estimator.

    Follows ``best_estimator_`` attributes (e.g. from a cross-validation
    wrapper) until an object exposing ``covariance_`` is found.

    Raises
    ------
    ValueError
        If no (wrapped) estimator carries a ``covariance_`` attribute.
    """
    _missing = object()
    current = estimator
    while current is not _missing:
        covariance = getattr(current, 'covariance_', _missing)
        if covariance is not _missing:
            return covariance
        # Unwrap one level (e.g. a grid-search style wrapper) and retry.
        current = getattr(current, 'best_estimator_', _missing)
    raise ValueError('No covariance matrix found')
| [
"[email protected]"
]
| |
eb7d4abc7fd412fb1fe580ea71764e891c5d8a3e | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p2DJ/New/R2/benchmark/startQiskit81.py | f56bf0234644cb7f5c31a5b7d04b23a9581890e7 | [
"BSD-3-Clause"
]
| permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,988 | py | # qubit number=2
# total number=9
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase/bit-flip oracle O_f for the boolean function ``f``.

    ``f`` maps an ``n``-character bitstring (e.g. "01") to "0" or "1".  For
    every input mapped to "1", a multi-controlled X flips the target qubit.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    # Enumerate every n-bit input and mark those that f maps to "1".
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Sandwich the MCT between X gates on the zero bits so the
            # control condition matches exactly this bitstring ``rep``.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble a Deutsch-Jozsa style circuit for the oracle of ``f``.

    The ``# number=N`` comments tag individual (auto-inserted) gates; note
    the extra x/cx gates were presumably added by a mutation tool -- they are
    not part of the textbook algorithm (TODO confirm).
    """
    # circuit begin
    input_qubit = QuantumRegister(n, "qc")
    target = QuantumRegister(1, "qt")
    prog = QuantumCircuit(input_qubit, target)

    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(target)

    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])

    prog.h(input_qubit[1])  # number=1
    prog.x(input_qubit[1])  # number=5
    prog.cx(input_qubit[0],input_qubit[1])  # number=4
    prog.h(target)
    prog.barrier()

    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [target])

    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()

    # measure
    #for i in range(n):
    #    prog.measure(input_qubit[i], classicals[i])

    prog.x(input_qubit[1])  # number=2
    prog.cx(input_qubit[0],input_qubit[1])  # number=6
    prog.x(input_qubit[1])  # number=7
    prog.cx(input_qubit[0],input_qubit[1])  # number=8
    # circuit end

    return prog
if __name__ == '__main__':
    # Build the circuit for f(rep) = last bit of the input bitstring.
    n = 2
    f = lambda rep: rep[-1]
    # f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
    # f = lambda rep: "0"
    prog = make_circuit(n, f)
    sample_shot =2800
    backend = BasicAer.get_backend('qasm_simulator')

    # Transpile against the FakeVigo device model before simulating.
    circuit1 = transpile(prog,FakeVigo())
    # NOTE(review): the two consecutive X gates on qubit 3 cancel out
    # (identity), only inflating gate count/depth -- presumably intentional
    # noise from the benchmark generator.
    circuit1.x(qubit=3)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    prog = circuit1

    # Run the simulation and dump counts, depth and the circuit drawing.
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()

    writefile = open("../data/startQiskit81.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| [
"[email protected]"
]
| |
29bbb97b476679fe6e940996af412393027ec248 | 36bdbbf1be53ba5f09b9a2b1dd15e91f8f6b0da1 | /restaurants/migrations/0026_auto_20181123_2205.py | 9c1f8e9c7b83d80b4adf2a46dac2d6330574bfcf | []
| no_license | phufoxy/fotourNew | 801ab2518424118020dc6e5f31a7ba90a654e56a | 6048c24f5256c8c5a0d18dc7b38c106a7c92a29c | refs/heads/master | 2023-04-13T01:34:22.510717 | 2018-12-26T03:46:09 | 2018-12-26T03:46:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | # Generated by Django 2.1 on 2018-11-23 15:05
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``comment_restaurant.date`` to carry a new default.

    NOTE(review): the default is the hard-coded instant the migration was
    generated (2018-11-23 22:05:52.500874), not ``timezone.now`` --
    presumably auto-emitted by ``makemigrations``; confirm this is intended.
    """

    dependencies = [
        ('restaurants', '0025_auto_20181123_2155'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comment_restaurant',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2018, 11, 23, 22, 5, 52, 500874)),
        ),
    ]
| [
"[email protected]"
]
| |
6efa9c6232634b06fe3bf53c306b0b495efe8926 | 93c30152f4afa5c8feefa401e796529545e52ed2 | /Device/urls.py | c4af24562b1f856fcd9a76d87dfe4cc60ee38eac | []
| no_license | FellowCode/SmartHome | a32fa813a14b5b88d3f100736d062f0424208e1a | 58055d23c566f4f0030189c8157a7de1660cd991 | refs/heads/master | 2020-05-04T07:12:35.831914 | 2019-06-12T15:53:29 | 2019-06-12T15:53:29 | 179,022,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | from django.contrib import admin
from django.urls import path, include
from .views import *
# URL routes for the Device app; view callables come from ``.views``.
urlpatterns = [
    path('change/', change_termo),       # thermostat change endpoint
    path('settings/<id>/', settings),    # per-device settings (id captured as str)
    path('stat/<id>/', statistic),       # per-device statistics
]
"[email protected]"
]
| |
99590d3668e5e1ee3725df50ac329f078e2b39ca | c851ab2120617c7145280f691d877d4f763bc2a4 | /wwlib/rarc.py | 51c021fea0e7acee28037445ab15078a4fe88bbe | [
"MIT"
]
| permissive | W3st3rnSky/wwrando | 17787706580b1b297731c19d4926d07d0415f0e2 | 9973046e856709458a69f9bebd6ea8c9092f593d | refs/heads/master | 2020-07-30T12:12:07.838722 | 2019-09-23T00:23:12 | 2019-09-23T00:23:12 | 210,229,357 | 0 | 0 | MIT | 2019-09-22T23:47:34 | 2019-09-22T23:47:34 | null | UTF-8 | Python | false | false | 8,803 | py |
import os
from io import BytesIO
from fs_helpers import *
from wwlib.yaz0 import Yaz0
from wwlib.dzx import DZx
from wwlib.events import EventList
from wwlib.bmg import BMG
from wwlib.charts import ChartList
from wwlib.bdl import BDL
from wwlib.bti import BTIFile
class RARC:
  """Parser/repacker for Nintendo RARC (.arc) archives.

  Reads the header, node (directory) table and file entry table from
  ``data`` (a seekable binary stream), decompressing Yaz0 input first.
  """

  def __init__(self, data):
    self.data = data

    # Transparently decompress Yaz0-compressed archives.
    if try_read_str(self.data, 0, 4) == "Yaz0":
      self.data = Yaz0.decompress(self.data)

    data = self.data

    self.magic = read_str(data, 0, 4)
    assert self.magic == "RARC"
    self.size = read_u32(data, 4)
    # Offsets in the header are relative to 0x20 (end of the fixed header).
    self.file_data_list_offset = read_u32(data, 0xC) + 0x20
    self.file_data_total_size = read_u32(data, 0x10)
    self.file_data_total_size_2 = read_u32(data, 0x14)
    self.file_data_total_size_3 = read_u32(data, 0x18)
    num_nodes = read_u32(data, 0x20)
    node_list_offset = 0x40
    self.total_num_file_entries = read_u32(data, 0x28)
    file_entries_list_offset = read_u32(data, 0x2C) + 0x20
    self.string_list_offset = read_u32(data, 0x34) + 0x20

    # Each node is a 0x10-byte directory record.
    self.nodes = []
    for node_index in range(0, num_nodes):
      offset = node_list_offset + node_index*0x10
      node = Node(data, offset)
      self.nodes.append(node)

    # Each file entry is 0x14 bytes; entries are grouped per node.
    self.file_entries = []
    for node in self.nodes:
      for file_index in range(node.first_file_index, node.first_file_index+node.num_files):
        file_entry_offset = file_entries_list_offset + file_index*0x14
        file_entry = FileEntry(data, file_entry_offset, self)
        self.file_entries.append(file_entry)
        node.files.append(file_entry)

    # Cache of wrapper objects (DZx, BMG, ...) keyed by file name.
    self.instantiated_object_files = {}

  def extract_all_files_to_disk_flat(self, output_directory):
    # Does not preserve directory structure.
    if not os.path.isdir(output_directory):
      os.mkdir(output_directory)

    for file_entry in self.file_entries:
      if file_entry.is_dir:
        continue

      output_file_path = os.path.join(output_directory, file_entry.name)

      file_entry.data.seek(0)
      with open(output_file_path, "wb") as f:
        f.write(file_entry.data.read())

  def extract_all_files_to_disk(self, output_directory=None):
    # Preserves directory structure.
    # NOTE(review): the default path relies on ``self.file_path``, which is
    # never set in this class -- presumably assigned by a caller; confirm
    # before using the no-argument form.
    if output_directory is None:
      output_directory, _ = os.path.splitext(self.file_path)

    root_node = self.nodes[0]
    self.extract_node_to_disk(root_node, output_directory)

  def extract_node_to_disk(self, node, path):
    """Recursively write ``node``'s files and subdirectories under ``path``."""
    if not os.path.isdir(path):
      os.mkdir(path)

    for file in node.files:
      if file.is_dir:
        # "." and ".." entries are self/parent links; recursing would loop.
        if file.name not in [".", ".."]:
          subdir_path = os.path.join(path, file.name)
          subdir_node = self.nodes[file.node_index]
          self.extract_node_to_disk(subdir_node, subdir_path)
      else:
        file_path = os.path.join(path, file.name)
        file.data.seek(0)
        with open(file_path, "wb") as f:
          f.write(file.data.read())

  def save_changes(self):
    # Repacks the .arc file.
    # Supports files changing in size but not changing filenames or adding/removing files.

    # Cut off the file data first since we're replacing this data entirely.
    self.data.truncate(self.file_data_list_offset)
    self.data.seek(self.file_data_list_offset)

    next_file_data_offset = 0
    for file_entry in self.file_entries:
      if file_entry.is_dir:
        continue
      data_size = data_len(file_entry.data)
      file_entry.data_offset = next_file_data_offset
      file_entry.data_size = data_size
      file_entry.save_changes()

      self.data.seek(self.file_data_list_offset + file_entry.data_offset)
      file_entry.data.seek(0)
      self.data.write(file_entry.data.read())

      # Pad start of the next file to the next 0x20 bytes.
      padded_data_size = (data_size + 0x1F) & ~0x1F
      next_file_data_offset += padded_data_size
      padding_size_needed = padded_data_size - data_size
      self.data.write(b"\0"*padding_size_needed)

    # Update rarc's size fields.
    self.size = self.file_data_list_offset + next_file_data_offset
    write_u32(self.data, 4, self.size)
    self.file_data_total_size = next_file_data_offset
    write_u32(self.data, 0x10, self.file_data_total_size)
    if self.file_data_total_size_2 != 0:
      # Unknown what this is for, but it must be properly set for arcs except for RELS.arc
      self.file_data_total_size_2 = self.file_data_total_size
      write_u32(self.data, 0x14, self.file_data_total_size_2)
    if self.file_data_total_size_3 != 0:
      # Unknown what this is for, but it must be properly set for RELS.arc
      self.file_data_total_size_3 = self.file_data_total_size
      write_u32(self.data, 0x18, self.file_data_total_size_3)

  def get_file_entry(self, file_name):
    """Return the raw FileEntry named ``file_name``, or None if absent."""
    for file_entry in self.file_entries:
      if file_entry.name == file_name:
        return file_entry
    return None

  def get_file(self, file_name):
    """Return a typed wrapper for ``file_name``, dispatching on extension.

    Wrappers are cached so repeated lookups return the same object.
    Raises for extensions with no known wrapper type.
    """
    if file_name in self.instantiated_object_files:
      return self.instantiated_object_files[file_name]

    file_entry = self.get_file_entry(file_name)
    if file_entry is None:
      return None

    if file_name.endswith(".dzs"):
      dzx = DZx(file_entry)
      self.instantiated_object_files[file_name] = dzx
      return dzx
    elif file_name.endswith(".dzr"):
      dzx = DZx(file_entry)
      self.instantiated_object_files[file_name] = dzx
      return dzx
    elif file_name == "event_list.dat":
      event_list = EventList(file_entry)
      self.instantiated_object_files[file_name] = event_list
      return event_list
    elif file_name.endswith(".bmg"):
      bmg = BMG(file_entry)
      self.instantiated_object_files[file_name] = bmg
      return bmg
    elif file_name.endswith(".bdl"):
      bdl = BDL(file_entry)
      self.instantiated_object_files[file_name] = bdl
      return bdl
    elif file_name.endswith(".bti"):
      bti = BTIFile(file_entry)
      self.instantiated_object_files[file_name] = bti
      return bti
    elif file_name == "cmapdat.bin":
      chart_list = ChartList(file_entry)
      self.instantiated_object_files[file_name] = chart_list
      return chart_list
    else:
      raise Exception("Unknown file type: %s" % file_name)
class Node:
  """One directory record from the RARC node table (0x10 bytes each)."""

  def __init__(self, data, offset):
    # Layout: char[4] type, u32 name offset, u16 name hash,
    # u16 number of files, u32 index of the first file entry.
    self.type = read_str(data, offset, 4)
    self.name_offset = read_u32(data, offset+4)
    self.name_hash = read_u16(data, offset+8)
    self.num_files = read_u16(data, offset+0xA)
    self.first_file_index = read_u32(data, offset+0xC)

    self.files = [] # This will be populated after the file entries have been read.
class FileEntry:
  """One 0x14-byte entry from the RARC file entry table.

  Represents either a file (with its data loaded into a BytesIO) or a
  directory (pointing at a Node by index).
  """

  def __init__(self, rarc_data, entry_offset, rarc):
    self.entry_offset = entry_offset
    self.rarc = rarc

    self.id = read_u16(rarc_data, entry_offset)
    self.name_hash = read_u16(rarc_data, entry_offset + 2)
    # Top byte is the type bitfield; low 3 bytes are the name offset.
    type_and_name_offset = read_u32(rarc_data, entry_offset + 4)
    # Meaning depends on the directory bit (see below).
    data_offset_or_node_index = read_u32(rarc_data, entry_offset + 8)
    self.data_size = read_u32(rarc_data, entry_offset + 0xC)

    self.type = ((type_and_name_offset & 0xFF000000) >> 24)
    # Type is a bitfield. Bits:
    #   01 - File?
    #   02 - Directory.
    #   04 - Compressed.
    #   10 - Data file? (As opposed to a REL file)
    #   20 - For dynamic link libraries, aka REL files?
    #   80 - Yaz0 compressed (as opposed to Yay0?).
    self.is_dir = (self.type & 0x02) != 0

    self.name_offset = type_and_name_offset & 0x00FFFFFF
    self.name = read_str_until_null_character(rarc_data, rarc.string_list_offset + self.name_offset)

    if self.is_dir:
      # Directories reference a Node instead of carrying data.
      self.node_index = data_offset_or_node_index
      self.data = None
    else:
      # Files: copy the payload out of the archive into an in-memory buffer.
      self.data_offset = data_offset_or_node_index
      rarc_data.seek(rarc.file_data_list_offset + self.data_offset)
      self.data = BytesIO(rarc_data.read(self.data_size))

  def decompress_data_if_necessary(self):
    """Replace Yaz0-compressed payload with its decompressed form."""
    if try_read_str(self.data, 0, 4) == "Yaz0":
      self.data = Yaz0.decompress(self.data)
      self.type &= ~0x84 # Clear compressed type bits

  def save_changes(self):
    """Write this entry's header fields back into the archive stream."""
    rarc_data = self.rarc.data

    # Recompute the filename hash (lowercased, hash = hash*3 + ord(char),
    # truncated to 16 bits) to match the game's hashing scheme.
    hash = 0
    for char in self.name:
      char = char.lower()
      hash *= 3
      hash += ord(char)
      hash &= 0xFFFF
    self.name_hash = hash

    type_and_name_offset = (self.type << 24) | (self.name_offset & 0x00FFFFFF)

    if self.is_dir:
      data_offset_or_node_index = self.node_index
    else:
      data_offset_or_node_index = self.data_offset
      self.data_size = data_len(self.data)

    write_u16(rarc_data, self.entry_offset+0x2, self.name_hash)
    write_u32(rarc_data, self.entry_offset+0x4, type_and_name_offset)
    write_u32(rarc_data, self.entry_offset+0x8, data_offset_or_node_index)
    write_u32(rarc_data, self.entry_offset+0xC, self.data_size)
| [
"[email protected]"
]
| |
bd7736ed9e1654b7791ad680e574daee736bddc6 | 7c63130ae44e773a51fcd38c5dc3116f46daecd7 | /error/Predicted_Results/test_sample7_7recom_model1.py | 8a8c36851f91301d7ddcb8594c7239eacbeb0a5d | []
| no_license | GitHubdeWill/code_recom | a4e8e393592d210b0481f61a3cc89ea475c95153 | 954c334e4abb25aa96786c9efa8f8ca22bc286aa | refs/heads/master | 2020-04-12T15:14:02.395548 | 2018-12-20T12:07:31 | 2018-12-20T12:07:31 | 162,574,531 | 0 | 1 | null | 2019-12-02T14:28:38 | 2018-12-20T12:07:00 | Python | UTF-8 | Python | false | false | 1,179 | py | class Canvas:
def __init__(self, width, height):
self.width = width
self.height = height
self.data = [[' '] * width for i in range(height)]
def setpixel(self, row, col):
self.data[row][col] = '*'
def getpixel(self, row, col):
return self.data[row][col]
def display(self):
print "\n".join(["".join(row) for row in self.data])
class Shape:
    """Base class for drawable objects; subclasses override ``paint``."""

    def paint(self, canvas):
        """Draw this shape onto ``canvas`` (no-op in the base class)."""
        return None
class Rectangle(Shape):
    """An axis-aligned rectangle anchored at (x, y) with width w, height h."""

    def __init__(self, x, y, w, h):
        self.x = x
        self.y = y
        self.w = w
        self.h = h

    def hline(self, x, y, w):
        """Draw a horizontal line of length ``w`` from (x, y). Stub."""
        pass

    def vline(self, x, y, h):
        """Draw a vertical line of length ``h`` from (x, y). Stub."""
        pass

    def paint(self, canvas):
        """Paint the rectangle outline: two horizontal and two vertical edges."""
        # Bug fix: the original called the bare names ``hline``/``vline``,
        # which raise NameError at runtime -- they are methods of this class
        # and must be called through ``self``.
        self.hline(self.x, self.y, self.w)
        self.hline(self.x, self.y + self.h, self.w)
        self.vline(self.x, self.y, self.h)
        self.vline(self.x + self.w, self.y, self.h)
class Square(Rectangle):
    """A square: a rectangle whose width and height both equal ``size``."""

    def __init__(self, x, y, size):
        # Bug fix: the original line was syntactically invalid --
        # ``Rectangle.__init__(self, x, y, size, self._download_class=True)``
        # followed by a stray ``)``.  A square is a rectangle with equal
        # width and height, so ``size`` is passed for both.
        Rectangle.__init__(self, x, y, size, size)
class CompoundShape(Shape):
    """A shape made of several child shapes, painted in insertion order."""

    def __init__(self, shapes):
        self.shapes = shapes

    def paint(self, canvas):
        """Delegate painting to every child shape in turn."""
        for shape in self.shapes:
            shape.paint(canvas)
"[email protected]"
]
| |
74ee475e802a1523ea256fe0e91b157437daa072 | 10d98fecb882d4c84595364f715f4e8b8309a66f | /pruning_identified_exemplars/save_checkpoint/imagenet_train_eval.py | 6e55cfcb1026e17d743d3876706537783e2cf68a | [
"LicenseRef-scancode-proprietary-license",
"CC-BY-4.0",
"Apache-2.0"
]
| permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 7,235 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Training script to sparsify a ResNet-50.
"""
import os
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from pruning_identified_exemplars.utils import model_utils
# model params
flags.DEFINE_integer(
    'steps_per_checkpoint', 500,
    'Controls how often checkpoints are generated. More steps per '
    'checkpoint = higher utilization of TPU and generally higher '
    'steps/sec')
flags.DEFINE_float('label_smoothing', 0.1,
                   'Relax confidence in the labels by (1-label_smoothing).')
flags.DEFINE_integer('steps_per_eval', 1251,
                     'Controls how often evaluation is performed.')
flags.DEFINE_integer('num_cores', 8, 'Number of cores.')
flags.DEFINE_string('output_dir', '',
                    'Directory where to write event logs and checkpoint.')
flags.DEFINE_string('mode', 'train',
                    'One of {"train_and_eval", "train", "eval"}.')
flags.DEFINE_string('train_dir', '',
                    'The location of the tfrecords used for training.')
flags.DEFINE_string('eval_dir', '',
                    'The location of the tfrecords used for eval.')
flags.DEFINE_string('master', 'local', 'Name of the TensorFlow master to use.')
# pruning flags
flags.DEFINE_string('pruning_hparams', '',
                    'Comma separated list of pruning-related hyperparameters')
flags.DEFINE_float('end_sparsity', 0.1,
                   'Target sparsity desired by end of training.')
flags.DEFINE_integer('sparsity_begin_step', 5000, 'Step to begin pruning at.')
flags.DEFINE_integer('sparsity_end_step', 8000, 'Step to end pruning at.')
flags.DEFINE_integer('pruning_frequency', 500, 'Step interval between pruning.')
# NOTE(review): the two help strings below are concatenated without a
# separator ("...pruningSpecify...") -- a missing space/comma in the help
# text only; it does not affect behavior.
flags.DEFINE_enum(
    'pruning_method', 'baseline',
    ('threshold', 'random_independent', 'random_cumulative', 'baseline'),
    'Method used for pruning'
    'Specify as baseline if no pruning is used.')
flags.DEFINE_bool('log_class_level_summaries', True,
                  'Boolean for whether to log class level precision/accuracy.')
flags.DEFINE_float('expansion_factor', 6.,
                   'how much to expand filters before depthwise conv')
flags.DEFINE_float(
    'training_steps_multiplier', 1.0,
    'Training schedule is shortened or extended with the '
    'multiplier, if it is not 1.')
flags.DEFINE_integer('block_width', 1, 'width of block')
flags.DEFINE_integer('block_height', 1, 'height of block')

# set this flag to true to do a test run of this code with synthetic data
flags.DEFINE_bool('test_small_sample', True,
                  'Boolean for whether to test internally.')

FLAGS = flags.FLAGS

# Baseline ImageNet training configuration; ``main`` mutates this dict in
# place with mode/pruning specific overrides.
imagenet_params = {
    'sloppy_shuffle': True,
    'num_cores': 8,
    'train_batch_size': 4096,
    'num_train_images': 1281167,
    'num_eval_images': 50000,
    'num_label_classes': 1000,
    'num_train_steps': 32000,
    'base_learning_rate': 0.1,
    'weight_decay': 1e-4,
    'eval_batch_size': 1024,
    'mean_rgb': [0.485 * 255, 0.456 * 255, 0.406 * 255],
    'stddev_rgb': [0.229 * 255, 0.224 * 255, 0.225 * 255]
}
def main(argv):
  """Assemble the run configuration from flags, then train or evaluate.

  Builds the pruning hyperparameter string, derives the output directory,
  merges mode-specific overrides into ``imagenet_params`` (mutated in
  place), and dispatches to ``model_utils.initiate_task_helper``.
  """
  del argv  # Unused.

  initial_sparsity = 0.0
  # Comma-separated hparams consumed by the pruning library.
  pruning_hparams_string = ('begin_pruning_step={0},'
                            'sparsity_function_begin_step={0},'
                            'end_pruning_step={1},'
                            'sparsity_function_end_step={1},'
                            'target_sparsity={2},'
                            'initial_sparsity={3},'
                            'pruning_frequency={4},'
                            'threshold_decay=0,'
                            'block_width={5},'
                            'block_height={6}'.format(
                                FLAGS.sparsity_begin_step,
                                FLAGS.sparsity_end_step, FLAGS.end_sparsity,
                                initial_sparsity, FLAGS.pruning_frequency,
                                FLAGS.block_width, FLAGS.block_height))

  # NOTE: this aliases (not copies) the module-level dict, which is then
  # mutated below.
  params = imagenet_params

  if FLAGS.test_small_sample:
    output_dir = '/tmp/imagenet_train_eval/'
  else:
    # configures train directories based upon hyperparameters.
    if FLAGS.pruning_method:
      folder_stub = os.path.join(FLAGS.pruning_method, str(FLAGS.end_sparsity),
                                 str(FLAGS.sparsity_begin_step),
                                 str(FLAGS.sparsity_end_step))
    else:
      # NOTE(review): ``FLAGS.resnet_depth`` is not defined in this file --
      # presumably registered by an imported module; confirm, otherwise this
      # branch raises at runtime.
      folder_stub = os.path.join('baseline', str(0.0), str(0.0), str(0.0),
                                 str(0.0), str(FLAGS.resnet_depth))

    output_dir = os.path.join(FLAGS.output_dir, folder_stub)

  update_params = {
      'lr_schedule': [(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)],
      'momentum': 0.9,
      'data_format': 'channels_last',
      'output_dir': output_dir,
      'label_smoothing': FLAGS.label_smoothing,
  }
  params.update(update_params)

  if FLAGS.pruning_method != 'baseline':
    params['pruning_method'] = FLAGS.pruning_method
  else:
    params['pruning_method'] = None
  params['mode'] = FLAGS.mode
  # Mode-specific batch size, task name and data location.
  if FLAGS.mode == 'train':
    params['batch_size'] = params['train_batch_size']
    params['task'] = 'imagenet_training'
    params['data_dir'] = FLAGS.train_dir
  else:
    params['batch_size'] = params['eval_batch_size']
    params['task'] = 'imagenet_eval'
    params['data_dir'] = FLAGS.eval_dir

  # Shrink everything for a synthetic smoke test run.
  if FLAGS.test_small_sample:
    update_params = {
        'batch_size': 2,
        'num_train_steps': 10,
        'num_images': 2,
        'num_train_images': 10,
        'num_eval_images': 10,
    }
    params['test_small_sample'] = True
    params.update(update_params)
  else:
    params['test_small_sample'] = False

  if FLAGS.mode == 'eval':
    # Run evaluation when there's a new checkpoint
    for ckpt in tf2.train.checkpoints_iterator(params['output_dir']):
      tf.logging.info('Starting to evaluate.')
      try:
        _ = model_utils.initiate_task_helper(
            ckpt_directory=ckpt, model_params=params, pruning_params=None)
        # Checkpoint filenames end in "-<global_step>".
        current_step = int(os.path.basename(ckpt).split('-')[1])
        if current_step >= params['num_train_steps']:
          tf.logging.info('Evaluation finished')
          break
      except tf.errors.NotFoundError:
        # Checkpoint can be garbage-collected between listing and reading.
        tf.logging.info('Checkpoint was not found, skipping checkpoint.')

  else:
    if FLAGS.mode == 'train':
      tf.logging.info('start training...')
      model_utils.initiate_task_helper(
          ckpt_directory=None,
          model_params=params,
          pruning_params=pruning_hparams_string)
      tf.logging.info('finished training.')


if __name__ == '__main__':
  app.run(main)
| [
"[email protected]"
]
| |
9d191ebc88daf5624bd0ef8db05ca5582c623a17 | 3cdd7019f3acbf7b7a7e879444454703fcc73d62 | /solutions/57.insert-interval.py | 5d2417be706aee6d99ed4e78e3e33eba5144c8ed | []
| no_license | quixoteji/Leetcode | 1dc2e52e53a7b58d9bae15ce2d5c4142cbd365af | 00bf9a8164008aa17507b1c87ce72a3374bcb7b9 | refs/heads/master | 2021-07-15T07:59:21.294297 | 2020-05-13T03:08:47 | 2020-05-13T03:08:47 | 138,812,553 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | #
# @lc app=leetcode id=57 lang=python3
#
# [57] Insert Interval
#
# @lc code=start
class Solution:
    def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:
        """Insert ``newInterval`` into a sorted list of disjoint intervals,
        merging any overlaps, and return the resulting list."""
        return self.sol1(intervals, newInterval)

    # Solution 1: place the new interval at its sorted position, then sweep
    # once to merge overlapping neighbors.  O(n) time after the insert.
    def sol1(self, intervals, newInterval):
        # Bug fixes vs. the original:
        #  * the insertion loop had no ``break``, so after the first insert
        #    every subsequent interval re-triggered the condition and
        #    ``newInterval`` was inserted multiple times;
        #  * removed a leftover debug ``print(intervals)``.
        for i in range(len(intervals)):
            if intervals[i][0] > newInterval[0]:
                intervals.insert(i, newInterval)
                break
        else:
            # No later start found: newInterval belongs at the end.
            intervals.append(newInterval)

        merged = []
        for interval in intervals:
            if not merged or interval[0] > merged[-1][1]:
                merged.append(interval)
            else:
                # Overlaps (or touches) the previous interval: extend it.
                merged[-1][1] = max(merged[-1][1], interval[1])
        return merged
# @lc code=end
| [
"[email protected]"
]
| |
638c780ac1d2224bab250dbad8d30263e3c8425f | 159c3669bfe0525b0608bb658971cf4a7d82c7c5 | /query_learn/models.py | ab0f74ef981aa922e6f460db759d0b96ddd4ec01 | []
| no_license | jatinkatyal13/Django_Boiler_Plate | eb163486dc0307c8c0a5e4cbcdfee53826a3640c | 550f50f33be396e3c82082cc722ec897dadf04a8 | refs/heads/master | 2020-03-11T03:49:39.089700 | 2018-04-28T10:50:31 | 2018-04-28T10:50:31 | 129,759,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | from django.db import models
# Create your models here.
class Blog(models.Model):
    """A blog, identified by a name and its website URL."""
    name = models.CharField(max_length = 100)
    website = models.URLField()

    def __str__ (self):
        return self.name
class Author(models.Model):
    """An author with a name and an integer score (defaults to 1)."""
    name = models.CharField(max_length = 200)
    score = models.IntegerField(default = 1)

    def __str__ (self):
        return self.name
class Entry(models.Model):
    """A text entry belonging to one blog, possibly by several authors."""
    text = models.TextField()
    # Deleting the parent blog cascades to its entries.
    blog = models.ForeignKey(Blog, on_delete = models.CASCADE)
    author = models.ManyToManyField(Author)
"[email protected]"
]
| |
f566ce02ce3a9f62a9487b3ae38419afe38437c6 | 40699a136c4f4500833e21e0d7863a1ba624a5cd | /pde/tools/tests/test_parameters.py | 761a0ce0f072ff26b20329f71a0ec5e47ec257c4 | [
"MIT"
]
| permissive | binder-oilgains/py-pde | 0e0226678b2598b50aa72762d9a70bb8e9536e43 | d76977095f1e915c63230e6895391f063d0778d8 | refs/heads/main | 2023-02-23T09:09:05.543386 | 2021-02-02T00:55:40 | 2021-02-02T00:55:40 | 334,454,351 | 0 | 0 | MIT | 2021-01-30T16:09:14 | 2021-01-30T16:09:13 | null | UTF-8 | Python | false | false | 5,307 | py | """
.. codeauthor:: David Zwicker <[email protected]>
"""
import itertools
import logging
import pickle
import numpy as np
import pytest
from pde.tools.parameters import (
DeprecatedParameter,
HideParameter,
Parameter,
Parameterized,
get_all_parameters,
sphinx_display_parameters,
)
def test_parameters():
    """ test mixing Parameterized """

    # Parameter objects stringify, pickle round-trip, and keep ``extra``.
    param = Parameter("a", 1, int, "help", extra={"b": 3})
    assert isinstance(str(param), str)

    p_string = pickle.dumps(param)
    param_new = pickle.loads(p_string)
    assert param.__dict__ == param_new.__dict__
    assert param is not param_new
    assert param_new.extra["b"] == 3

    class Test1(Parameterized):
        parameters_default = [param]

    # Defaults apply, explicit values override, unknown keys are rejected.
    t = Test1()
    assert t.parameters["a"] == 1
    assert t.get_parameter_default("a") == 1

    t = Test1(parameters={"a": 2})
    assert t.parameters["a"] == 2
    assert t.get_parameter_default("a") == 1
    with pytest.raises(ValueError):
        t = Test1(parameters={"b": 3})

    # check_validity=False lets unknown keys pass through unchanged.
    t = Test1()
    ps = t._parse_parameters({"b": 3}, check_validity=False)
    assert ps["a"] == 1
    assert ps["b"] == 3

    class Test2(Test1):
        # also test conversion of default parameters
        parameters_default = [Parameter("b", "2", int, "help")]

    # Subclasses inherit parent defaults; "2" is converted to int 2.
    t = Test2()
    assert t.parameters["a"] == 1
    assert t.parameters["b"] == 2

    t = Test2(parameters={"a": 10, "b": 20})
    assert t.parameters["a"] == 10
    assert t.parameters["b"] == 20
    assert t.get_parameter_default("a") == 1
    assert t.get_parameter_default("b") == "2"
    with pytest.raises(KeyError):
        t.get_parameter_default("c")

    class Test3(Test2):
        # test overwriting defaults
        parameters_default = [Parameter("a", 3), Parameter("c", 4)]

    t = Test3()
    assert t.parameters["a"] == 3
    assert t.get_parameter_default("a") == 3
    assert set(t.parameters.keys()) == {"a", "b", "c"}

    # test get_all_parameters function after having used Parameters
    p1 = get_all_parameters()
    for key in ["value", "description"]:
        p2 = get_all_parameters(key)
        assert set(p1) == p2.keys()

    # test whether sphinx_display_parameters runs
    lines = [":param parameters:"]
    sphinx_display_parameters(None, "class", "Test1", Test1, None, lines)
    assert len(lines) > 1
def test_parameters_simple():
    """ test adding parameters using a simple dictionary """

    class Test(Parameterized):
        # A plain dict is accepted in place of a list of Parameter objects.
        parameters_default = {"a": 1}

    t = Test()
    assert t.parameters["a"] == 1
def test_parameter_help(monkeypatch, capsys):
    """ test how parameters are shown """

    class Test1(Parameterized):
        parameters_default = [DeprecatedParameter("a", 1, int, "random string")]

    class Test2(Test1):
        parameters_default = [Parameter("b", 2, int, "another word")]

    t = Test2()

    # show_parameters must print identically whether called on the class or
    # an instance, inside and outside a jupyter notebook, for every flag combo.
    for in_jupyter in [False, True]:
        monkeypatch.setattr("pde.tools.output.in_jupyter_notebook", lambda: in_jupyter)
        for flags in itertools.combinations_with_replacement([True, False], 3):
            Test2.show_parameters(*flags)
            o1, e1 = capsys.readouterr()

            t.show_parameters(*flags)
            o2, e2 = capsys.readouterr()

            assert o1 == o2
            assert e1 == e2 == ""
def test_hidden_parameter():
    """ test how hidden parameters are handled """

    class Test1(Parameterized):
        parameters_default = [Parameter("a", 1), Parameter("b", 2)]

    assert Test1().parameters == {"a": 1, "b": 2}

    # Two equivalent ways of hiding "b": a HideParameter marker in a subclass
    # or declaring the parameter with hidden=True.
    class Test2(Test1):
        parameters_default = [HideParameter("b")]

    class Test2a(Parameterized):
        parameters_default = [Parameter("a", 1), Parameter("b", 2, hidden=True)]

    for t_class in [Test2, Test2a]:
        # Hidden parameters are excluded from listings unless requested...
        assert "b" not in t_class.get_parameters()
        assert len(t_class.get_parameters()) == 1
        assert len(t_class.get_parameters(include_hidden=True)) == 2
        t2 = t_class()
        # ...but still carry their default value on instances.
        assert t2.parameters == {"a": 1, "b": 2}
        assert t2.get_parameter_default("b") == 2
        with pytest.raises(ValueError):
            t2._parse_parameters({"b": 2}, check_validity=True, allow_hidden=False)

    class Test3(Test1):
        parameters_default = [Parameter("b", 3)]

    # Re-declaring "b" in a subclass un-hides it and overrides the default.
    t3 = Test3()
    assert t3.parameters == {"a": 1, "b": 3}
    assert t3.get_parameter_default("b") == 3
def test_convert_default_values(caplog):
    """ test how default values are handled """

    # int default with float converter: converted silently, no warning.
    class Test1(Parameterized):
        parameters_default = [Parameter("a", 1, float)]

    with caplog.at_level(logging.WARNING):
        t1 = Test1()
    assert "Default value" not in caplog.text
    assert isinstance(t1.parameters["a"], float)

    class Test2(Parameterized):
        parameters_default = [Parameter("a", np.arange(3), np.array)]

    t2 = Test2()
    np.testing.assert_equal(t2.parameters["a"], np.arange(3))

    # Lists are converted through np.array as well.
    class Test3(Parameterized):
        parameters_default = [Parameter("a", [0, 1, 2], np.array)]

    t3 = Test3()
    np.testing.assert_equal(t3.parameters["a"], np.arange(3))

    # int default with str converter: converted, but a warning is logged.
    class Test4(Parameterized):
        parameters_default = [Parameter("a", 1, str)]

    with caplog.at_level(logging.WARNING):
        t4 = Test4()
    assert "Default value" in caplog.text
    np.testing.assert_equal(t4.parameters["a"], "1")
| [
"[email protected]"
]
| |
4f0e1f57ce50c3a051c432c5570ea57775a38300 | 4589a9ea76e458793ad78059839b81d365f433de | /athena_automation/athenataf/tests/group_management/group_management/non_default_value_check/NonDefaultValueCheck.py | b7af3ed0749dbbab1b9fdca59e171016b0940a66 | []
| no_license | cash2one/reautomation_handoff | 5e2c4c432d8f658d1b57211782744bd0b56c52f6 | 7ef83572d659db35036189eb394f99de1369db5a | refs/heads/master | 2020-05-22T17:56:33.214080 | 2015-07-13T07:51:18 | 2015-07-13T07:51:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,970 | py | import logging
logger = logging.getLogger('athenataf')
import time
from athenataf.lib.functionality.test.AthenaGUITestCase import AthenaGUITestCase
class NonDefaultValueCheck(AthenaGUITestCase):
    '''
    GUI regression tests for group management with non-default values.

    Each test follows the same snapshot pattern: take snapshot s1 of the
    clean state, perform the group operations under test, take snapshot
    s2, undo everything, take snapshot s3, then assert s1 vs s2 differ
    and s1 vs s3 are identical (i.e. the teardown fully restored state).
    '''
    def _create_network(self , network_page):
        '''Create one wireless and one wired employee network with default
        settings, removing any pre-existing networks first.'''
        time.sleep(10)
        network_page.delete_network_if_present()
        network_page.delete_wired_network_if_present()
        # Wireless employee network with default VLAN/security/access.
        basic_info = network_page.create_new_network()
        vlan_page = basic_info.employee_network_info()
        security_page = vlan_page.use_vlan_defaults()
        access_page = security_page.set_default_settings()
        access_page.click_role_radio_and_click_finish_button()
        # Wired employee network with default settings.
        basic_info = network_page.create_new_network()
        vlan_obj = basic_info.wired_employee_network_info()
        security = vlan_obj.wired_vlan_defaults()
        security.wired_employee_security_defaults()
        access = security.wired_security_defaults()
        network_assign = access.use_access_defaults()
        network_assign.finish_network_setup()
    def test_ath_11748_group_configuration(self):
        '''Configure networks inside a sample group, then restore state.'''
        inner_left_panel = self.TopPanel.click_slider_icon()
        self.take_s1_snapshot()
        # Clean up any leftover sample group (with or without a VC).
        if inner_left_panel.assert_group():
            if inner_left_panel.assert_sample_group_with_vc_present():
                manage_group_page = inner_left_panel.manage_group()
                manage_group_page.move_virtual_controller()
                inner_left_panel.manage_group()
                manage_group_page.delete_empty_group()
            elif inner_left_panel.assert_sample_group_without_vc_present():
                manage_group_page = inner_left_panel.manage_group()
                manage_group_page.delete_empty_group()
        # Create groups and populate them with networks.
        create_group_page = inner_left_panel.add_group()
        create_group_page.create_multiple_groups()
        create_group_page = inner_left_panel.add_group()
        create_group_page.create_empty_group()
        inner_left_panel.select_samplegroup()
        network_page = self.LeftPanel.go_to_network_page()
        self._create_network(network_page)
        inner_left_panel.click_all_groups_label()
        inner_left_panel = self.TopPanel.click_slider_icon()
        manage_group_page = inner_left_panel.manage_group()
        manage_group_page.move_virtual_controller2()
        inner_left_panel.select_samplegroup()
        network_page = self.LeftPanel.go_to_network_page()
        self._create_network(network_page)
        self.take_s2_snapshot()
        # Teardown: delete networks, move the VC back, delete the groups.
        network_page.delete_network_if_present()
        network_page.delete_wired_network_if_present()
        inner_left_panel.click_all_groups_label()
        inner_left_panel = self.TopPanel.click_slider_icon()
        manage_group_page = inner_left_panel.manage_group()
        manage_group_page.move_virtual_controller()
        manage_group_page = inner_left_panel.manage_group()
        manage_group_page.delete_empty_group1()
        manage_group_page = inner_left_panel.manage_group()
        manage_group_page.delete_empty_group()
        self.browser.refresh()
        self.take_s3_snapshot()
        # s2 must differ from s1; s3 must equal s1 (full restoration).
        self.assert_s1_s2_diff(None)
        self.assert_s1_s3_diff()
        self.clear()
    def test_ath_11521_create_group(self):
        '''Create empty groups and a group with a VC, verify, then restore.'''
        conf=self.config.config_vars
        inner_left_panel = self.TopPanel.click_slider_icon()
        self.take_s1_snapshot()
        # Clean up leftover groups from previous runs.
        if inner_left_panel.assert_mygroup():
            if inner_left_panel.assert_mygroup_with_vc_present():
                manage_group_page = inner_left_panel.manage_group()
                manage_group_page.move_virtual_controller5()
                inner_left_panel.manage_group()
                manage_group_page.delete_empty_mygroup()
            elif inner_left_panel.assert_mygroup_without_vc_present():
                manage_group_page = inner_left_panel.manage_group()
                manage_group_page.delete_empty_mygroup()
        if inner_left_panel.assert_mynew_group():
            manage_group_page = inner_left_panel.manage_group()
            manage_group_page.delete_empty_mynew_group()
        # Exercise: create the groups named in the test configuration.
        create_group_page = inner_left_panel.add_group()
        create_group_page.create_multiple_empty_groups(conf.mynew)
        create_group_page = inner_left_panel.add_group()
        create_group_page.create_group_with_vc(conf.Mygroup)
        create_group_page = inner_left_panel.add_group()
        manage_group_page = inner_left_panel.manage_group()
        manage_group_page.assert_mygroup_and_mynew()
        manage_group_page.click_manage_group_close_button()
        self.take_s2_snapshot()
        # Teardown: move the VC out and delete both groups.
        manage_group_page = inner_left_panel.manage_group()
        manage_group_page.move_virtual_controller5()
        manage_group_page = inner_left_panel.manage_group()
        manage_group_page.delete_empty_mygroup()
        manage_group_page.delete_empty_mynew_group()
        self.browser.refresh()
        self.take_s3_snapshot()
        self.assert_s1_s2_diff(None)
        self.assert_s1_s3_diff()
        self.clear()
    def test_ath_11523_delete_group(self):
        '''Delete groups and verify the remaining group keeps its swarm.'''
        conf=self.config.config_vars
        inner_left_panel = self.TopPanel.click_slider_icon()
        self.take_s1_snapshot()
        # Clean up leftover groups from previous runs.
        if inner_left_panel.assert_mygroup():
            if inner_left_panel.assert_mygroup_with_vc_present():
                manage_group_page = inner_left_panel.manage_group()
                manage_group_page.move_virtual_controller5()
                inner_left_panel.manage_group()
                manage_group_page.delete_empty_mygroup()
            elif inner_left_panel.assert_mygroup_without_vc_present():
                manage_group_page = inner_left_panel.manage_group()
                manage_group_page.delete_empty_mygroup()
        if inner_left_panel.assert_mynew_group():
            manage_group_page = inner_left_panel.manage_group()
            manage_group_page.delete_empty_mynew_group()
            manage_group_page.click_manage_group_close_button()
        # Exercise: create groups, then delete them again.
        create_group_page = inner_left_panel.add_group()
        create_group_page.create_multiple_empty_groups(conf.mynew)
        create_group_page = inner_left_panel.add_group()
        create_group_page.create_group_with_vc(conf.Mygroup)
        manage_group_page = inner_left_panel.manage_group()
        manage_group_page.delete_empty_mynew_group()
        manage_group_page.delete_empty_mygroup()
        manage_group_page.assert_group_has_swarm()
        self.take_s2_snapshot()
        # Teardown: remove the remaining group after relocating its VC.
        manage_group_page = inner_left_panel.manage_group()
        manage_group_page.move_virtual_controller5()
        manage_group_page = inner_left_panel.manage_group()
        manage_group_page.delete_empty_mygroup()
        self.browser.refresh()
        self.take_s3_snapshot()
        self.assert_s1_s2_diff(None)
        self.assert_s1_s3_diff()
        self.clear()
"[email protected]"
]
| |
c05a4d2c8f71e43c742416226d3a37a8bec52e4e | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /HvkPdhijquecKASdF_4.py | 28fe72dc5e2ebc80b433f466d3102b76d2b8443e | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | """
Create a function that takes a dictionary of student names and returns a list
of student names in **alphabetical order**.
### Examples
get_student_names({
"Student 1" : "Steve",
"Student 2" : "Becky",
"Student 3" : "John"
}) ➞ ["Becky", "John", "Steve"]
### Notes
* Don't forget to `return` your result.
* If you get stuck on a challenge, find help in the **Resources** tab.
* If you're _really_ stuck, unlock solutions in the **Solutions** tab.
"""
def get_student_names(students):
    """Return the students' names in alphabetical order.

    Parameters
    ----------
    students : dict
        Mapping of student identifiers (e.g. "Student 1") to names.

    Returns
    -------
    list of str
        All names from the dictionary, sorted alphabetically.
    """
    # The keys are irrelevant: sort the values directly instead of
    # re-indexing the dict key by key.
    return sorted(students.values())
| [
"[email protected]"
]
| |
d11536917ccd84af47b21f2d5b765a44747909d1 | abf6e809ed4d07a4fb144ed6ab467dc1f17769ae | /mjlib/BUILD | 9c0fb0521ce80c390339147535f3d6457ac3d06f | [
"Apache-2.0"
]
| permissive | mjy2002/moteus | fdf55d88779be2b79316c11c50d40772bcf525bb | a30cd636d061b3b19c36468f79b174f70a6e9376 | refs/heads/master | 2020-08-06T09:33:35.519239 | 2019-10-03T20:21:43 | 2019-10-03T20:21:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | # -*- python -*-
# Copyright 2018 Josh Pieper, [email protected].
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package(default_visibility = ["//visibility:public"])
test_suite(
name = "host",
tests = [
"//mjlib/base:test",
"//mjlib/io:test",
"//mjlib/micro:test",
"//mjlib/multiplex:test",
"//mjlib/multiplex:py_test",
"//mjlib/telemetry:test",
"//mjlib/telemetry/js:test",
],
)
| [
"[email protected]"
]
| ||
698c729f923f3786348bd575ba07df82bc2097ed | cd57ad36685cc188ea42219bd220905e23e61f4c | /tests/logic/test_time.py | b2789a57f2b7b17fc9152dbc40382c556402ac8a | [
"BSD-3-Clause"
]
| permissive | gitCommitWiL/ChatterBot | fa404848c7eb8f8ffb07c80c7d3ec47aeb2fe177 | 4f2275ec8a6e3546c4251db9e9938f7b3fd29e68 | refs/heads/master | 2021-04-22T14:52:18.175648 | 2020-03-26T11:22:16 | 2020-03-26T11:22:16 | 249,854,439 | 2 | 0 | BSD-3-Clause | 2020-03-25T01:02:46 | 2020-03-25T01:02:46 | null | UTF-8 | Python | false | false | 815 | py | from tests.base_case import ChatBotTestCase
from chatterbot.logic import TimeLogicAdapter
from chatterbot.conversation import Statement
class TimeAdapterTests(ChatBotTestCase):
    """Tests for the TimeLogicAdapter's confidence scoring."""
    def setUp(self):
        super().setUp()
        # Fresh adapter bound to the test chatbot for every test.
        self.adapter = TimeLogicAdapter(self.chatbot)
    def test_positive_input(self):
        """A direct question about the time gets full confidence."""
        statement = Statement(text="Do you know what time it is?")
        response = self.adapter.process(statement)
        self.assertEqual(response.confidence, 1)
        self.assertIn("The current time is ", response.text)
    def test_negative_input(self):
        """An unrelated question gets zero confidence.

        The adapter still returns the time text either way; only the
        confidence value distinguishes relevant from irrelevant input.
        """
        statement = Statement(text="What is an example of a pachyderm?")
        response = self.adapter.process(statement)
        self.assertEqual(response.confidence, 0)
        self.assertIn("The current time is ", response.text)
| [
"[email protected]"
]
| |
20ef6a2b50cafbfbe8d5fc7d867265690b62d4c0 | 535503dc18c38b92f8520289da5b4fa42b0a722a | /code/exp_control/sequencer/sequences/Ramsey2_D52_D32.py | f1fc50821392952cb3b5aa8a7897c1b7850731bf | []
| no_license | jamesbate/phd_code | fbbbf7657c428a0a1f18768edca1dfce56801cc1 | 7e71d7f041835497fb421dd741c644ab5c8e3805 | refs/heads/master | 2023-05-07T10:31:22.168217 | 2021-05-26T15:00:40 | 2021-05-26T15:00:40 | 371,073,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,555 | py | # General S/D experiment
<VARIABLES>
# initializing the ion to the s state with 854 laser
MeasPoints=self.set_variable("float","MeasPoints",50,1,2e5)
n_loops=self.set_variable("float","n_loops",1,1,500000)
p729_pulse=self.set_variable("bool","p729_pulse",0)
pulse729_length=self.set_variable("float","pulse729_length",1000.000000,0,2e5)
# Raman pulse
pulse_393=self.set_variable("bool","pulse_393",1)
raman_length=self.set_variable("float","raman_length",50.000000,0,2e5)
# ion analys
do_pipulse=self.set_variable("bool","do_pipulse",0)
pitime_carrier=self.set_variable("float","pitime_carrier",2.,0,2e5)
do_pihalf=self.set_variable("bool","do_pihalf",0)
qubit_pihalf=self.set_variable("float","qubit_pihalf",2.,0,2e5)
analyse_ion=self.set_variable("bool","analyse_ion",1)
new_zealand=self.set_variable("float","new_zealand",2.,0,2e5)
phase=self.set_variable("float","phase",0.,0,50)
# Doppler cooling
doppler_length=self.set_variable("float","doppler_length",5000.000000,1,2e5)
# detection time
det_time=self.set_variable("float","det_time",5000.000000,0.01,2e7)
# sideband cooling
sb_cool_com=self.set_variable("bool","sb_cool_com",0)
#sb_cool_strech=self.set_variable("bool","sb_cool_strech",0)
SBCool_time=self.set_variable("float","SBCool_time",2000.000000,1,2e5)
sb_cool_rad1=self.set_variable("bool","sb_cool_rad1",0)
sb_cool_rad2=self.set_variable("bool","sb_cool_rad2",0)
SBCool_time_rad=self.set_variable("float","SBCool_time_rad",5000.000000,1,2e5)
#optical pumping with 397 sigma
opt_pumping=self.set_variable("bool","opt_pumping",1)
pump_length=self.set_variable("float","pump_length",40.000000,1,2e5)
repump866_length=self.set_variable("float","repump_length866",20.000000,1,2e5)
# delays during the experiment
delay=self.set_variable("float","delay",2,0,500000)
repump_length=self.set_variable("float","repump_length854",20.000000,1,2e5)
# before detecting we can switch on the 854 to see if it can depopulate the D state
#repump_test=self.set_variable("bool","repump_test",0)
#repump_test_length=self.set_variable("float","repump_test_length",20.000000,1,2e5)
#dummy_var=int(self.set_variable("float","maesurement_type",0,0,1e5))
mes_type=self.set_variable("float","mes_type",0,0,2e5)
</VARIABLES>
<TRANSITIONS>
</TRANSITIONS>
# The save form specifies which data will be saved and how, when a scan is performed.
# If this is omitted a standard form is used
<SAVE FORM>
.dat ; %1.2f
PMTcounts; 1;sum; (1:N); %1.0f
</SAVE FORM>
# Here the sequence can override program parameters. Syntax follows from "Write Token to Params.vi"
<PARAMS OVERRIDE>
AcquisitionMode fluorescence
Cycles 50
</PARAMS OVERRIDE>
#<TRANSITIONS>
# work around to "error while getting tag" info
#</TRANSITIONS>
<SEQUENCE>
#turning all Lasers off
#ttl_pulse("31",10,is_last=True)
TTLsOff(["854 sw","Pi397","Sigma397","dp397","397det","866 sw"])
InitPulse(50)
delay = 2
# rf_on(150, -100, dds_address=0, start_time = 0)
#setTTLOn("729_not_393second",0,is_last=True)
for i in range(int(n_loops)):
ttl_pulse("854 sw",repump_length,is_last=False)
DopplerCooling(doppler_length, is_last=False)
PMTDetection(doppler_length)
seq_wait(1)
if opt_pumping:
ttl_pulse(["Sigma397","dp397"],pump_length,is_last=False)
ttl_pulse("866 sw",pump_length+repump866_length,is_last=True)
#ttl_pulse(["854"],repump_length,is_last=False)
seq_wait(0.1)
if sb_cool_com:
setTTLOff("729SPswitch",0,is_last=True)
SBCooling2(length = SBCool_time)
setTTLOn("729SPswitch",0,is_last=True)
seq_wait(0.1)
seq_wait(7)
if sb_cool_rad1:
SBCooling2(length = SBCool_time_rad, transition="sideband_cool_rad1")
if sb_cool_rad2:
SBCooling2(length = SBCool_time_rad, transition="sideband_cool_rad2")
seq_wait(0.1)
seq_wait(7)
if p729_pulse:
setTTLOff("729SPswitch",0,is_last=True)
rf_pulse(pulse729_length, 0, ion=1, transition_param='729_Probe', is_last=True, address=1)
setTTLOn("729SPswitch",0,is_last=True)
seq_wait(delay)
if pulse_393:
seq_wait(delay)
if 1:#(mes_type >5):
RamanPulse(raman_length)
seq_wait(delay)
if analyse_ion:
do_pipulse = 1
do_pihalf =1
#pitime_carrier = 1.0
#qubit_pihalf = 0.5
if do_pihalf: #mes_type % 3 == 2:
setTTLOn("729SPswitch",0,is_last=True)
rf_pulse(qubit_pihalf, 0*3.14159*0.25, ion=1, transition_param='729_qubit', is_last=True, address=1) #sigma y
seq_wait(1)
setTTLOff("729SPswitch",0,is_last=True)
seq_wait(1)
if(do_pipulse):
#seq_wait(1000) # new zealand
setTTLOn("729SPswitch",0,is_last=True)
rf_pulse(pitime_carrier, 0, ion=1, transition_param='729_Carrier', is_last=True, address=1)
seq_wait(delay)
setTTLOff("729SPswitch",0,is_last=True)
seq_wait(1)
if analyse_ion:
setTTLOn("729SPswitch",0,is_last=True)
seq_wait(new_zealand) # new zealand
rf_pulse(pitime_carrier, 0, ion=1, transition_param='729_Carrier', is_last=True, address=1)
seq_wait(delay)
rf_pulse(qubit_pihalf, phase*3.14159*0.5, ion=1, transition_param='729_qubit', is_last=True, address=1)
seq_wait(delay)
setTTLOff("729SPswitch",0,is_last=True)
seq_wait(delay)
ttl_pulse(["Pi397","dp397","866 sw","397det"],det_time,is_last=False)
PMTDetection(det_time)
seq_wait(1)
TTLsOff(["Pi397","866 sw"])
</SEQUENCE>
<AUTHORED BY LABVIEW>
1
</AUTHORED BY LABVIEW>
| [
"[email protected]"
]
| |
e37e107190dff0ae34e8723deb1d746d87aba1fb | 2181795d5c380fef6d929f28fb4c90c0b1ffdc50 | /PythonScript/twitter_streaming.py | c6658c25cdde056fd59a770e2c3519d64af1bda2 | []
| no_license | pvhuu/Social-Network-Analysis | fa2c69460b7f811b16d4edfcdd99359825ca046e | c54d25eaa5838ea1e118cf8000a3a0bedf3b1ccd | refs/heads/master | 2020-04-24T04:55:24.381193 | 2019-02-25T12:31:57 | 2019-02-25T12:31:57 | 171,719,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | import sys
import string
import time
from tweepy import Stream
from tweepy.streaming import StreamListener
from twitter_client import get_twitter_auth
class CustomListener(StreamListener):
    """ Custom StreamListener that appends raw tweets to a .jsonl file """
    def __init__(self,fname):
        # Sanitise the query string so it is safe inside a file name.
        safe_fname = format_filename(fname)
        self.outfile = "stream_%s.jsonl" % safe_fname
    def on_data(self,data):
        """Append one raw tweet (a JSON document) to the output file.

        Returning True keeps the stream open; after a write error we log
        to stderr and back off for 5 seconds before continuing.
        """
        try:
            with open(self.outfile, 'a') as f:
                f.write(data)
                return True
        except BaseException as e:
            sys.stderr.write("Error on_data: {}\n".format(e))
            time.sleep(5)
        return True
    def on_error(self,status):
        """Stop on HTTP 420 (rate limited); log and continue otherwise."""
        if status == 420:
            sys.stderr.write("Rate limit exceeded\n")
            return False
        else:
            sys.stderr.write("Error {}\n".format(status))
            return True
def format_filename(fname):
    """Build a filesystem-safe variant of *fname*.

    Every character is mapped through convert_valid(), so anything that
    is not allowed in a file name becomes an underscore.

    Return: string
    """
    sanitized = [convert_valid(ch) for ch in fname]
    return ''.join(sanitized)
def convert_valid (one_char):
    """Return *one_char* if it is safe for a file name, else '_'.

    Safe characters are '-', '_', '.', ASCII letters and digits.

    Return: string
    """
    allowed = "-_." + string.ascii_letters + string.digits
    return one_char if one_char in allowed else '_'
if __name__ == '__main__':
    query = sys.argv[1:]  # list of CLI arguments
    query_fname = ' '.join(query)  # string used to build the output file name
    auth = get_twitter_auth()
    # Fixed: the original line was missing the closing parenthesis,
    # which made the whole script a SyntaxError.
    twitter_stream = Stream(auth, CustomListener(query_fname))
    # 'async' became a reserved word in Python 3.7, so it can no longer
    # be written as a literal keyword argument.  Passing it through **
    # keeps the old tweepy keyword working while parsing on all Python
    # versions (newer tweepy releases renamed it to 'is_async').
    twitter_stream.filter(track=query, **{'async': True})
| [
"="
]
| = |
e6ea1c5b7867d5bcc40adbeab05230c3eb764d24 | b9963ffb80aad7e057bc375edb85ac7ed5a837d0 | /knowit2016/knowit19.py | 24d450541ccf6ab2c23a57b0b5a410272753a1e4 | [
"MIT"
]
| permissive | matslindh/codingchallenges | a2db9f4579e9f35189f5cdf74590863cf84bdf95 | a846e522f7a31e988c470cda87955ee3ef20a274 | refs/heads/main | 2022-12-23T15:56:19.776354 | 2022-12-15T21:03:37 | 2022-12-15T21:03:37 | 76,491,177 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | import string
# Render the puzzle's digit stream as a PGM image: each 2-digit group is
# one pixel, white (99) when the value is even and black (0) when odd.
out = open("input/knowit19_output.pgm", "w")
#out_bin = open("input/knowit19_output.bin", "wb")
# The whole puzzle input as one long digit string (newlines stripped).
s = ''.join(open("input/knowit19").readlines()).replace("\n", '')
for i in range(0, len(s), 2):
    pass
    #out_bin.write(chr(int(s[i:i + 2])).encode("ascii"))
# Image geometry: 21 rows; two characters encode one pixel.
height = 21
width = int(len(s) / (height * 2))
# PGM header: magic number, dimensions, maximum gray value.
out.write("P2\n" + str(width) + ' ' + str(height) + "\n99\n")
for i in range(0, len(s), 2):
    # Even 2-digit value -> white pixel '99', odd -> black pixel '0'.
    letter = '99' if int(s[i:i+2]) % 2 == 0 else '0'
    if len(letter) < 2:
        letter = ' ' + letter
    out.write(letter + ' ')
    if (i + 2) % width == 0:
        out.write("\n")
for line in open("input/knowit19").readlines():
    line = line.strip()
    # print(int(line)&0xff)
# Exploration leftover: frequency count of the 2-digit values.
# NOTE(review): this rebinds the builtin ``str`` for the rest of the script.
str = ''.join(open("input/knowit19").readlines()).replace("\n", '')
freq = {}
for i in range(2, len(str), 2):
    v = int(str[i:i+2])
    # Difference with the previous 2-digit group (currently unused).
    v_diff = v - int(str[i-2:i])
    if v not in freq:
        freq[v] = 0
    freq[v] += 1
for k in freq:
    print(k, freq[k])
"""
v = int(str)
while v:
    x = v & 0xff
    print(chr(x))
    v >>= 8
    print(v)"""
"[email protected]"
]
| |
51d60aa46fddbf0f3fe82bc84c9e6e73eba242fd | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_0/_pkg0_0_0/_pkg0_0_0_1/_pkg0_0_0_1_1/_mod0_0_0_1_1_4.py | 837b2352d702ad8b1907a0715fb60cbf0eaa700d | [
"Apache-2.0"
]
| permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 128 | py | name0_0_0_1_1_4_0 = None
name0_0_0_1_1_4_1 = None
name0_0_0_1_1_4_2 = None
name0_0_0_1_1_4_3 = None
name0_0_0_1_1_4_4 = None | [
"[email protected]"
]
| |
4bffa371b0f85e0309c820ac059e41d57de17199 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/celery/tests/backends/test_couchbase.py | 3dc6aadd0b7a2f0ce6914768e70f511615cc8346 | [
"Apache-2.0"
]
| permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 4,782 | py | from __future__ import absolute_import
from celery.backends import couchbase as module
from celery.backends.couchbase import CouchBaseBackend
from celery.exceptions import ImproperlyConfigured
from celery import backends
from celery.tests.case import (
AppCase, MagicMock, Mock, SkipTest, patch, sentinel,
)
try:
import couchbase
except ImportError:
couchbase = None # noqa
COUCHBASE_BUCKET = 'celery_bucket'
class test_CouchBaseBackend(AppCase):
    """Unit tests for celery's CouchBase result backend.

    All Couchbase connections are mocked; the tests are skipped entirely
    when the couchbase client library is not installed.
    """
    def setup(self):
        if couchbase is None:
            raise SkipTest('couchbase is not installed.')
        self.backend = CouchBaseBackend(app=self.app)
    def test_init_no_couchbase(self):
        """test init raises when the couchbase library is unavailable"""
        # Temporarily simulate a missing couchbase import.
        prev, module.couchbase = module.couchbase, None
        try:
            with self.assertRaises(ImproperlyConfigured):
                CouchBaseBackend(app=self.app)
        finally:
            module.couchbase = prev
    def test_init_no_settings(self):
        """test init raises on empty (non-dict) settings"""
        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = []
        with self.assertRaises(ImproperlyConfigured):
            CouchBaseBackend(app=self.app)
    def test_init_settings_is_None(self):
        """Test init accepts settings of None (defaults are used)"""
        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = None
        CouchBaseBackend(app=self.app)
    def test_get_connection_connection_exists(self):
        # An already-cached connection must be reused, not re-created.
        with patch('couchbase.connection.Connection') as mock_Connection:
            self.backend._connection = sentinel._connection
            connection = self.backend._get_connection()
            self.assertEqual(sentinel._connection, connection)
            self.assertFalse(mock_Connection.called)
    def test_get(self):
        """test_get
        CouchBaseBackend.get should return and take two params
        db conn to couchbase is mocked.
        TODO Should test on key not exists
        """
        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {}
        x = CouchBaseBackend(app=self.app)
        x._connection = Mock()
        mocked_get = x._connection.get = Mock()
        mocked_get.return_value.value = sentinel.retval
        # should return the value stored under the key
        self.assertEqual(x.get('1f3fab'), sentinel.retval)
        x._connection.get.assert_called_once_with('1f3fab')
    def test_set(self):
        """test_set
        CouchBaseBackend.set should return None and take two params
        db conn to couchbase is mocked.
        """
        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = None
        x = CouchBaseBackend(app=self.app)
        x._connection = MagicMock()
        x._connection.set = MagicMock()
        # should return None
        self.assertIsNone(x.set(sentinel.key, sentinel.value))
    def test_delete(self):
        """test_delete
        CouchBaseBackend.delete should return and take two params
        db conn to couchbase is mocked.
        TODO Should test on key not exists
        """
        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {}
        x = CouchBaseBackend(app=self.app)
        x._connection = Mock()
        mocked_delete = x._connection.delete = Mock()
        mocked_delete.return_value = None
        # should return None
        self.assertIsNone(x.delete('1f3fab'))
        x._connection.delete.assert_called_once_with('1f3fab')
    def test_config_params(self):
        """test_config_params
        celery.conf.CELERY_COUCHBASE_BACKEND_SETTINGS is properly set
        """
        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {
            'bucket': 'mycoolbucket',
            'host': ['here.host.com', 'there.host.com'],
            'username': 'johndoe',
            'password': 'mysecret',
            'port': '1234',
        }
        x = CouchBaseBackend(app=self.app)
        self.assertEqual(x.bucket, 'mycoolbucket')
        self.assertEqual(x.host, ['here.host.com', 'there.host.com'],)
        self.assertEqual(x.username, 'johndoe',)
        self.assertEqual(x.password, 'mysecret')
        # note: the string port from settings is coerced to an int
        self.assertEqual(x.port, 1234)
    def test_backend_by_url(self, url='couchbase://myhost/mycoolbucket'):
        # A couchbase:// URL must resolve to the CouchBaseBackend class.
        from celery.backends.couchbase import CouchBaseBackend
        backend, url_ = backends.get_backend_by_url(url, self.app.loader)
        self.assertIs(backend, CouchBaseBackend)
        self.assertEqual(url_, url)
    def test_backend_params_by_url(self):
        # Credentials, host, port and bucket are parsed out of the URL.
        url = 'couchbase://johndoe:mysecret@myhost:123/mycoolbucket'
        with self.Celery(backend=url) as app:
            x = app.backend
            self.assertEqual(x.bucket, 'mycoolbucket')
            self.assertEqual(x.host, 'myhost')
            self.assertEqual(x.username, 'johndoe')
            self.assertEqual(x.password, 'mysecret')
            self.assertEqual(x.port, 123)
"[email protected]"
]
| |
3264b1c0d30de6484403a37d07b39896f9a20180 | ee4768fe781f5099e4fee5a5d6d1b53146d21f80 | /src/pyutil/zeroconf.py | 8cd6428b09322cc329a3a760cb0492c81d261946 | [
"LicenseRef-scancode-public-domain",
"Unlicense",
"CC0-1.0",
"BSD-3-Clause"
]
| permissive | nuin/ampify | e55eff2953ae25907df52a909ecb7be7e468c9ae | dd3ed2eece37652e604f223658c028e01e6bdfa3 | refs/heads/master | 2021-01-15T23:50:50.748938 | 2011-01-29T04:36:41 | 2011-01-29T04:36:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,373 | py | # Public Domain (-) 2010-2011 The Ampify Authors.
# See the Ampify UNLICENSE file for details.
"""
================
ZeroConf Support
================
This module provides support functions to register and query ZeroConf records.
The ``register`` function returns either a ``1`` or a ``0`` to indicate a
successful or failed registration.
>>> register('foo', '_test._tcp', 1234)
1
And, similarly, the ``query`` function can be used to find registrations for a
given ``regtype``. It takes an optional ``timeout`` value (in seconds) as a
second parameter, e.g.
>>> query('_test._tcp', 1.0)
{u'foo._test._tcp.local.': {...'port': 1234...}}
"""
import atexit
import threading
from select import select
from time import time
try:
import pybonjour
except Exception:
pybonjour = None
state = threading.local()
state.announce = None
state.query = None
state.current = None
state.timeout = None
def registration_callback(sdRef, flags, errorCode, name, regtype, domain):
    """pybonjour callback: record registration success (1/0) in the
    thread-local ``state.announce`` flag read by register()."""
    if errorCode == pybonjour.kDNSServiceErr_NoError:
        state.announce = 1
    else:
        state.announce = 0
def register(name, regtype, port):
    """Register a ZeroConf service and block until the daemon replies.

    Returns 1 on successful registration, 0 on failure (as reported via
    ``registration_callback``), or None when pybonjour is unavailable.
    """
    if not pybonjour:
        return
    sdRef = pybonjour.DNSServiceRegister(
        name=name, regtype=regtype, port=port, callBack=registration_callback
    )
    try:
        while 1:
            # Block until the mDNS daemon has a result ready for us.
            ready = select([sdRef], [], [])
            if sdRef in ready[0]:
                pybonjour.DNSServiceProcessResult(sdRef)
                return state.announce
    finally:
        state.announce = None
        # Keep the registration alive for the process lifetime; the
        # service handle is closed on interpreter exit.
        atexit.register(sdRef.close)
def resolve_callback(
    sdRef, flags, interfaceIndex, errorCode, fullname, hosttarget, port,
    txtRecord
    ):
    """pybonjour callback: attach host/port to the service record.

    ``state.current`` (prepared by query_callback with the service's
    name and type) is filed under the full service name in
    ``state.query`` and completed with the resolved host and port.
    """
    if errorCode == pybonjour.kDNSServiceErr_NoError:
        record = state.query[fullname] = state.current
        record['host'] = hosttarget
        record['port'] = port
def query_callback(
    sdRef, flags, interfaceIndex, errorCode, serviceName, regtype, replyDomain
    ):
    """pybonjour browse callback: resolve each newly added service.

    Errors and non-"add" events are ignored.  For each added service a
    nested DNSServiceResolve is driven synchronously; resolve_callback
    stores the result into ``state.query``.
    """
    if errorCode != pybonjour.kDNSServiceErr_NoError:
        return
    if not (flags & pybonjour.kDNSServiceFlagsAdd):
        return
    # Reuse the query()'s timeout for the nested resolve select() calls.
    if state.timeout:
        timeout = state.timeout
    else:
        timeout = None
    state.current = {
        'name': serviceName,
        'type': regtype
        }
    sdRef = pybonjour.DNSServiceResolve(
        0, interfaceIndex, serviceName, regtype, replyDomain, resolve_callback
        )
    try:
        while 1:
            ready = select([sdRef], [], [], timeout)
            if sdRef not in ready[0]:
                break
            # A single result is expected; return after processing it.
            return pybonjour.DNSServiceProcessResult(sdRef)
    finally:
        state.current = None
        sdRef.close()
def query(regtype, timeout=5.0):
    """Browse for ZeroConf services of *regtype* for up to *timeout* seconds.

    Returns a dict mapping full service names to records with 'name',
    'type', 'host' and 'port' keys (empty when pybonjour is missing).
    """
    if not pybonjour:
        return {}
    sdRef = pybonjour.DNSServiceBrowse(regtype=regtype, callBack=query_callback)
    start = time()
    # Share the timeout with the nested resolves in query_callback.
    if timeout:
        state.timeout = timeout
    state.query = {}
    try:
        # Keep pumping browse events until the time budget is spent.
        while (time() - start) <= timeout:
            ready = select([sdRef], [], [], timeout)
            if sdRef in ready[0]:
                pybonjour.DNSServiceProcessResult(sdRef)
        return state.query
    finally:
        state.query = None
        state.timeout = None
        sdRef.close()
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS + doctest.NORMALIZE_WHITESPACE)
| [
"[email protected]"
]
| |
4e3bed99e1ea699ca9f133ea0ba788cc2e25882b | 2451ca9bc9ae43bd3b070fa362aa13646ff06f13 | /03_Standard_Library/unittest[Unit_testing_framework]/_note_unittest.py | 6c98c6865211e88f416708deb5099c1269bdaaf9 | []
| no_license | MacHu-GWU/six-demon-bag | 5cd1cf5d56d4c42cff013ab80dd4fc838add7195 | 10d772d6b876086f64db39f6ddbc07e08e35a122 | refs/heads/master | 2020-05-17T17:26:15.961833 | 2015-09-15T21:50:20 | 2015-09-15T21:50:20 | 26,669,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | ##################################
#encoding=utf8 #
#version =py27, py33 #
#author =sanhe #
#date =2014-11-15 #
# #
# (\ (\ #
# ( -.-)o I am a Rabbit! #
# o_(")(") #
# #
##################################
"""
Ref = https://docs.python.org/2/library/unittest.html#basic-example
useful method:
assertEqual to check for an expected result
assertTrue to verify a condition
assertRaises verify that an expected exception gets raised
"""
from __future__ import print_function
import random
import unittest
class TestSequenceFunctions(unittest.TestCase):
    """Basic unittest example exercising the ``random`` module.

    Fix: the file advertises Python 2.7 and 3.3 support, but
    ``range(10)`` is an immutable lazy sequence on Python 3, so
    ``random.shuffle(self.seq)`` and ``self.seq.sort()`` raised there.
    Materialising the sequence as a list works on both versions.
    """
    def setUp(self):
        # A fresh, mutable sequence for every test method.
        self.seq = list(range(10))
    def test_shuffle(self):
        # make sure the shuffled sequence does not lose any elements
        random.shuffle(self.seq)
        self.seq.sort()
        self.assertEqual(self.seq, list(range(10)))
        # should raise an exception for an immutable sequence
        self.assertRaises(TypeError, random.shuffle, (1, 2, 3))
    def test_choice(self):
        # the chosen element must come from the sequence itself
        element = random.choice(self.seq)
        self.assertTrue(element in self.seq)
    def test_sample(self):
        # asking for more elements than available must fail ...
        with self.assertRaises(ValueError):
            random.sample(self.seq, 20)
        # ... and every sampled element must come from the sequence
        for element in random.sample(self.seq, 5):
            self.assertTrue(element in self.seq)
unittest.main() | [
"[email protected]"
]
| |
5d72e5ef4d774b54baceacc28da305d03a59b235 | c499492bec3337319e9b186645d19edd019ca221 | /raSAT-0.2/starexec_run_default_0.2.py | b0f9baa22c4b3e4c3d145af736cd0a106ef30f92 | []
| no_license | tungvx/smt_test_tools | 6f60e186f86120d35c9d1479333de179a9296d96 | f466b0faa4a90363c905f9acba3f7f5fbf11427e | refs/heads/master | 2020-12-06T13:12:29.198696 | 2017-12-25T06:03:57 | 2017-12-25T06:03:57 | 67,702,683 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | #!/usr/bin/env python
# Run: python smt.py filename.smt2 timeout
# timeout is in seconds
import os
import subprocess
import sys
import stat
import time
current_path = os.path.dirname(os.path.realpath(__file__))
def remove_tmp (filename, version):
    """Delete raSAT's intermediate files for *filename* and *version*.

    Removes ``<filename>.<version>.tmp`` plus the ``.out`` and ``.in``
    files derived from the extension-less base name.  Missing files are
    ignored, so the call is always safe.  (The three copy-pasted
    try/except blocks were collapsed into one loop.)
    """
    base = os.path.splitext(filename)[0]
    for path in (filename + '.' + version + '.tmp',
                 base + '.' + version + '.out',
                 base + '.' + version + '.in'):
        try:
            os.remove(path)
        except OSError:
            # File was never created (or already deleted): nothing to do.
            pass
def run_raSAT (filename, bounds, sbox, timeout):
    """Run the raSAT-0.2 binary once and read its verdict.

    Returns ``(result, sbox)`` where result is the verdict read from
    ``<filename>.0.2.tmp`` (e.g. 'sat'/'unsat'/'unknown').  When the
    verdict is 'unknown', sbox is shrunk by a factor of 10 and another
    solver run is launched with the remaining time budget.
    """
    startTime = time.time()
    raSATResult = "unknown"
    # remove tmps files:
    remove_tmp(filename, "0.2")
    subprocess.call([os.path.join(current_path, "./raSAT-0.2"), filename, bounds, 'sbox=' + str(sbox), 'tout=' + str(timeout-(time.time() - startTime))])
    try:
        with open(filename + '.0.2.tmp', 'r') as outfile:
            raSATResult = outfile.read().rstrip()
            # NOTE(review): close() is redundant inside a ``with`` block.
            outfile.close()
        if raSATResult == "unknown":
            sbox /= 10
            remove_tmp(filename, "0.2")
            # NOTE(review): this Popen is neither waited on nor read here;
            # the caller's retry loop seems to pick up the next result --
            # confirm that is intentional.
            proc2 = subprocess.Popen([os.path.join(current_path, "./raSAT-0.2"), filename, bounds, 'sbox=' + str(sbox), 'tout=' + str(timeout-(time.time() - startTime))])
    except IOError:
        # Solver produced no .tmp file (e.g. killed by timeout):
        # keep the 'unknown' default.
        pass
    return raSATResult, sbox
def run(filename, initLowerBound, initUpperBound, sbox, timeout):
    """Drive raSAT on *filename* until it decides sat/unsat.

    Repeats run_raSAT with the initial variable bounds while the result
    is 'unknown'; an 'unsat' answer within the bounded box is re-checked
    over the unbounded box (-inf, inf).  Prints the final verdict and
    cleans up the intermediate files.
    """
    lowerBound = initLowerBound
    upperBound = initUpperBound
    raSATResult = "unknown"
    startTime = time.time()
    # Each iteration passes only the remaining time budget to the solver.
    while (raSATResult == 'unknown'):
        (raSATResult, sbox) = run_raSAT(filename, 'lb=' + str(lowerBound) + ' ' + str(upperBound), sbox, timeout - (time.time() - startTime))
    if raSATResult == 'unsat':
        # Confirm unsatisfiability without the artificial bounds.
        (raSATResult, sbox) = run_raSAT(filename, 'lb=-inf inf', sbox, timeout - (time.time() - startTime))
    print (raSATResult)
    # remove tmps files:
    remove_tmp(filename, "0.2")
# get timeout from environment
timeout = float(os.environ.get('STAREXEC_CPU_LIMIT'))
run(sys.argv[1], -10, 10, 0.1, timeout) | [
"[email protected]"
]
| |
69be6a4feb0fe66a029a87ff314c6f77dd9fb8ff | 39de3097fb024c67a00c8d0e57c937d91f8b2cc9 | /Graphs/Good_graphs.py | b9f3b6c78d943e723f30cb7b4fcce9287c4d89f0 | []
| no_license | srajsonu/InterviewBit-Solution-Python | 4f41da54c18b47db19c3c0ad0e5efa165bfd0cd0 | 6099a7b02ad0d71e08f936b7ac35fe035738c26f | refs/heads/master | 2023-03-07T05:49:15.597928 | 2021-02-24T18:20:07 | 2021-02-24T18:20:07 | 249,359,666 | 0 | 2 | null | 2020-10-06T10:54:07 | 2020-03-23T07:09:53 | Python | UTF-8 | Python | false | false | 849 | py | class DSU:
def __init__(self,m):
self.parent=[i for i in range(m+1)]
self.height=[0 for _ in range(m+1)]
self.ans=m
def find_root(self,A):
if self.parent[A]==A:
return A
return self.find_root(self.parent[A])
def Union(self,A,B):
C=self.find_root(A)
D=self.find_root(B)
if C==D:
return
if self.height[C] < self.height[D]:
C,D = D,C
self.parent[D]=C
if self.height[C]==self.height[D]:
self.height[C]+=1
self.ans-=1
class Solution:
    """Counts the components ("good graphs") described by array A."""
    def Solve(self, A):
        """For each 1-based node i, union it with A[i-1]; entries equal
        to 1 simply remove one component from the count."""
        dsu = DSU(len(A))
        for index, target in enumerate(A, start=1):
            if target == 1:
                dsu.ans -= 1
            else:
                dsu.Union(index, target)
        return dsu.ans
# Ad-hoc smoke test on a small sample input.
A=[1,2,1,2]
B=Solution()
print(B.Solve(A))
| [
"[email protected]"
]
| |
8befe5a68bd5acbf2c15b8f57452161d188e1aa6 | b8bd4fa3b9d7a39c13f4d897e71f933ceb542cbd | /examples/beta_bernoulli_tf.py | ddf9eaef35b18932c469810138e071ac69c30a41 | [
"Apache-2.0"
]
| permissive | chagge/edward | 0b481b8de08ddc1e4be6ea1d868e5c8c0da0bb06 | 1e5563eba7bad5d6338615dff9dfcceef2cd436f | refs/heads/master | 2021-01-17T02:15:00.119122 | 2016-05-12T01:27:47 | 2016-05-12T07:04:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | #!/usr/bin/env python
"""
A simple example from Stan. The model is written in TensorFlow.
Probability model
Prior: Beta
Likelihood: Bernoulli
Variational model
Likelihood: Mean-field Beta
"""
import edward as ed
import tensorflow as tf
from edward.stats import bernoulli, beta
from edward.variationals import Variational, Beta
class BetaBernoulli:
    """
    p(x, z) = Bernoulli(x | z) * Beta(z | 1, 1)
    """
    def __init__(self):
        # Single latent variable: the Bernoulli success probability z.
        self.num_vars = 1
    def log_prob(self, xs, zs):
        """Return log p(xs, z) for each latent sample z in `zs`.

        `zs` is unpacked along its first dimension, one candidate z per
        row; the likelihood sums the Bernoulli log-pmf over all
        observations for each z.  (tf.pack/tf.unpack are the pre-TF-1.0
        names of stack/unstack.)
        """
        log_prior = beta.logpdf(zs, a=1.0, b=1.0)
        log_lik = tf.pack([tf.reduce_sum(bernoulli.logpmf(xs, z)) \
                   for z in tf.unpack(zs)])
        return log_lik + log_prior
ed.set_seed(42)
model = BetaBernoulli()
# Mean-field variational family: one Beta factor over the latent z.
variational = Variational()
variational.add(Beta(model.num_vars))
# Ten coin flips, two of them heads.
data = ed.Data(tf.constant((0, 1, 0, 0, 0, 0, 0, 0, 0, 1), dtype=tf.float32))
inference = ed.MFVI(model, variational, data)
inference.run(n_iter=10000)
| [
"[email protected]"
]
| |
ca2d3bcceb6ab5411980dd400babb51d77d2eb51 | 6249a81e81c3b3b37e6d03cd7112e9a981cec8e2 | /python/jobovy/apogee/setup.py | ee1fa5ae558eecf49e17c2bed29820ca33297774 | [
"BSD-3-Clause"
]
| permissive | dnidever/apogee | e883c7d352abb1b99c938f7de38313b5cd4d2164 | 83ad7496a0b4193df9e2c01b06dc36cb879ea6c1 | refs/heads/master | 2021-07-06T22:23:13.839478 | 2020-08-08T22:42:42 | 2020-08-08T22:42:42 | 149,676,202 | 0 | 0 | BSD-3-Clause | 2018-09-20T22:07:43 | 2018-09-20T22:07:43 | null | UTF-8 | Python | false | false | 5,445 | py | import os
from setuptools import setup #, Extension
import sys
import shutil
import subprocess
import tempfile
long_description = "Tools for APOGEE data analysis; see `here <https://github.com/jobovy/apogee>`__ for further documentation"
# Install FERRE when specifying --install-ferre; needs a FORTRAN compiler, e.g., http://hpc.sourceforge.net/
# Custom command-line toggles, consumed (and removed from sys.argv) before
# setup() runs so setuptools never sees them.
# --install-ferre: also download and build the FERRE code below.
try:
    ferre_pos= sys.argv.index('--install-ferre')
except ValueError:
    _INSTALL_FERRE= False
else:
    del sys.argv[ferre_pos]
    _INSTALL_FERRE= True
# --ferre-noopenmp: build FERRE without OpenMP.
try:
    ferre_openmp= sys.argv.index('--ferre-noopenmp')
except ValueError:
    _FERRE_NO_OPENMP= False
else:
    del sys.argv[ferre_openmp]
    _FERRE_NO_OPENMP= True
# --ferre-flen N: filename length compiled into FERRE (default 180).  This
# flag takes a value, hence the two deletions (the flag and its argument).
try:
    ferre_flen= sys.argv.index('--ferre-flen')
except ValueError:
    _FERRE_FLEN= 180
else:
    _FERRE_FLEN= int(sys.argv[ferre_flen+1])
    del sys.argv[ferre_flen]
    del sys.argv[ferre_flen]
if _INSTALL_FERRE:
    # Code to determine the binary install directory, from http://jasonstitt.com/setuptools-bin-directory
    from setuptools import Distribution
    from setuptools.command.install import install
    class OnlyGetScriptPath(install):
        # Stub install command: records the script directory, installs nothing.
        def run(self):
            self.distribution.install_scripts = self.install_scripts
    def get_setuptools_script_dir():
        " Get the directory setuptools installs scripts to for current python "
        dist = Distribution({'cmdclass': {'install': OnlyGetScriptPath}})
        dist.dry_run = True  # not sure if necessary
        dist.parse_config_files()
        command = dist.get_command_obj('install')
        command.ensure_finalized()
        # Runs OnlyGetScriptPath.run above, which fills install_scripts.
        command.run()
        return dist.install_scripts
if _INSTALL_FERRE:
    # Download, patch, build and install the FERRE spectral-fitting code.
    # Each step reports failure but deliberately continues (best effort).
    # Download the code
    #_FERRE_FILE= 'ferre_4.5.6.tar.gz'
    _FERRE_FILE= 'ferre_4.6.6.tar.gz'
    #_FERRE_URL= 'http://leda.as.utexas.edu/ferre/%s' % _FERRE_FILE
    _FERRE_URL= 'http://www.as.utexas.edu/~hebe/ferre/%s' % _FERRE_FILE
    print('\033[1m'+"Downloading and installing FERRE from %s ..." % _FERRE_URL +'\033[0m')
    # Create temporary directory
    tmpdir= tempfile.mkdtemp(dir='./')
    os.mkdir(os.path.join(tmpdir,'ferre'))
    try:
        subprocess.check_call(['wget',_FERRE_URL,'-O',
                               os.path.join(tmpdir,'ferre',_FERRE_FILE)])
    except subprocess.CalledProcessError:
        print('\033[1m'+"Downloading FERRE from %s failed ..." % _FERRE_URL +'\033[0m')
    # Unpack and install
    os.chdir(os.path.join(tmpdir,'ferre'))
    try:
        subprocess.check_call(['tar','xvzf',_FERRE_FILE])
    except subprocess.CalledProcessError:
        # BUGFIX: this message had "... % _FERRE_URL" with no %s in the
        # string, which raised TypeError on the failure path.
        print('\033[1m'+"Untarring/gunzipping FERRE failed ..."+'\033[0m')
    os.chdir('src')
    # Change flen in share.f90 (max filename length inside FERRE).
    with open("tmp.f90", "w") as fout:
        with open("share.f90", "r") as fin:
            for line in fin:
                fout.write(line.replace('flen=120','flen=%i' % _FERRE_FLEN))
    os.rename('tmp.f90','share.f90')
    # Change output format in ferre.f90 (wider names, more precision).
    with open("tmp.f90", "w") as fout:
        with open("ferre.f90", "r") as fin:
            for line in fin:
                fout.write(line.replace("write(3,'(1x,a30,100(1x,F9.3))')",
                                        "write(3,'(1x,a40,100(1x,F9.4))')"))
    os.rename('tmp.f90','ferre.f90')
    try:
        if _FERRE_NO_OPENMP:
            subprocess.check_call(['make','OPT=-O2'])
        else:
            subprocess.check_call(['make'])
    except subprocess.CalledProcessError:
        # BUGFIX: dropped the stray "% _FERRE_URL" (no %s in the string).
        print('\033[1m'+"Compiling FERRE failed ..."+'\033[0m')
    os.rename('a.out','../../../ferre')
    os.rename('ascii2bin','../../../ascii2bin')
    # Remove everything
    os.chdir('../../../')
    try:
        subprocess.check_call(['rm','-rf',tmpdir])
    except subprocess.CalledProcessError:
        # BUGFIX: dropped the stray "% _FERRE_URL" (no %s in the string).
        print('\033[1m'+"Removing FERRE temporary files failed ..."+'\033[0m')
    shutil.copy('ferre',get_setuptools_script_dir())
    shutil.copy('ascii2bin',get_setuptools_script_dir())
# Standard package metadata; data files (isochrone .sav tables, spectral
# filters, Cannon coefficients, MOOG helper script) ship inside the package.
setup(name='apogee',
      version='1.',
      description='APOGEE data tools',
      author='Jo Bovy',
      author_email='[email protected]',
      license='New BSD',
      long_description=long_description,
      url='https://github.com/jobovy/apogee',
      package_dir = {'apogee/': ''},
      packages=['apogee','apogee/tools','apogee/select','apogee/test',
                'apogee/util','apogee/samples','apogee/spec','apogee/modelatm',
                'apogee/modelspec'],
      package_data={'apogee/samples':['data/rcmodel_mode_jkz_ks_parsec_newlogg.sav',
                                      'data/rcmodel_mode_jkz_h_parsec_newlogg.sav',
                                      'data/rcmodel_mass_agez.sav',
                                      'data/rcmodel_mass_agez_coarseage.sav',
                                      'data/rcmodel_omega_agez.sav'],
                    'apogee/spec':['filter/dr12/*.filt',
                                   'cannon/training/*.txt',
                                   'cannon/trained/*.txt'],
                    'apogee/modelspec':['scripts/makemoogmodel.awk'],},
      dependency_links = ['https://github.com/jobovy/galpy/tarball/master#egg=galpy',
                          'https://github.com/jobovy/isodist/tarball/master#egg=isodist'],
      install_requires=['numpy','scipy','matplotlib',
                        'astropy','galpy',
                        'isodist','periodictable','tqdm']
      )
| [
"[email protected]"
]
| |
6af1cb0e42170d903f01a9c7990a8f4ff4dc38c0 | 4e503761d091f3f284763d63c89861f6c26c1015 | /语法基础/jinzhi_base.py | ec273df6619f3f2be6874ed53805093eb88e0fd0 | []
| no_license | L-ingqin12/Algorithm_LanQiaobei | b2e08d755cacaaa0ff96108ca3f13d648b3b6fd7 | 9dd7b05eaf1cfc02eca52ee4f97466de961e592c | refs/heads/main | 2023-04-03T14:08:04.389344 | 2021-04-19T03:14:29 | 2021-04-19T03:14:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | import math
import cmath
import sys
import string
import heapq
import bisect
from queue import Queue,PriorityQueue,LifoQueue
from collections import Counter,deque
from itertools import permutations,combinations
from functools import cmp_to_key
# str=input()
# print(int(str,16))
# a=int(str,16)
# print("{:0b}-666".format(a,2))
# print("{:0o}".format(a))
# print("{:0x}".format(a))#X
def baseN(num, b):
    """Convert a non-negative integer `num` to its base-`b` string.

    Digits follow the original alphabet: "0"-"9", "A"-"F", then
    lowercase "g"-"z", supporting bases 2..36.  Returns "0" for 0.
    Iterative (short division) instead of the original recursion with
    lstrip("0"), so no recursion-depth limit applies.

    Raises:
        ValueError: if num < 0 or b is outside 2..36 (the original
            would spin into RecursionError in those cases).
    """
    digits = "0123456789ABCDEFghijklmnopqrstuvwxyz"
    if not 2 <= b <= len(digits):
        raise ValueError("base must be in 2..%d" % len(digits))
    if num < 0:
        raise ValueError("num must be non-negative")
    if num == 0:
        return "0"
    out = []
    while num:
        num, rem = divmod(num, b)
        out.append(digits[rem])
    return "".join(reversed(out))
if __name__=="__main__":
    # Interactive base-n -> base-m conversion, kept for reference:
    # n=int(input().strip())
    # str1=input().strip()
    # m=int(input().strip())
    # temp=int(str1,n)
    # print(baseN(temp,m))
    print(baseN(0,16))  # smoke test: zero converts to "0" in any base
| [
"[email protected]"
]
| |
d6b75617ce95f97c556264d12c12a0ba8750dda0 | 63b4a698bc22fd54857c8fa097b1331f79c39e5a | /src/gdb/gdb-7.11/gdb/python/lib/gdb/printing.py | 63c3aeb23d8116dc0c49f1dc7acb7106131d869a | [
"Apache-2.0",
"LGPL-3.0-only",
"GPL-3.0-only",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-only",
"LGPL-2.0-only",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"AGPL-3.0-or-later"
]
| permissive | MinimSecure/unum-sdk | 67e04e2b230f9afb3ae328501a16afa4b94cdac6 | 30c63f0eccddba39a760671a831be3602842f3f4 | refs/heads/master | 2023-07-13T07:33:43.078672 | 2023-07-07T13:52:05 | 2023-07-07T13:52:05 | 143,209,329 | 33 | 22 | Apache-2.0 | 2021-03-05T22:56:59 | 2018-08-01T21:13:43 | C | UTF-8 | Python | false | false | 10,913 | py | # Pretty-printer utilities.
# Copyright (C) 2010-2016 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for working with pretty-printers."""
import gdb
import gdb.types
import re
import sys
if sys.version_info[0] > 2:
    # Python 3 removed basestring and long
    # Alias them so the isinstance and integer checks below run unchanged
    # on both major versions.
    basestring = str
    long = int
class PrettyPrinter(object):
    """Abstract base for gdb pretty-printers.

    Attributes:
        name: Identifier for this printer, unique within the context it
            is registered in (objfile, progspace, or gdb globally); it
            should describe what can be pretty-printed, e.g.
            "StringPiece" or "protobufs".
        subprinters: Iterable whose elements each carry a `name`
            attribute (and possibly `enabled`), or None when the printer
            has no sub-printers.
        enabled: Whether the printer is currently active.

    Sub-printers cover the case where one registered printer is really a
    family of per-type printers (e.g. libstdc++ keeps one printer per
    type, matched by regexp).  Support lives on this base class rather
    than subclasses to keep commands/pretty_printers.py simple.
    """
    def __init__(self, name, subprinters=None):
        self.enabled = True
        self.name = name
        self.subprinters = subprinters
    def __call__(self, val):
        """Subclasses must override this to return a printer for `val`."""
        raise NotImplementedError("PrettyPrinter __call__")
class SubPrettyPrinter(object):
    """Base class spelling out the sub-printer contract.

    Sub-printers are not required to derive from this, but it formalizes
    the attributes expected of them:
        name: The sub-printer's identifier.
        enabled: Whether this sub-printer is active.
    """
    def __init__(self, name):
        self.enabled = True
        self.name = name
def register_pretty_printer(obj, printer, replace=False):
    """Register pretty-printer PRINTER with OBJ.
    The printer is added to the front of the search list, thus one can override
    an existing printer if one needs to.  Use a different name when overriding
    an existing printer, otherwise an exception will be raised; multiple
    printers with the same name are disallowed.
    Arguments:
        obj: Either an objfile, progspace, or None (in which case the printer
            is registered globally).
        printer: Either a function of one argument (old way) or any object
            which has attributes: name, enabled, __call__.
        replace: If True replace any existing copy of the printer.
            Otherwise if the printer already exists raise an exception.
    Returns:
        Nothing.
    Raises:
        TypeError: A problem with the type of the printer.
        ValueError: The printer's name contains a semicolon ";".
        RuntimeError: A printer with the same name is already registered.
    If the caller wants the printer to be listable and disableable, it must
    follow the PrettyPrinter API.  This applies to the old way (functions) too.
    If printer is an object, __call__ is a method of two arguments:
    self, and the value to be pretty-printed.  See PrettyPrinter.
    """
    # Watch for both __name__ and name.
    # Functions get the former for free, but we don't want to use an
    # attribute named __foo__ for pretty-printers-as-objects.
    # If printer has both, we use `name'.
    if not hasattr(printer, "__name__") and not hasattr(printer, "name"):
        raise TypeError("printer missing attribute: name")
    if hasattr(printer, "name") and not hasattr(printer, "enabled"):
        raise TypeError("printer missing attribute: enabled")
    if not hasattr(printer, "__call__"):
        raise TypeError("printer missing attribute: __call__")
    if hasattr(printer, "name"):
        name = printer.name
    else:
        name = printer.__name__
    # None and the gdb module itself both mean "register globally".
    if obj is None or obj is gdb:
        if gdb.parameter("verbose"):
            gdb.write("Registering global %s pretty-printer ...\n" % name)
        obj = gdb
    else:
        if gdb.parameter("verbose"):
            gdb.write("Registering %s pretty-printer for %s ...\n" % (
                name, obj.filename))
    # Printers implemented as functions are old-style.  In order to not risk
    # breaking anything we do not check __name__ here.
    if hasattr(printer, "name"):
        if not isinstance(printer.name, basestring):
            raise TypeError("printer name is not a string")
        # If printer provides a name, make sure it doesn't contain ";".
        # Semicolon is used by the info/enable/disable pretty-printer commands
        # to delimit subprinters.
        if printer.name.find(";") >= 0:
            raise ValueError("semicolon ';' in printer name")
        # Also make sure the name is unique.
        # Alas, we can't do the same for functions and __name__, they could
        # all have a canonical name like "lookup_function".
        # PERF: gdb records printers in a list, making this inefficient.
        i = 0
        for p in obj.pretty_printers:
            if hasattr(p, "name") and p.name == printer.name:
                if replace:
                    del obj.pretty_printers[i]
                    break
                else:
                    raise RuntimeError("pretty-printer already registered: %s" %
                                       printer.name)
            i = i + 1
    # Front of the list, so newly-registered printers win lookups.
    obj.pretty_printers.insert(0, printer)
class RegexpCollectionPrettyPrinter(PrettyPrinter):
    """Class for implementing a collection of regular-expression based pretty-printers.
    Intended usage:
    pretty_printer = RegexpCollectionPrettyPrinter("my_library")
    pretty_printer.add_printer("myclass1", "^myclass1$", MyClass1Printer)
    ...
    pretty_printer.add_printer("myclassN", "^myclassN$", MyClassNPrinter)
    register_pretty_printer(obj, pretty_printer)
    """
    class RegexpSubprinter(SubPrettyPrinter):
        # One (name, regexp, printer-factory) entry; the regexp is compiled
        # once here so lookups in __call__ stay cheap.
        def __init__(self, name, regexp, gen_printer):
            super(RegexpCollectionPrettyPrinter.RegexpSubprinter, self).__init__(name)
            self.regexp = regexp
            self.gen_printer = gen_printer
            self.compiled_re = re.compile(regexp)
    def __init__(self, name):
        super(RegexpCollectionPrettyPrinter, self).__init__(name, [])
    def add_printer(self, name, regexp, gen_printer):
        """Add a printer to the list.
        The printer is added to the end of the list.
        Arguments:
            name: The name of the subprinter.
            regexp: The regular expression, as a string.
            gen_printer: A function/method that given a value returns an
                object to pretty-print it.
        Returns:
            Nothing.
        """
        # NOTE: A previous version made the name of each printer the regexp.
        # That makes it awkward to pass to the enable/disable commands (it's
        # cumbersome to make a regexp of a regexp).  So now the name is a
        # separate parameter.
        self.subprinters.append(self.RegexpSubprinter(name, regexp,
                                                      gen_printer))
    def __call__(self, val):
        """Lookup the pretty-printer for the provided value."""
        # Get the type name; prefer the struct/union tag, fall back to the
        # plain type name.
        typename = gdb.types.get_basic_type(val.type).tag
        if not typename:
            typename = val.type.name
        if not typename:
            return None
        # Iterate over table of type regexps to determine
        # if a printer is registered for that type.
        # Return an instantiation of the printer if found.
        for printer in self.subprinters:
            if printer.enabled and printer.compiled_re.search(typename):
                return printer.gen_printer(val)
        # Cannot find a pretty printer.  Return None.
        return None
# A helper class for printing enum types. This class is instantiated
# with a list of enumerators to print a particular Value.
class _EnumInstance:
    """Renders one concrete value of a flag-style enum.

    Built with the enum's (name, value) pairs and the value to print;
    to_string() joins the names of the set flags with '|', appending an
    '<unknown: 0x..>' entry for any bits no enumerator accounts for.
    """
    def __init__(self, enumerators, val):
        self.enumerators = enumerators
        self.val = val
    def to_string(self):
        names = []
        remaining = long(self.val)
        matched = False
        for (flag_name, flag_value) in self.enumerators:
            if remaining & flag_value:
                names.append(flag_name)
                remaining &= ~flag_value
                matched = True
        # Bits not covered by any enumerator are shown explicitly.
        if remaining != 0 or not matched:
            names.append('<unknown: 0x%x>' % remaining)
        return "0x%x [%s]" % (int(self.val), " | ".join(names))
class FlagEnumerationPrinter(PrettyPrinter):
    """A pretty-printer which can be used to print a flag-style enumeration.
    A flag-style enumeration is one where the enumerators are or'd
    together to create values.  The new printer will print these
    symbolically using '|' notation.  The printer must be registered
    manually.  This printer is most useful when an enum is flag-like,
    but has some overlap.  GDB's built-in printing will not handle
    this case, but this printer will attempt to."""
    def __init__(self, enum_type):
        # `enum_type` is the enum's type name; it doubles as the printer name.
        super(FlagEnumerationPrinter, self).__init__(enum_type)
        self.initialized = False
    def __call__(self, val):
        if not self.initialized:
            # One-time lazy lookup of the enum's fields, deferred so the
            # type need not exist when the printer object is created.
            self.initialized = True
            flags = gdb.lookup_type(self.name)
            self.enumerators = []
            for field in flags.fields():
                self.enumerators.append((field.name, field.enumval))
            # Sorting the enumerators by value usually does the right
            # thing.
            self.enumerators.sort(key = lambda x: x[1])
        if self.enabled:
            return _EnumInstance(self.enumerators, val)
        else:
            return None
# Builtin pretty-printers.
# The set is defined as empty, and files in printing/*.py add their printers
# to this with add_builtin_pretty_printer.
_builtin_pretty_printers = RegexpCollectionPrettyPrinter("builtin")
register_pretty_printer(None, _builtin_pretty_printers)
# Add a builtin pretty-printer.
def add_builtin_pretty_printer(name, regexp, printer):
    """Register NAME/REGEXP/PRINTER with the global "builtin" collection."""
    _builtin_pretty_printers.add_printer(name, regexp, printer)
| [
"[email protected]"
]
| |
c1b1dc0c1b17afb0bda43f4b02f94622623041a8 | 9398d8433fdb29ee630a6ee43a07bc36a2adbd88 | /ryu/__init__.py | e7cc55df725de6f5401b96f6663236b53a10e5d9 | []
| no_license | bopopescu/OpenStack_Liberty_Control | ca5a21d0c32c55dc8c517f5c7c9938ce575a4888 | 0f6ec1b4d38c47776fdf8935266bcaef2464af4c | refs/heads/master | 2022-12-03T10:41:53.210667 | 2016-03-29T06:25:58 | 2016-03-29T06:25:58 | 282,089,815 | 0 | 0 | null | 2020-07-24T01:04:15 | 2020-07-24T01:04:14 | null | UTF-8 | Python | false | false | 683 | py | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Package version, exposed both as a tuple (for programmatic comparison)
# and as the derived dotted-string form.
version_info = (3, 29, 1)
version = '.'.join(map(str, version_info))
| [
"[email protected]"
]
| |
dad949a14e690002447d02f8e29d60c18500099b | 508321d683975b2339e5292202f3b7a51bfbe22d | /Userset.vim/ftplugin/python/CompletePack/PySide2/QtGui/QVector3D.py | b8973e32292f5f6d1dd2b4cfec809fcb2ed4014c | []
| no_license | cundesi/vimSetSa | 4947d97bcfe89e27fd2727423112bb37aac402e2 | 0d3f9e5724b471ab21aa1199cc3b4676e30f8aab | refs/heads/master | 2020-03-28T05:54:44.721896 | 2018-08-31T07:23:41 | 2018-08-31T07:23:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,335 | py | # encoding: utf-8
# module PySide2.QtGui
# from C:\Program Files\Autodesk\Maya2017\Python\lib\site-packages\PySide2\QtGui.pyd
# by generator 1.145
# no doc
# imports
import PySide2.QtCore as __PySide2_QtCore
import Shiboken as __Shiboken
class QVector3D(__Shiboken.Object):
# no doc
def crossProduct(self, *args, **kwargs): # real signature unknown
pass
def distanceToLine(self, *args, **kwargs): # real signature unknown
pass
def distanceToPlane(self, *args, **kwargs): # real signature unknown
pass
def distanceToPoint(self, *args, **kwargs): # real signature unknown
pass
def dotProduct(self, *args, **kwargs): # real signature unknown
pass
def isNull(self, *args, **kwargs): # real signature unknown
pass
def length(self, *args, **kwargs): # real signature unknown
pass
def lengthSquared(self, *args, **kwargs): # real signature unknown
pass
def normal(self, *args, **kwargs): # real signature unknown
pass
def normalize(self, *args, **kwargs): # real signature unknown
pass
def normalized(self, *args, **kwargs): # real signature unknown
pass
def project(self, *args, **kwargs): # real signature unknown
pass
def setX(self, *args, **kwargs): # real signature unknown
pass
def setY(self, *args, **kwargs): # real signature unknown
pass
def setZ(self, *args, **kwargs): # real signature unknown
pass
def toPoint(self, *args, **kwargs): # real signature unknown
pass
def toPointF(self, *args, **kwargs): # real signature unknown
pass
def toTuple(self, *args, **kwargs): # real signature unknown
pass
def toVector2D(self, *args, **kwargs): # real signature unknown
pass
def toVector4D(self, *args, **kwargs): # real signature unknown
pass
def unproject(self, *args, **kwargs): # real signature unknown
pass
def x(self, *args, **kwargs): # real signature unknown
pass
def y(self, *args, **kwargs): # real signature unknown
pass
def z(self, *args, **kwargs): # real signature unknown
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __copy__(self, *args, **kwargs): # real signature unknown
pass
def __div__(self, y): # real signature unknown; restored from __doc__
""" x.__div__(y) <==> x/y """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __iadd__(self, y): # real signature unknown; restored from __doc__
""" x.__iadd__(y) <==> x+=y """
pass
def __idiv__(self, y): # real signature unknown; restored from __doc__
""" x.__idiv__(y) <==> x/=y """
pass
def __imul__(self, y): # real signature unknown; restored from __doc__
""" x.__imul__(y) <==> x*=y """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __isub__(self, y): # real signature unknown; restored from __doc__
""" x.__isub__(y) <==> x-=y """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lshift__(self, y): # real signature unknown; restored from __doc__
""" x.__lshift__(y) <==> x<<y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mul__(self, y): # real signature unknown; restored from __doc__
""" x.__mul__(y) <==> x*y """
pass
def __neg__(self): # real signature unknown; restored from __doc__
""" x.__neg__() <==> -x """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __nonzero__(self): # real signature unknown; restored from __doc__
""" x.__nonzero__() <==> x != 0 """
pass
def __radd__(self, y): # real signature unknown; restored from __doc__
""" x.__radd__(y) <==> y+x """
pass
def __rdiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rdiv__(y) <==> y/x """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rlshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rlshift__(y) <==> y<<x """
pass
def __rmul__(self, y): # real signature unknown; restored from __doc__
""" x.__rmul__(y) <==> y*x """
pass
def __rrshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rrshift__(y) <==> y>>x """
pass
def __rshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rshift__(y) <==> x>>y """
pass
def __rsub__(self, y): # real signature unknown; restored from __doc__
""" x.__rsub__(y) <==> y-x """
pass
def __rtruediv__(self, y): # real signature unknown; restored from __doc__
""" x.__rtruediv__(y) <==> y/x """
pass
def __sub__(self, y): # real signature unknown; restored from __doc__
""" x.__sub__(y) <==> x-y """
pass
def __truediv__(self, y): # real signature unknown; restored from __doc__
""" x.__truediv__(y) <==> x/y """
pass
| [
"[email protected]"
]
| |
05959d27e7eaf9d04223342a556392e1d4a9b7ab | 3a476e0de377d1580facbfd78efdfbca009ed7a3 | /algo/zhang_ppo.py | b1600f08306144938cc2461e79e881d490bfa200 | [
"MIT"
]
| permissive | liuruoze/Thought-SC2 | b7366186dbb4494fabdb3e0104354665e21ff707 | b3cfbeffbfa09b952c596805d2006af24613db2d | refs/heads/master | 2023-04-28T11:47:56.771797 | 2021-01-15T00:25:26 | 2021-01-15T00:25:26 | 296,185,180 | 4 | 2 | MIT | 2023-04-24T09:06:48 | 2020-09-17T01:17:04 | Python | UTF-8 | Python | false | false | 13,421 | py | """
Implementation of PPO
ref: Schulman, John, et al. "Proximal policy optimization algorithms." arXiv preprint arXiv:1707.06347 (2017).
ref: https://github.com/Jiankai-Sun/Proximal-Policy-Optimization-in-Pytorch/blob/master/ppo.py
ref: https://github.com/openai/baselines/tree/master/baselines/ppo2
NOTICE:
`Tensor2` means 2D-Tensor (num_samples, num_dims)
"""
import gym
import torch
import torch.nn as nn
import torch.optim as opt
from torch import Tensor
from torch.autograd import Variable
from collections import namedtuple
from itertools import count
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from os.path import join as joindir
from os import makedirs as mkdir
import pandas as pd
import numpy as np
import argparse
import datetime
import math
# One environment step; `mask` is 0 at episode termination, 1 otherwise
# (set in the collection loop in ppo()).
Transition = namedtuple('Transition', ('state', 'value', 'action', 'logproba', 'mask', 'next_state', 'reward'))
EPS = 1e-10
# Results go to ../result/<this script's basename>; created eagerly.
RESULT_DIR = joindir('../result', '.'.join(__file__.split('.')[:-1]))
mkdir(RESULT_DIR, exist_ok=True)
class args(object):
    # Hyper-parameters and switches for the PPO run.  The class is used as
    # a plain namespace and never instantiated.
    env_name = 'Hopper-v2'
    seed = 1234
    num_episode = 2000
    batch_size = 2048          # minimum env steps collected per episode
    max_step_per_round = 2000  # per-rollout step cap
    gamma = 0.995              # discount factor -- presumably; used past this chunk
    lamda = 0.97               # GAE lambda -- presumably; used past this chunk
    log_num_episode = 1
    num_epoch = 10             # optimization epochs per collected batch
    minibatch_size = 256
    clip = 0.2                 # PPO clipping epsilon (initial value of clip_now)
    loss_coeff_value = 0.5
    loss_coeff_entropy = 0.01
    lr = 3e-4                  # initial Adam learning rate (lr_now)
    num_parallel_run = 5
    # tricks
    schedule_adam = 'linear'
    schedule_clip = 'linear'
    layer_norm = True
    state_norm = True
    advantage_norm = True
    lossvalue_norm = True
class RunningStat(object):
    """Online mean/variance accumulator (Welford's algorithm).

    Tracks element-wise first and second moments of every array pushed
    in, without storing the samples themselves.
    """
    def __init__(self, shape):
        self._n = 0
        self._M = np.zeros(shape)
        self._S = np.zeros(shape)
    def push(self, x):
        x = np.asarray(x)
        assert x.shape == self._M.shape
        self._n += 1
        if self._n == 1:
            self._M[...] = x
        else:
            # Welford update: mean shifts by delta/n; S accumulates
            # delta * (x - new_mean).
            delta = x - self._M
            self._M[...] = self._M + delta / self._n
            self._S[...] = self._S + delta * (x - self._M)
    @property
    def n(self):
        return self._n
    @property
    def mean(self):
        return self._M
    @property
    def var(self):
        # With a single sample, fall back to mean**2 (original convention).
        if self._n > 1:
            return self._S / (self._n - 1)
        return np.square(self._M)
    @property
    def std(self):
        return np.sqrt(self.var)
    @property
    def shape(self):
        return self._M.shape
class ZFilter:
    """Standardizes inputs with running statistics: y = (x - mean) / std.

    Each call can optionally fold x into the running estimates first,
    then de-means, divides by (std + 1e-8), and clips to [-clip, clip].
    """
    def __init__(self, shape, demean=True, destd=True, clip=10.0):
        self.demean = demean
        self.destd = destd
        self.clip = clip
        self.rs = RunningStat(shape)
    def __call__(self, x, update=True):
        if update:
            self.rs.push(x)
        y = x
        if self.demean:
            y = y - self.rs.mean
        if self.destd:
            y = y / (self.rs.std + 1e-8)
        if self.clip:
            y = np.clip(y, -self.clip, self.clip)
        return y
    def output_shape(self, input_space):
        return input_space.shape
class ActorCritic(nn.Module):
    # Two independent 2x64-tanh MLP trunks over the same state input: the
    # actor head emits a diagonal-Gaussian policy (state-dependent mean,
    # state-independent learned log-std), the critic head a scalar value.
    def __init__(self, num_inputs, num_outputs, layer_norm=True):
        super(ActorCritic, self).__init__()
        self.actor_fc1 = nn.Linear(num_inputs, 64)
        self.actor_fc2 = nn.Linear(64, 64)
        self.actor_fc3 = nn.Linear(64, num_outputs)
        # Log standard deviation is a free parameter, not a network output.
        self.actor_logstd = nn.Parameter(torch.zeros(1, num_outputs))
        self.critic_fc1 = nn.Linear(num_inputs, 64)
        self.critic_fc2 = nn.Linear(64, 64)
        self.critic_fc3 = nn.Linear(64, 1)
        if layer_norm:
            self.layer_norm(self.actor_fc1, std=1.0)
            self.layer_norm(self.actor_fc2, std=1.0)
            # Small gain on the final actor layer keeps initial actions near 0.
            self.layer_norm(self.actor_fc3, std=0.01)
            self.layer_norm(self.critic_fc1, std=1.0)
            self.layer_norm(self.critic_fc2, std=1.0)
            self.layer_norm(self.critic_fc3, std=1.0)
    @staticmethod
    def layer_norm(layer, std=1.0, bias_const=0.0):
        # NOTE(review): despite the name, this performs orthogonal weight
        # *initialization* (gain `std`) with constant bias -- not layer
        # normalization.
        torch.nn.init.orthogonal_(layer.weight, std)
        torch.nn.init.constant_(layer.bias, bias_const)
    def forward(self, states):
        """
        run policy network (actor) as well as value network (critic)
        :param states: a Tensor2 represents states
        :return: 3 Tensor2: action mean, action log-std, state value
        """
        action_mean, action_logstd = self._forward_actor(states)
        critic_value = self._forward_critic(states)
        return action_mean, action_logstd, critic_value
    def _forward_actor(self, states):
        # Per-state action mean plus the log-std broadcast to its shape.
        x = torch.tanh(self.actor_fc1(states))
        x = torch.tanh(self.actor_fc2(x))
        action_mean = self.actor_fc3(x)
        action_logstd = self.actor_logstd.expand_as(action_mean)
        return action_mean, action_logstd
    def _forward_critic(self, states):
        # Scalar value estimate per state.
        x = torch.tanh(self.critic_fc1(states))
        x = torch.tanh(self.critic_fc2(x))
        critic_value = self.critic_fc3(x)
        return critic_value
    def select_action(self, action_mean, action_logstd, return_logproba=True):
        """
        given mean and std, sample an action from normal(mean, std)
        also returns probability of the given chosen
        NOTE(review): with return_logproba=False, `logproba` is unbound and
        the final return raises NameError; all call sites use the default.
        """
        action_std = torch.exp(action_logstd)
        action = torch.normal(action_mean, action_std)
        if return_logproba:
            logproba = self._normal_logproba(action, action_mean, action_logstd, action_std)
        return action, logproba
    @staticmethod
    def _normal_logproba(x, mean, logstd, std=None):
        # Diagonal-Gaussian log-density, summed over the action dimensions.
        if std is None:
            std = torch.exp(logstd)
        std_sq = std.pow(2)
        logproba = - 0.5 * math.log(2 * math.pi) - logstd - (x - mean).pow(2) / (2 * std_sq)
        return logproba.sum(1)
    def get_logproba(self, states, actions):
        """
        return probability of chosen the given actions under corresponding states of current network
        :param states: Tensor
        :param actions: Tensor
        """
        action_mean, action_logstd = self._forward_actor(states)
        logproba = self._normal_logproba(actions, action_mean, action_logstd)
        return logproba
class Memory(object):
    """Flat on-policy buffer of Transition tuples."""
    def __init__(self):
        self.memory = []
    def push(self, *args):
        """Append one Transition built from the positional fields."""
        self.memory.append(Transition._make(args))
    def sample(self):
        """Return all stored steps, transposed into a single Transition of
        per-field sequences (no sub-sampling, despite the name)."""
        return Transition(*zip(*self.memory))
    def __len__(self):
        return len(self.memory)
def ppo(args):
    """Train an ActorCritic agent with clipped-surrogate PPO on the given
    gym environment.

    :param args: namespace of hyperparameters (env_name, seed, lr, clip,
        gamma, lamda, batch_size, minibatch_size, schedules, ...)
    :return: list of dicts with keys 'episode', 'steps', 'meanepreward',
        'meaneplen' (one entry per episode)
    """
    env = gym.make(args.env_name)
    num_inputs = env.observation_space.shape[0]
    num_actions = env.action_space.shape[0]
    env.seed(args.seed)
    torch.manual_seed(args.seed)
    network = ActorCritic(num_inputs, num_actions, layer_norm=args.layer_norm)
    optimizer = opt.Adam(network.parameters(), lr=args.lr)
    # Running mean/std observation normaliser, clipped to +-5 sigma.
    running_state = ZFilter((num_inputs,), clip=5.0)
    # record average 1-round cumulative reward in every episode
    reward_record = []
    global_steps = 0
    # Both may be annealed linearly over training (see schedules below).
    lr_now = args.lr
    clip_now = args.clip
    for i_episode in range(args.num_episode):
        # step1: perform current policy to collect trajectories
        # this is an on-policy method!
        memory = Memory()
        num_steps = 0
        reward_list = []
        len_list = []
        while num_steps < args.batch_size:
            state = env.reset()
            if args.state_norm:
                state = running_state(state)
            reward_sum = 0
            for t in range(args.max_step_per_round):
                action_mean, action_logstd, value = network(Tensor(state).unsqueeze(0))
                action, logproba = network.select_action(action_mean, action_logstd)
                action = action.data.numpy()[0]
                logproba = logproba.data.numpy()[0]
                next_state, reward, done, _ = env.step(action)
                reward_sum += reward
                if args.state_norm:
                    next_state = running_state(next_state)
                # mask = 0 terminates the bootstrap chain at episode ends.
                mask = 0 if done else 1
                memory.push(state, value, action, logproba, mask, next_state, reward)
                if done:
                    break
                state = next_state
            num_steps += (t + 1)
            global_steps += (t + 1)
            reward_list.append(reward_sum)
            len_list.append(t + 1)
        reward_record.append({
            'episode': i_episode,
            'steps': global_steps,
            'meanepreward': np.mean(reward_list),
            'meaneplen': np.mean(len_list)})
        batch = memory.sample()
        batch_size = len(memory)
        # step2: extract variables from trajectories
        rewards = Tensor(batch.reward)
        values = Tensor(batch.value)
        masks = Tensor(batch.mask)
        actions = Tensor(batch.action)
        states = Tensor(batch.state)
        oldlogproba = Tensor(batch.logproba)
        returns = Tensor(batch_size)
        deltas = Tensor(batch_size)
        advantages = Tensor(batch_size)
        # Backward pass over the trajectory computing discounted returns,
        # TD residuals and GAE advantages in one sweep.
        prev_return = 0
        prev_value = 0
        prev_advantage = 0
        for i in reversed(range(batch_size)):
            returns[i] = rewards[i] + args.gamma * prev_return * masks[i]
            deltas[i] = rewards[i] + args.gamma * prev_value * masks[i] - values[i]
            # ref: https://arxiv.org/pdf/1506.02438.pdf (generalization advantage estimate)
            advantages[i] = deltas[i] + args.gamma * args.lamda * prev_advantage * masks[i]
            prev_return = returns[i]
            prev_value = values[i]
            prev_advantage = advantages[i]
        if args.advantage_norm:
            advantages = (advantages - advantages.mean()) / (advantages.std() + EPS)
        # step3: several epochs of minibatch SGD on the clipped PPO loss.
        for i_epoch in range(int(args.num_epoch * batch_size / args.minibatch_size)):
            # sample from current batch
            minibatch_ind = np.random.choice(batch_size, args.minibatch_size, replace=False)
            minibatch_states = states[minibatch_ind]
            minibatch_actions = actions[minibatch_ind]
            minibatch_oldlogproba = oldlogproba[minibatch_ind]
            minibatch_newlogproba = network.get_logproba(minibatch_states, minibatch_actions)
            minibatch_advantages = advantages[minibatch_ind]
            minibatch_returns = returns[minibatch_ind]
            minibatch_newvalues = network._forward_critic(minibatch_states).flatten()
            # Importance ratio pi_new / pi_old, clipped for the surrogate.
            ratio = torch.exp(minibatch_newlogproba - minibatch_oldlogproba)
            surr1 = ratio * minibatch_advantages
            surr2 = ratio.clamp(1 - clip_now, 1 + clip_now) * minibatch_advantages
            loss_surr = - torch.mean(torch.min(surr1, surr2))
            # not sure the value loss should be clipped as well
            # clip example: https://github.com/Jiankai-Sun/Proximal-Policy-Optimization-in-Pytorch/blob/master/ppo.py
            # however, it does not make sense to clip score-like value by a dimensionless clipping parameter
            # moreover, original paper does not mention clipped value
            if args.lossvalue_norm:
                minibatch_return_6std = 6 * minibatch_returns.std()
                loss_value = torch.mean((minibatch_newvalues - minibatch_returns).pow(2)) / minibatch_return_6std
            else:
                loss_value = torch.mean((minibatch_newvalues - minibatch_returns).pow(2))
            loss_entropy = torch.mean(torch.exp(minibatch_newlogproba) * minibatch_newlogproba)
            total_loss = loss_surr + args.loss_coeff_value * loss_value + args.loss_coeff_entropy * loss_entropy
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()
        # Optional linear annealing of the clip range and learning rate.
        if args.schedule_clip == 'linear':
            ep_ratio = 1 - (i_episode / args.num_episode)
            clip_now = args.clip * ep_ratio
        if args.schedule_adam == 'linear':
            ep_ratio = 1 - (i_episode / args.num_episode)
            lr_now = args.lr * ep_ratio
            # set learning rate
            # ref: https://stackoverflow.com/questions/48324152/
            for g in optimizer.param_groups:
                g['lr'] = lr_now
        if i_episode % args.log_num_episode == 0:
            print('Finished episode: {} Reward: {:.4f} total_loss = {:.4f} = {:.4f} + {} * {:.4f} + {} * {:.4f}' \
                .format(i_episode, reward_record[-1]['meanepreward'], total_loss.data, loss_surr.data, args.loss_coeff_value,
                loss_value.data, args.loss_coeff_entropy, loss_entropy.data))
            print('-----------------')
    return reward_record
def test(args):
    """Run ``args.num_parallel_run`` independent PPO trainings (bumping the
    seed each run) and write the concatenated per-episode records to a CSV
    file in RESULT_DIR."""
    all_records = []
    for run_idx in range(args.num_parallel_run):
        args.seed += 1
        record = pd.DataFrame(ppo(args))
        record['#parallel_run'] = run_idx
        all_records.append(record)
    pd.concat(all_records, axis=0).to_csv(
        joindir(RESULT_DIR, 'ppo-record-{}.csv'.format(args.env_name)))
if __name__ == '__main__':
    # Benchmark PPO across the standard MuJoCo continuous-control suite,
    # reusing the globally parsed `args` and overriding only the env name.
    for env in ['Walker2d-v2', 'Swimmer-v2', 'Hopper-v2', 'Humanoid-v2', 'HalfCheetah-v2', 'Reacher-v2']:
        args.env_name = env
        test(args)
| [
"[email protected]"
]
| |
d06c3fe68020ef60919224da60cbd77b2a0d58c0 | 12b401d5be9f5a1e1f60eb607f1796771deae085 | /application_play_game.py | 78325623113f39f07959a06d1fdcf3e6d58bfc12 | []
| no_license | chandraprakashh/machine_learning_code | 805355125f66cd03005fbc6bb134aeebf8a46c6a | 64679785d0ac8e231fd0a2d5386519f7e93eea82 | refs/heads/master | 2020-07-20T23:21:00.157810 | 2020-01-13T10:28:36 | 2020-01-13T10:28:36 | 206,724,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 26 11:35:02 2019
@author: Administrator
"""
import tkinter
# Minimal Tk window with a button that appends a "welcome" label on
# every click.
window = tkinter.Tk()
window.title("GUI")

def PrintOnClick():
    # Each click packs one more Label into the window.
    tkinter.Label(window, text="welcome").pack()

tkinter.Button(window, text="click me", command=PrintOnClick).pack()

window.mainloop()
"[email protected]"
]
| |
2111ba7c1c2e5eef8696d286f56b85cb87a8ffd3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_fencers.py | 458ff6e91aa5b2b77af88010dcaf87cfc6f9bd2a | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#calss header
class _FENCERS():
def __init__(self,):
self.name = "FENCERS"
self.definitions = fencer
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['fencer']
| [
"[email protected]"
]
| |
920229c2be6c0e09a63d32d83e5be8973bb20dc8 | de4d88db6ea32d20020c169f734edd4b95c3092d | /aiotdlib/api/functions/process_push_notification.py | 339796d01d5927139d4d1b705af21fd64f6ce1e8 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
]
| permissive | thiagosm/aiotdlib | 5cc790a5645f7e4cc61bbd0791433ed182d69062 | 4528fcfca7c5c69b54a878ce6ce60e934a2dcc73 | refs/heads/main | 2023-08-15T05:16:28.436803 | 2021-10-18T20:41:27 | 2021-10-18T20:41:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | # =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class ProcessPushNotification(BaseObject):
    """
    Handles a push notification. Returns error with code 406 if the push notification is not supported and connection to the server is required to fetch new data. Can be called before authorization
    
    :param payload: JSON-encoded push notification payload with all fields sent by the server, and "google.sent_time" and "google.notification.sound" fields added
    :type payload: :class:`str`
    
    """

    # TDLib method identifier; serialized as the "@type" field on the wire.
    ID: str = Field("processPushNotification", alias="@type")
    payload: str

    @staticmethod
    def read(q: dict) -> ProcessPushNotification:
        # `construct` bypasses pydantic validation; the dict comes from TDLib
        # itself and is assumed to be well-formed.
        return ProcessPushNotification.construct(**q)
| [
"[email protected]"
]
| |
1f361b50f9c5c862d5ed7da0bf89240bf1400f42 | 3d6b4aca5ef90dd65a2b40cf11fd8f84088777ab | /zounds/datasets/phatdrumloops.py | 55d1cfcad1b68dd4f92b6a20d307d0f4bb7c855e | [
"MIT"
]
| permissive | maozhiqiang/zounds | c3015f1bb58b835b5f8e9106518348442f86b0fc | df633399e7acbcbfbf5576f2692ab20d0501642e | refs/heads/master | 2020-04-19T22:41:42.540537 | 2019-01-31T01:13:45 | 2019-01-31T01:13:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,453 | py | from zounds.soundfile import AudioMetaData
import requests
import re
import urlparse
class PhatDrumLoops(object):
    """
    Produces an iterable of :class:`zounds.soundfile.AudioMetaData` instances
    for every drum break from http://phatdrumloops.com/beats.php

    Args:
        attrs (dict): Extra properties to add to the :class:`AudioMetaData`

    Examples
        >>> from zounds import PhatDrumLoops
        >>> pdl = PhatDrumLoops()
        >>> iter(pdl).next()
        {'description': None, 'tags': None, 'uri': <Request [GET]>, 'channels': None, 'licensing': None, 'samplerate': None}

    See Also:
        :class:`InternetArchive`
        :class:`FreeSoundSearch`
        :class:`zounds.soundfile.AudioMetaData`
    """

    def __init__(self, **attrs):
        super(PhatDrumLoops, self).__init__()
        self.attrs = attrs
        self.attrs.update(web_url='http://www.phatdrumloops.com/beats.php')

    def __iter__(self):
        # Scrape the beats page for links to .wav files and yield one
        # AudioMetaData per link; the Range header asks for the full file.
        page = requests.get('http://phatdrumloops.com/beats.php')
        wav_link = re.compile('href="(?P<uri>/audio/wav/[^\.]+\.wav)"')
        for match in wav_link.finditer(page.content):
            full_url = urlparse.urljoin(
                'http://phatdrumloops.com', match.groupdict()['uri'])
            yield AudioMetaData(
                uri=requests.Request(
                    method='GET',
                    url=full_url,
                    headers={'Range': 'bytes=0-'}),
                **self.attrs)
| [
"[email protected]"
]
| |
cbb8321e0fef6844c689fe05fcee4eaf4d7988e9 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_051/ch120_2020_09_27_20_54_37_267942.py | 265ee47db4543afc45e1caee2a01fdff39a7ce64 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | import random as rd
# Simple interactive roulette game: the player starts with 100 units and
# may bet on a number (pays 35x) or on parity (pays 1x) each round.
dinheiro=100
while dinheiro >0:
    # One spin per loop iteration.
    numero=rd.randint(1, 36)
    # Unreachable: the while condition already guarantees dinheiro > 0.
    if dinheiro == 0:
        break
    print ('voce tem', dinheiro, 'dinheiro')
    # NOTE(review): prompt string contains a typo ("qunato"); it is a
    # runtime string, left untouched here. Bets larger than the balance
    # are not rejected, so dinheiro can go negative.
    valor=int(input('qunato quer apostar? '))
    # Betting 0 quits the game.
    if valor == 0:
        break
    # 'n' = bet on a single number, 'p' = bet on parity.
    aposta=input('quer apostar em um numero ou em uma paridade? ')
    if aposta == 'n':
        casa=int(input('escolha um numero de 1 a 36: '))
        if casa == numero:
            dinheiro+=valor*35
        else:
            dinheiro-=valor
    if aposta == 'p':
        # 'p' = bet on even; any other answer is treated as a bet on odd.
        paridade=input('par ou impar: ')
        if paridade == 'p':
            if numero % 2 == 0:
                dinheiro += valor
            else:
                dinheiro-=valor
        else:
            if numero % 2 != 0:
                dinheiro+=valor
            else:
                dinheiro-=valor
if dinheiro == 0:
    print ('acabou seu dinheiro')
else:
    print ('Muito bem voce terminou com', dinheiro, 'dinheiro')
"[email protected]"
]
| |
e4dd4ae92970dcf7438e5e3e8e1d87c2c669e718 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02267/s160876142.py | cc65b83611da412d51a5be85eff336a19d9f1eec | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | N = int(input())
L = input().split()
n = int(input())
l = input().split()
dup = []
count = 0
for i in range(N):
for j in range(n):
if L[i] == l[j]:
dup.append(L[i])
break
dup = list(set(dup))
print(len(dup))
| [
"[email protected]"
]
| |
ebf8ee08be5d85beef798cd93010cce8cdfcb4f7 | 2dc50ddfb0a431a34867c8955a972e67870a2755 | /migrations/versions/4445080944ee_hidden_hosts_management.py | d853ad3d00f27794e4a672e9281631081a45b99b | [
"BSD-3-Clause"
]
| permissive | ziirish/burp-ui | d5aec06adb516eb26f7180f8e9305e12de89156c | 2b8c6e09a4174f2ae3545fa048f59c55c4ae7dba | refs/heads/master | 2023-07-19T23:05:57.646158 | 2023-07-07T18:21:34 | 2023-07-07T18:21:34 | 20,400,152 | 98 | 18 | BSD-3-Clause | 2023-05-02T00:31:27 | 2014-06-02T10:23:40 | Python | UTF-8 | Python | false | false | 875 | py | """hidden hosts management
Revision ID: 4445080944ee
Revises: 695dcbd29d4f
Create Date: 2018-10-03 11:47:20.028686
"""
# revision identifiers, used by Alembic.
revision = "4445080944ee"  # this migration
down_revision = "695dcbd29d4f"  # previous migration in the chain
import sqlalchemy as sa
from alembic import op
def upgrade():
    """Create the ``hidden`` table: per-user records of which client/server
    entries should be hidden from the UI."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "hidden",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("user", sa.String(length=256), nullable=False),
        sa.Column("client", sa.String(length=4096), nullable=True),
        sa.Column("server", sa.String(length=4096), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``hidden`` table created by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("hidden")
    # ### end Alembic commands ###
| [
"[email protected]"
]
| |
74ca2b4a8a82aad877acfaf39298809830bd83a9 | d3e31f6b8da5c1a7310b543bbf2adc76091b5571 | /Day29/upload_file/upload_file/settings.py | dc5d94a6a1bf90cd9d6113c34d79017577181cc2 | []
| no_license | pytutorial/py2103 | 224a5a7133dbe03fc4f798408694bf664be10613 | adbd9eb5a32eb1d28b747dcfbe90ab8a3470e5de | refs/heads/main | 2023-07-14T06:31:18.918778 | 2021-08-12T14:29:16 | 2021-08-12T14:29:16 | 355,163,185 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,339 | py | """
Django settings for upload_file project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-m&_n*uu56)_z%@jaq$1*z&yhs9ht56x!hk$lbehfee4c$j-ti9'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'app',  # project app handling the upload views
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'upload_file.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'upload_file.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

# Default development database: file-based SQLite next to manage.py.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Extra locations searched by the staticfiles finders, in addition to each
# app's own "static/" directory.
# Improvement: use pathlib (consistent with BASE_DIR above) instead of a
# bottom-of-file "import os" + os.path.join; Django 3.1+ settings accept
# Path objects.
STATICFILES_DIRS = [
    BASE_DIR / "static",
]
"[email protected]"
]
| |
506ad115684ba8ed96c7300b40bb7467ba9114d6 | 2198abd6e37195dbb64b46effa11c6fad1de3b4e | /PyQuantum/Tools/CSV.py | 218dbc3802b453fa193aca8202b04c513040678a | [
"MIT"
]
| permissive | deyh2020/PyQuantum | 179b501bea74be54ccce547e77212c7e1f3cd206 | 78b09987cbfecf549e67b919bb5cb2046b21ad44 | refs/heads/master | 2022-03-24T08:11:50.950566 | 2020-01-05T02:07:59 | 2020-01-05T02:07:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | import pandas as pd
import csv
def list_to_csv(lst, filename):
    """Write ``lst`` (a sequence of rows) to ``filename`` as bare CSV:
    no header row and no index column."""
    frame = pd.DataFrame(lst, columns=None)
    frame.to_csv(filename, index=None, header=False)
# def list_to_csv(lst, filename):
# df = pd.DataFrame(lst)
# df.to_csv(filename)
# def list_from_csv(filename):
# df = pd.read_csv(filename)
# lst = list(df.iloc[:, 1])
# return lst
def list_from_csv(filename):
    """Read ``filename`` as CSV and return its rows as a list of lists
    of strings (header row included, if any)."""
    with open(filename, 'r') as csvfile:
        return list(csv.reader(csvfile))
| [
"[email protected]"
]
| |
98ba07395607b0818853a04048c8dc3186048939 | 4e2117a4381f65e7f2bb2b06da800f40dc98fa12 | /165_RealtimeStereo/test_tflite.py | c9d58604e945ac4c6775f9f5e1b7419b476080c3 | [
"GPL-3.0-only",
"AGPL-3.0-only",
"LicenseRef-scancode-proprietary-license",
"MIT"
]
| permissive | PINTO0309/PINTO_model_zoo | 84f995247afbeda2543b5424d5e0a14a70b8d1f1 | ff08e6e8ab095d98e96fc4a136ad5cbccc75fcf9 | refs/heads/main | 2023-09-04T05:27:31.040946 | 2023-08-31T23:24:30 | 2023-08-31T23:24:30 | 227,367,327 | 2,849 | 520 | MIT | 2023-08-31T23:24:31 | 2019-12-11T13:02:40 | Python | UTF-8 | Python | false | false | 2,792 | py | from tensorflow.lite.python.interpreter import Interpreter
import cv2
import numpy as np
import time
class RealtimeStereo():
    """Thin wrapper around a TFLite stereo-depth model: loads the .tflite
    file once, then ``run(left, right)`` returns the raw disparity map
    predicted for a BGR image pair."""

    def __init__(self, model_path):
        self.model = self.load_model(model_path)

    def load_model(self, model_path):
        """Create the TFLite interpreter and cache the I/O tensor details."""
        self.interpreter = Interpreter(model_path, num_threads=4)
        self.interpreter.allocate_tensors()

        self.input_details = self.interpreter.get_input_details()
        # Input tensor layout is NHWC: (1, height, width, channels) --
        # height/width indexing below relies on that.
        input_shape = self.input_details[0]['shape']
        self.input_height = input_shape[1]
        self.input_width = input_shape[2]
        # Bug fix: channels is the LAST axis of the NHWC shape; the
        # original read input_shape[2] (the width) here.
        self.channels = input_shape[3]

        self.output_details = self.interpreter.get_output_details()
        self.output_shape = self.output_details[0]['shape']

    def preprocess(self, image):
        """Convert a BGR uint8 image to a normalised float32 NHWC batch
        of size 1, resized to the model's input resolution."""
        img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        img_input = cv2.resize(
            img,
            (self.input_width, self.input_height)
        ).astype(np.float32)

        # Normalise with the ImageNet channel statistics (not a simple
        # -1..1 scaling, as the original comment claimed).
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
        img_input = ((img_input / 255.0 - mean) / std)
        # img_input = img_input.transpose(2, 0, 1)
        img_input = img_input[np.newaxis, :, :, :]

        return img_input.astype(np.float32)

    def run(self, left, right):
        """Run inference on a (left, right) BGR pair; returns a squeezed
        2-D disparity map at the model's native output resolution."""
        input_left = self.preprocess(left)
        input_right = self.preprocess(right)

        self.interpreter.set_tensor(self.input_details[0]['index'], input_left)
        self.interpreter.set_tensor(self.input_details[1]['index'], input_right)
        self.interpreter.invoke()

        disparity = self.interpreter.get_tensor(self.output_details[0]['index'])

        return np.squeeze(disparity)
if __name__ == '__main__':
    # model_path = 'saved_model/model_float32.tflite'
    model_path = 'saved_model/model_float16_quant.tflite'
    # model_path = 'saved_model/model_dynamic_range_quant.tflite'
    realtimeStereo = RealtimeStereo(model_path)
    # Left/right stereo pair (Middlebury im0/im1 naming).
    img_left = cv2.imread('im0.png')
    img_right = cv2.imread('im1.png')
    start = time.time()
    disp = realtimeStereo.run(img_left, img_right)
    # Upsample the predicted disparity back to the original image size.
    disp = cv2.resize(
        disp,
        (img_left.shape[1], img_left.shape[0]),
        interpolation=cv2.INTER_LINEAR
    ).astype(np.float32)
    # 16-bit disparity encoding (disparity * 256), KITTI-style.
    img = (disp*256).astype('uint16')
    cv2.imshow('disp', img)
    # Min-max normalise to 8 bit and colourise for display.
    d_min = np.min(disp)
    d_max = np.max(disp)
    depth_map = (disp - d_min) / (d_max - d_min)
    depth_map = depth_map * 255.0
    depth_map = np.asarray(depth_map, dtype="uint8")
    depth_map = cv2.applyColorMap(depth_map, cv2.COLORMAP_JET)
    end = time.time()
    eslapse = end - start
    # NOTE(review): this timing includes the resize/colormap steps, not
    # just model inference.
    print("depthmap : {}s".format(eslapse))
    cv2.imwrite('result.jpg', depth_map)
    cv2.imshow('output', depth_map)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
"[email protected]"
]
| |
24e1af6e4014ee573951e1bbb70250d99347fcd8 | e89f44632effe9ba82b940c7721cad19a32b8a94 | /text2shorthand/shorthand/svsd/a.py | bf377f0363fe3f97c48628ff5aeb67c2e7de1eef | []
| no_license | Wyess/text2shorthand | 3bcdb708f1d7eeb17f9ae3181c4dd70c65c8986e | 5ba361c716178fc3b7e68ab1ae724a57cf3a5d0b | refs/heads/master | 2020-05-17T14:52:11.369058 | 2019-08-20T12:50:00 | 2019-08-20T12:50:00 | 183,776,467 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | from ..svsd.char import SvsdChar
from text2shorthand.common.point import Point as P, PPoint as PP
import pyx
from pyx.metapost.path import (
beginknot,
knot,
endknot,
smoothknot,
tensioncurve,
controlcurve,
curve)
class CharA(SvsdChar):
def __init__(self, name='a', kana='あ',
model='NE10', head_type='NE', tail_type='NE'):
super().__init__(name, kana, model, head_type, tail_type)
self.head_ligature = {}
self.tail_ligature = {'NE'}
@classmethod
def path_NE(cls, ta=None, **kwargs):
return pyx.path.line(0, 0, *PP(10, 30))
@classmethod
def path_NEe(cls, ta=None, **kwargs):
pass
@classmethod
def path_NEer(cls, ta=None, **kwargs):
pass
@classmethod
def path_NEel(cls, ta=None, **kwargs):
pass
@classmethod
def path_NEne(cls, ta=None, **kwargs):
return cls.jog(cls.path_NE())
@classmethod
def path_NEner(cls, ta=None, **kwargs):
pass
@classmethod
def path_NEnel(cls, ta=None, **kwargs):
pass
@classmethod
def path_NEs(cls, ta=None, **kwargs):
pass
@classmethod
def path_NEsl(cls, ta=None, **kwargs):
pass
@classmethod
def path_NEsr(cls, ta=None, **kwargs):
pass
@classmethod
def path_NEse(cls, ta=None, **kwargs):
pass
@classmethod
def path_NEser(cls, ta=None, **kwargs):
pass
@classmethod
def path_NEsel(cls, ta=None, **kwargs):
pass
@classmethod
def path_NEsw(cls, ta=None, **kwargs):
pass
@classmethod
def path_NEswr(cls, ta=None, **kwargs):
pass
@classmethod
def path_NEswl(cls, ta=None, **kwargs):
pass
| [
"[email protected]"
]
| |
8f6f69ccfeabf24952fcc568aea0bf80511103fe | b24ede8d5a45c2b22c7182bdb310f7617a29a365 | /driver/arguments.py | 32dd9655ffd0396436a3c70ebd69b9b10ee218bb | []
| no_license | marc-hanheide/blackboard_tools | e2ea46f3b8be862e377aa6b371266c11a8b1e7e6 | 309de57daff1fcce12c271d800b3eff4d72c557d | refs/heads/master | 2022-12-25T11:41:33.903686 | 2022-12-09T12:03:40 | 2022-12-09T12:03:40 | 34,859,021 | 3 | 0 | null | 2022-12-09T12:03:41 | 2015-04-30T14:32:05 | Python | UTF-8 | Python | false | false | 11,615 | py | # -*- coding: utf-8 -*-
import argparse
# Long help texts for the argparse parser built in parse_args() below.
DESCRIPTION = """Fast Downward driver script.
Input files can be either a PDDL problem file (with an optional PDDL domain
file), in which case the driver runs all three planner components, or a SAS+
preprocessor output file, in which case the driver runs just the search
component. This default behaviour can be overridden with the options below.
Arguments given before the specified input files are interpreted by the driver
script ("driver options"). Arguments given after the input files are passed on
to the planner components ("component options"). In exceptional cases where no
input files are needed, use "--" to separate driver from component options. In
even more exceptional cases where input files begin with "--", use "--" to
separate driver options from input files and also to separate input files from
component options.
By default, component options are passed to the search component. Use
"--translate-options", "--preprocess-options" or "--search-options" within the
component options to override the default for the following options, until
overridden again. (See below for examples.)"""

# (description, command) pairs rendered into EPILOG below.
EXAMPLES = [
    ("Translate and preprocess, then find a plan with A* + LM-Cut:",
     ["./fast-downward.py", "../benchmarks/gripper/prob01.pddl",
      "--search", '"astar(lmcut())"']),
    ("Translate and preprocess, run no search:",
     ["./fast-downward.py", "--translate", "--preprocess",
      "../benchmarks/gripper/prob01.pddl"]),
    ("Run the search component in debug mode (with assertions enabled):",
     ["./fast-downward.py", "--debug", "output", "--search", '"astar(ipdb())"']),
    ("Pass options to translator and search components:",
     ["./fast-downward.py", "../benchmarks/gripper/prob01.pddl",
      "--translate-options", "--relaxed",
      "--search-options", "--search", '"astar(lmcut())"']),
]

EPILOG = """component options:
--translate-options OPTION1 OPTION2 ...
--preprocess-options OPTION1 OPTION2 ...
--search-options OPTION1 OPTION2 ...
pass OPTION1 OPTION2 ... to specified planner component
(default: pass component options to search)
Examples:
%s
""" % "\n\n".join("%s\n%s" % (desc, " ".join(cmd)) for desc, cmd in EXAMPLES)
class RawHelpFormatter(argparse.HelpFormatter):
    """Preserve newlines and spacing."""
    def _fill_text(self, text, width, indent):
        # Keep the text exactly as written, only prefixing each line with
        # the requested indent (no re-wrapping).
        return ''.join(indent + line for line in text.splitlines(True))

    def _format_args(self, action, default_metavar):
        """Show explicit help for remaining args instead of "..."."""
        if action.nargs != argparse.REMAINDER:
            return argparse.HelpFormatter._format_args(self, action, default_metavar)
        return "INPUT_FILE1 [INPUT_FILE2] [COMPONENT_OPTION ...]"
def _rindex(seq, element):
    """Like list.index, but gives the index of the *last* occurrence."""
    reversed_seq = list(reversed(seq))
    return len(reversed_seq) - 1 - reversed_seq.index(element)
def _split_off_filenames(planner_args):
    """Given the list of arguments to be passed on to the planner
    components, split it into a prefix of filenames and a suffix of
    options. Returns a pair (filenames, options).

    If a "--" separator is present, the last such separator serves as
    the border between filenames and options. The separator itself is
    not returned. (This implies that "--" can be a filename, but never
    an option to a planner component.)

    If no such separator is present, the first argument that begins
    with "-" and consists of at least two characters starts the list
    of options, and all previous arguments are filenames."""
    if "--" in planner_args:
        split_at = _rindex(planner_args, "--")
        del planner_args[split_at]
    else:
        split_at = 0
        for candidate in planner_args:
            # A lone "-" is treated as a filename because by common
            # convention it denotes stdin or stdout, and we might want
            # to support this later.
            if candidate.startswith("-") and candidate != "-":
                break
            split_at += 1
    return planner_args[:split_at], planner_args[split_at:]
def _split_planner_args(parser, args):
    """Partition args.planner_args, the list of arguments for the
    planner components, into args.filenames, args.translate_options,
    args.preprocess_options and args.search_options. Modifies args
    directly and removes the original args.planner_args list."""
    args.filenames, options = _split_off_filenames(args.planner_args)

    args.translate_options = []
    args.preprocess_options = []
    args.search_options = []

    # Options before any --xxx-options marker go to search (the default);
    # each marker redirects everything that follows, until the next marker.
    destinations = {
        "--translate-options": args.translate_options,
        "--preprocess-options": args.preprocess_options,
        "--search-options": args.search_options,
    }
    current = args.search_options
    for token in options:
        if token in destinations:
            current = destinations[token]
        else:
            current.append(token)
def _check_mutex_args(parser, args, required=False):
    """Report a parser error when more than one (or, if `required`,
    fewer than one) of the mutually exclusive options in `args` is
    specified. `args` is a list of (option_name, is_specified) pairs."""
    specified = [name for name, is_specified in args if is_specified]
    if len(specified) > 1:
        parser.error("cannot combine %s with %s" % (specified[0], specified[1]))
    if required and not specified:
        parser.error("exactly one of {%s} has to be specified" %
                     ", ".join(name for name, _ in args))
def _looks_like_search_input(filename):
    # We don't currently have a good way to distinguish preprocess and
    # search inputs without going through most of the file, so we
    # don't even try: only the first line is inspected.
    with open(filename) as input_file:
        return input_file.readline().rstrip() == "begin_version"
def _set_components_automatically(parser, args):
    """Guess which planner components to run based on the specified
    filenames and set args.components accordingly. Currently
    implements some simple heuristics:

    1. If there is exactly one input file and it looks like a
       Fast-Downward-generated file, run search only.
    2. Otherwise, run all components."""
    single_search_file = (
        len(args.filenames) == 1 and _looks_like_search_input(args.filenames[0]))
    if single_search_file:
        args.components = ["search"]
    else:
        args.components = ["translate", "preprocess", "search"]
def _set_components_and_inputs(parser, args):
    """Set args.components to the planner components to be run
    and set args.translate_inputs, args.preprocess_input and
    args.search_input to the correct input filenames.
    Rules:
    1. If any --run-xxx option is specified, then the union
    of the specified components is run.
    2. It is an error to specify running the translator and
    search, but not the preprocessor.
    3. If nothing is specified, use automatic rules. See
    separate function."""
    args.components = []
    if args.translate or args.run_all:
        args.components.append("translate")
    if args.preprocess or args.run_all:
        args.components.append("preprocess")
    if args.search or args.run_all:
        args.components.append("search")
    if args.components == ["translate", "search"]:
        parser.error("cannot run translator and search without preprocessor")
    if not args.components:
        _set_components_automatically(parser, args)
    # Defaults for the components that are NOT first in the pipeline;
    # they consume the previous component's output file.
    args.translate_inputs = []
    args.preprocess_input = "output.sas"
    args.search_input = "output"
    assert args.components
    first = args.components[0]
    num_files = len(args.filenames)
    # When passing --help to any of the components (or -h to the
    # translator), we don't require input filenames and silently
    # swallow any that are provided. This is undocumented to avoid
    # cluttering the driver's --help output.
    if first == "translate":
        if "--help" in args.translate_options or "-h" in args.translate_options:
            args.translate_inputs = []
        elif num_files in [1, 2]:
            args.translate_inputs = args.filenames
        else:
            parser.error("translator needs one or two input files")
    elif first == "preprocess":
        if "--help" in args.preprocess_options:
            args.preprocess_input = None
        elif num_files == 1:
            args.preprocess_input, = args.filenames
        else:
            parser.error("preprocessor needs exactly one input file")
    elif first == "search":
        if "--help" in args.search_options:
            args.search_input = None
        elif num_files == 1:
            args.search_input, = args.filenames
        else:
            parser.error("search needs exactly one input file")
    else:
        assert False, first
def parse_args(input_args):
    """Parse the driver's command-line arguments.

    Returns an argparse namespace whose planner_args have been split
    per component and whose components/input filenames have been
    resolved (see _split_planner_args / _set_components_and_inputs).
    Exits with an error message on invalid combinations.
    """
    parser = argparse.ArgumentParser(
        description=DESCRIPTION, epilog=EPILOG,
        formatter_class=RawHelpFormatter,
        add_help=False)

    help_options = parser.add_argument_group(
        title=("driver options that show information and exit "
               "(don't run planner)"))
    # We manually add the help option because we want to control
    # how it is grouped in the output.
    help_options.add_argument(
        "-h", "--help",
        action="help", default=argparse.SUPPRESS,
        help="show this help message and exit")

    components = parser.add_argument_group(
        title=("driver options selecting the planner components to be run\n"
               "(may select several; default: auto-select based on input file(s))"))
    components.add_argument(
        "--run-all", action="store_true",
        help="run all components of the planner")
    components.add_argument(
        "--translate", action="store_true",
        help="run translator component")
    components.add_argument(
        "--preprocess", action="store_true",
        help="run preprocessor component")
    components.add_argument(
        "--search", action="store_true",
        help="run search component")

    driver_other = parser.add_argument_group(
        title="other driver options")
    driver_other.add_argument(
        "--debug", action="store_true",
        help="use debug mode for search component")
    driver_other.add_argument(
        "--log-level", choices=["debug", "info", "warning"],
        default="info",
        help="set log level (most verbose: debug; least verbose: warning; default: %(default)s)")
    driver_other.add_argument(
        "--plan-file", metavar="FILE", default="sas_plan",
        help="write plan(s) to FILE (default: %(default)s; anytime configurations append .1, .2, ...)")
    # BUGFIX: --cwd is a general driver option, not a component
    # selector. It was registered with the "components" group, which
    # made it show up under the wrong section of --help.
    driver_other.add_argument(
        "--cwd", metavar="DIR", default=".",
        help="run everything in cwd to DIR (default: %(default)s)")

    parser.add_argument(
        "planner_args", nargs=argparse.REMAINDER,
        help="file names and options passed on to planner components")

    # Using argparse.REMAINDER relies on the fact that the first
    # argument that doesn't belong to the driver doesn't look like an
    # option, i.e., doesn't start with "-". This is usually satisfied
    # because the argument is a filename; in exceptional cases, "--"
    # can be used as an explicit separator. For example, "./fast-downward.py --
    # --help" passes "--help" to the search code.
    args = parser.parse_args(input_args)

    _split_planner_args(parser, args)
    _set_components_and_inputs(parser, args)
    return args
| [
"[email protected]"
]
| |
c162fe99b26ec56068d61a2d04055ad5804cbeaf | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /vYYfFAAfjoc8crCqu_5.py | c815b8937d476932feaa5ae13066bc8fc49dac5e | []
| no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py |
def tree(h):
    """Draw an ASCII tree of height h.

    Returns a list of h strings, each 2*h-1 characters wide, with an
    odd-width run of '#' centered between spaces on every row.
    """
    width = 2 * h - 1
    return [("#" * (2 * level + 1)).center(width) for level in range(h)]
| [
"[email protected]"
]
| |
a4a300afe9003b66876d7c1ee2857fec4542e32e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/4/usersdata/145/3314/submittedfiles/swamee.py | 9de554dec9e9faab31dbced293cb3abc50b8bb40 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
#entrada
f=input('digite um valor de f:')
L=input('digite um valor de L:')
Q=input('digite um valor de Q:')
deltaH=input('digite um valor de deltaH:')
V=input('digite um valor de v:')
#processamento
g=9,81
e=0,000002
D=(8*f*L*(Q**2)/((math.pi**2)*g*deltaH))**0.2
Rey=(4*Q)/(math.pi*D*V))
k=0.25/(math.log10((e/(3.7*D))+(5.74/(Rey**0.9))))**2
print('D=%.4f'%D)
print('Rey=%.4f'%Rey)
print('k=%.4f'%k) | [
"[email protected]"
]
| |
1ee2b14448aace3b16487246053c468adc039ba6 | 78f1cc341cd6313d02b34d910eec4e9b2745506a | /p02_personal_summary/p13_lee_yung_seong/p03_week/p02_thursday/C5.11.py | 6bb587db4fe5e5cc69b337b6bfe6754cab7e0d67 | []
| no_license | python-cookbook/PythonStudy | a4855621d52eae77537bffb01aae7834a0656392 | cdca17e9734479c760bef188dcb0e183edf8564a | refs/heads/master | 2021-01-20T17:23:35.823875 | 2017-07-30T10:56:13 | 2017-07-30T10:56:13 | 90,873,920 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | #경로 다루기
#문제
#기본 파일 이름, 디렉터리 이름, 절대 경로 등을 찾기 위해 경로를 다루어야 한다.
##해결
#경로를 다루기 위해서 os.path 모듈의 함수를 사용한다. 몇몇 기능을 예제를 통해 살펴보자.
import os
path = '/users/beazley/Data/data.csv'
os.path.basename(path)#경로의 마지막 부분
#디렉터리 이름
os.path.dirname(path)
#합치기
os.path.join('tmp','data',os.path.basename(path))
#사용자의 홈 디렉토리 펼치기
path = '~/Data/data.csv'
os.path.expanduser(path)
#파일 확장자 나누기
os.path.splitext(path)
#토론
#파일 이름을 다루기 위해서 문자열에 관련된 코드를 직접 작성하지 말고 os.path 모듈을 사용해야 한다. 이는 이식성과도 어느 정도 관련이 있다.
#os path 모듈은 unix와 윈도우의 차이점을 알고 자동으로 처리한다. | [
"[email protected]"
]
| |
aea80bf4ca2d9742e5bb0dc0e1f750e6e39a75b0 | 78da694dc955639c5a9f64e2d83acee4d13fd931 | /socialadmin/admin.py | bda4a33cb121ea5517b5a0e1bdd106a82d435037 | []
| no_license | toluwanicareer/kemibox | f255e73f71c824e780d528e47f37ec7ebca35f60 | 641808e70545826c536ed4062276b129414c2c04 | refs/heads/master | 2020-03-12T17:04:55.299158 | 2018-04-24T16:12:52 | 2018-04-24T16:12:52 | 130,729,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.contrib import admin

from .models import KemiBox, Post

# Make both models manageable through the default Django admin site.
for _model in (KemiBox, Post):
    admin.site.register(_model)
"[email protected]"
]
| |
52acd72b04938eb85d82c661482aa54387aac381 | d65128e38be0243f279e0d72ef85e7d3c5e116ca | /base/site-packages/django/bin/daily_cleanup.py | 5a2ce210f403ce4de577568cf3de95ebcfb94d42 | [
"Apache-2.0"
]
| permissive | ZxwZero/fastor | 19bfc568f9a68f1447c2e049428330ade02d451d | dd9e299e250362802032d1984801bed249e36d8d | refs/heads/master | 2021-06-26T06:40:38.555211 | 2021-06-09T02:05:38 | 2021-06-09T02:05:38 | 229,753,500 | 1 | 1 | Apache-2.0 | 2019-12-23T12:59:25 | 2019-12-23T12:59:24 | null | UTF-8 | Python | false | false | 441 | py | #!/usr/bin/env python
"""
Daily cleanup job.
Can be run as a cronjob to clean out old data from the database (only expired
sessions at the moment).
"""
import warnings
from django.core import management
if __name__ == "__main__":
warnings.warn(
"The `daily_cleanup` script has been deprecated "
"in favor of `django-admin.py clearsessions`.",
DeprecationWarning)
management.call_command('clearsessions')
| [
"[email protected]"
]
| |
ad9951dd21e5e5802f28c37bba8d655b7c3d1314 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2/nallo/problemB.py | c7cee17f3977f567c41df66e67f27d3beef12abb | []
| no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,447 | py | """
se inizio con -
cerca il primo +
se trovo il +
cambia tutti i - in + fino al + trovato compreso
mosse++
se non trovo il +
sol = mosse + 1
se inizio con +
cerca il primo -
se trovo il -
cambia tutti i + in - fino al - trovato compreso
mosse++
se non trovo il meno
sol = mosse
"""
def solve(test_case):
s = raw_input()
moves = 0
# print "Start: " + s
while 1:
if s[0]=='-':
plus_index = s.find("+")
if plus_index!=-1:
# cambia tutti i - in + fino al + trovato compreso
replacing_str = plus_index * "+"
s = replacing_str + s[plus_index:]
# print "Debug: " + s
moves += 1
else:
print "Case #" + str(test_case) + ": " + str(moves+1)
return
else:
minus_index = s.find("-")
if minus_index!=-1:
# cambia tutti i + in - fino al - trovato compreso
replacing_str = minus_index * "-"
s = replacing_str + s[minus_index:]
# print "Debug: " + s
moves += 1
else:
print "Case #" + str(test_case) + ": " + str(moves)
return
def main():
    # First input line holds the number of test cases; cases are 1-indexed.
    t = int(raw_input())
    for i in xrange(t):
        solve(i+1)

if __name__=="__main__":
    main()
| [
"[[email protected]]"
]
| |
0eb8edd55910aa04bea4ace48cfb3159cb268fc7 | 26f6313772161851b3b28b32a4f8d255499b3974 | /Python/SequentialDigits.py | 426eff307728f7e423f3c99f70c5007a2d736e4c | []
| no_license | here0009/LeetCode | 693e634a3096d929e5c842c5c5b989fa388e0fcd | f96a2273c6831a8035e1adacfa452f73c599ae16 | refs/heads/master | 2023-06-30T19:07:23.645941 | 2021-07-31T03:38:51 | 2021-07-31T03:38:51 | 266,287,834 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | """
An integer has sequential digits if and only if each digit in the number is one more than the previous digit.
Return a sorted list of all the integers in the range [low, high] inclusive that have sequential digits.
Example 1:
Input: low = 100, high = 300
Output: [123,234]
Example 2:
Input: low = 1000, high = 13000
Output: [1234,2345,3456,4567,5678,6789,12345]
Constraints:
10 <= low <= high <= 10^9
"""
class Solution:
    def sequentialDigits(self, low: int, high: int) -> list:
        """Return, in increasing order, every integer in [low, high]
        whose digits are consecutive increasing digits (123, 4567, ...).

        Every such number is a substring of "123456789". Enumerating
        candidates shortest-length-first (and left-to-right within a
        length) yields them in increasing numeric order, so no sort is
        needed. The original dead variable `start_num` was removed.
        """
        digits = "123456789"
        res = []
        # Only lengths between len(low) and len(high) can land in range;
        # lengths above 9 produce an empty inner range automatically.
        for length in range(len(str(low)), len(str(high)) + 1):
            for start in range(10 - length):
                num = int(digits[start:start + length])
                if low <= num <= high:
                    res.append(num)
        return res
# Ad-hoc smoke test: exercise the two examples from the problem
# statement and print the results.
s = Solution()
low = 100
high = 300
print(s.sequentialDigits(low, high))
low = 1000
high = 13000
print(s.sequentialDigits(low, high))
| [
"[email protected]"
]
| |
51262aa9b2128373dcc9dbe5997da0e04eaeca86 | 881ca022fb16096610b4c7cec84910fbd304f52b | /libs/scapy/contrib/automotive/someip.py | 8c0e8d54125e818b9e0beb7bc914fcc54de11632 | []
| no_license | mdsakibur192/esp32_bluetooth_classic_sniffer | df54a898c9b4b3e2b5d85b1c00dd597d52844d9f | 7e8be27455f1d271fb92c074cb5118cc43854561 | refs/heads/master | 2023-07-31T14:29:22.989311 | 2021-09-08T11:18:21 | 2021-09-08T11:18:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,154 | py | # MIT License
# Copyright (c) 2018 Jose Amores
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Sebastian Baar <[email protected]>
# This program is published under a GPLv2 license
# scapy.contrib.description = Scalable service-Oriented MiddlewarE/IP (SOME/IP)
# scapy.contrib.status = loads
import ctypes
import collections
import struct
from scapy.layers.inet import TCP, UDP
from scapy.layers.inet6 import IP6Field
from scapy.compat import raw, orb
from scapy.config import conf
from scapy.modules.six.moves import range
from scapy.packet import Packet, Raw, bind_top_down, bind_bottom_up
from scapy.fields import XShortField, BitEnumField, ConditionalField, \
BitField, XBitField, IntField, XByteField, ByteEnumField, \
ShortField, X3BytesField, StrLenField, IPField, FieldLenField, \
PacketListField, XIntField
class SOMEIP(Packet):
    """ SOME/IP Packet."""

    # Header field defaults.
    PROTOCOL_VERSION = 0x01
    INTERFACE_VERSION = 0x01
    # Number of header bytes covered by the "len" field in addition to
    # the payload (8 for plain SOME/IP, 12 when the SOME/IP-TP
    # offset/flags word is present).
    LEN_OFFSET = 0x08
    LEN_OFFSET_TP = 0x0c

    # Message types.
    TYPE_REQUEST = 0x00
    TYPE_REQUEST_NO_RET = 0x01
    TYPE_NOTIFICATION = 0x02
    TYPE_REQUEST_ACK = 0x40
    TYPE_REQUEST_NORET_ACK = 0x41
    TYPE_NOTIFICATION_ACK = 0x42
    TYPE_RESPONSE = 0x80
    TYPE_ERROR = 0x81
    TYPE_RESPONSE_ACK = 0xc0
    TYPE_ERROR_ACK = 0xc1
    # SOME/IP-TP (segmented transport) message types.
    TYPE_TP_REQUEST = 0x20
    TYPE_TP_REQUEST_NO_RET = 0x21
    TYPE_TP_NOTIFICATION = 0x22
    TYPE_TP_RESPONSE = 0x23
    TYPE_TP_ERROR = 0x24

    # Return codes.
    RET_E_OK = 0x00
    RET_E_NOT_OK = 0x01
    RET_E_UNKNOWN_SERVICE = 0x02
    RET_E_UNKNOWN_METHOD = 0x03
    RET_E_NOT_READY = 0x04
    RET_E_NOT_REACHABLE = 0x05
    RET_E_TIMEOUT = 0x06
    RET_E_WRONG_PROTOCOL_V = 0x07
    RET_E_WRONG_INTERFACE_V = 0x08
    RET_E_MALFORMED_MSG = 0x09
    RET_E_WRONG_MESSAGE_TYPE = 0x0a

    # Size of a SOME/IP message with an empty payload.
    _OVERALL_LEN_NOPAYLOAD = 16

    name = "SOME/IP"

    fields_desc = [
        XShortField("srv_id", 0),
        # The high bit of the second 16-bit word selects whether the
        # remaining 15 bits are a method ID or an event ID.
        BitEnumField("sub_id", 0, 1, {0: "METHOD_ID", 1: "EVENT_ID"}),
        ConditionalField(XBitField("method_id", 0, 15),
                         lambda pkt: pkt.sub_id == 0),
        ConditionalField(XBitField("event_id", 0, 15),
                         lambda pkt: pkt.sub_id == 1),
        # None -> computed in post_build from the payload size.
        IntField("len", None),
        XShortField("client_id", 0),
        XShortField("session_id", 0),
        XByteField("proto_ver", PROTOCOL_VERSION),
        XByteField("iface_ver", INTERFACE_VERSION),
        ByteEnumField("msg_type", TYPE_REQUEST, {
            TYPE_REQUEST: "REQUEST",
            TYPE_REQUEST_NO_RET: "REQUEST_NO_RETURN",
            TYPE_NOTIFICATION: "NOTIFICATION",
            TYPE_REQUEST_ACK: "REQUEST_ACK",
            TYPE_REQUEST_NORET_ACK: "REQUEST_NO_RETURN_ACK",
            TYPE_NOTIFICATION_ACK: "NOTIFICATION_ACK",
            TYPE_RESPONSE: "RESPONSE",
            TYPE_ERROR: "ERROR",
            TYPE_RESPONSE_ACK: "RESPONSE_ACK",
            TYPE_ERROR_ACK: "ERROR_ACK",
            TYPE_TP_REQUEST: "TP_REQUEST",
            TYPE_TP_REQUEST_NO_RET: "TP_REQUEST_NO_RETURN",
            TYPE_TP_NOTIFICATION: "TP_NOTIFICATION",
            TYPE_TP_RESPONSE: "TP_RESPONSE",
            TYPE_TP_ERROR: "TP_ERROR",
        }),
        ByteEnumField("retcode", 0, {
            RET_E_OK: "E_OK",
            RET_E_NOT_OK: "E_NOT_OK",
            RET_E_UNKNOWN_SERVICE: "E_UNKNOWN_SERVICE",
            RET_E_UNKNOWN_METHOD: "E_UNKNOWN_METHOD",
            RET_E_NOT_READY: "E_NOT_READY",
            RET_E_NOT_REACHABLE: "E_NOT_REACHABLE",
            RET_E_TIMEOUT: "E_TIMEOUT",
            RET_E_WRONG_PROTOCOL_V: "E_WRONG_PROTOCOL_VERSION",
            RET_E_WRONG_INTERFACE_V: "E_WRONG_INTERFACE_VERSION",
            RET_E_MALFORMED_MSG: "E_MALFORMED_MESSAGE",
            RET_E_WRONG_MESSAGE_TYPE: "E_WRONG_MESSAGE_TYPE",
        }),
        # SOME/IP-TP trailer word, only present for TP message types.
        ConditionalField(BitField("offset", 0, 28),
                         lambda pkt: SOMEIP._is_tp(pkt)),
        ConditionalField(BitField("res", 0, 3),
                         lambda pkt: SOMEIP._is_tp(pkt)),
        ConditionalField(BitField("more_seg", 0, 1),
                         lambda pkt: SOMEIP._is_tp(pkt))
    ]

    def post_build(self, pkt, pay):
        """Patch the 32-bit "len" field (bytes 4..8 of the header) with
        the computed length when it was left as None, then append the
        payload."""
        length = self.len
        if length is None:
            if SOMEIP._is_tp(self):
                length = SOMEIP.LEN_OFFSET_TP + len(pay)
            else:
                length = SOMEIP.LEN_OFFSET + len(pay)
        pkt = pkt[:4] + struct.pack("!I", length) + pkt[8:]
        return pkt + pay

    def answers(self, other):
        """Return 1 if this packet can be an answer to *other*.

        Fire-and-forget message types (REQUEST_NO_RETURN and
        NOTIFICATION variants) never have answers; otherwise defer to
        the payloads."""
        if other.__class__ == self.__class__:
            if self.msg_type in [SOMEIP.TYPE_REQUEST_NO_RET,
                                 SOMEIP.TYPE_REQUEST_NORET_ACK,
                                 SOMEIP.TYPE_NOTIFICATION,
                                 SOMEIP.TYPE_TP_REQUEST_NO_RET,
                                 SOMEIP.TYPE_TP_NOTIFICATION]:
                return 0
            return self.payload.answers(other.payload)
        return 0

    @staticmethod
    def _is_tp(pkt):
        """Returns true if pkt is using SOMEIP-TP, else returns false."""
        tp = [SOMEIP.TYPE_TP_REQUEST, SOMEIP.TYPE_TP_REQUEST_NO_RET,
              SOMEIP.TYPE_TP_NOTIFICATION, SOMEIP.TYPE_TP_RESPONSE,
              SOMEIP.TYPE_TP_ERROR]
        if isinstance(pkt, Packet):
            return pkt.msg_type in tp
        else:
            # Raw bytes: msg_type lives at offset 15 of the header.
            return pkt[15] in tp

    def fragment(self, fragsize=1392):
        """Fragment SOME/IP-TP

        Splits the payload into chunks of at most *fragsize* bytes,
        producing one packet per chunk with more_seg set on all but the
        last and offset advanced in 16-byte units."""
        # Find the index of this layer from the outermost layer so the
        # copies below can address it with p[fnb].
        fnb = 0
        fl = self
        lst = list()
        while fl.underlayer is not None:
            fnb += 1
            fl = fl.underlayer
        for p in fl:
            s = raw(p[fnb].payload)
            # NOTE(review): when len(s) is an exact multiple of fragsize
            # this yields one extra, empty trailing fragment — confirm
            # whether that is intended.
            nb = (len(s) + fragsize) // fragsize
            for i in range(nb):
                q = p.copy()
                del q[fnb].payload
                q[fnb].len = SOMEIP.LEN_OFFSET_TP + \
                    len(s[i * fragsize:(i + 1) * fragsize])
                q[fnb].more_seg = 1
                if i == nb - 1:
                    # Last chunk: clear the "more segments" flag.
                    q[fnb].more_seg = 0
                # TP offset is expressed in units of 16 bytes.
                q[fnb].offset += i * fragsize // 16
                r = conf.raw_layer(load=s[i * fragsize:(i + 1) * fragsize])
                r.overload_fields = p[fnb].payload.overload_fields.copy()
                q.add_payload(r)
                lst.append(q)
        return lst
def _bind_someip_layers():
    """Bind SOME/IP to UDP/TCP on the SOME/IP port range 30490-30504.

    Top-down: a SOMEIP layer built on UDP defaults to port 30490.
    Bottom-up: traffic seen on any of the 15 ports (either direction,
    UDP or TCP) is dissected as SOME/IP."""
    bind_top_down(UDP, SOMEIP, sport=30490, dport=30490)

    for i in range(15):
        bind_bottom_up(UDP, SOMEIP, sport=30490 + i)
        bind_bottom_up(TCP, SOMEIP, sport=30490 + i)
        bind_bottom_up(UDP, SOMEIP, dport=30490 + i)
        bind_bottom_up(TCP, SOMEIP, dport=30490 + i)


# Register the bindings at import time.
_bind_someip_layers()
class _SDPacketBase(Packet):
    """ base class to be used among all SD Packet definitions."""

    def extract_padding(self, s):
        # SD sub-records are fixed-size: never consume trailing bytes
        # as our payload; hand them back to the parent dissector.
        return "", s
# SD entry type codes: service entries (find/offer) ...
SDENTRY_TYPE_SRV_FINDSERVICE = 0x00
SDENTRY_TYPE_SRV_OFFERSERVICE = 0x01
SDENTRY_TYPE_SRV = (SDENTRY_TYPE_SRV_FINDSERVICE,
                    SDENTRY_TYPE_SRV_OFFERSERVICE)
# ... and eventgroup entries (subscribe / subscribe-ack).
SDENTRY_TYPE_EVTGRP_SUBSCRIBE = 0x06
SDENTRY_TYPE_EVTGRP_SUBSCRIBE_ACK = 0x07
SDENTRY_TYPE_EVTGRP = (SDENTRY_TYPE_EVTGRP_SUBSCRIBE,
                       SDENTRY_TYPE_EVTGRP_SUBSCRIBE_ACK)
# Every SD entry record is exactly 16 bytes on the wire.
SDENTRY_OVERALL_LEN = 16
def _MAKE_SDENTRY_COMMON_FIELDS_DESC(type):
    """Return the field list shared by all SD entry records, with the
    entry type byte defaulting to *type*.

    (Note: the parameter shadows the builtin `type`; kept for API
    stability.)"""
    return [
        XByteField("type", type),
        # Indexes into the option array and the number of options
        # referenced by each of the two option runs.
        XByteField("index_1", 0),
        XByteField("index_2", 0),
        XBitField("n_opt_1", 0, 4),
        XBitField("n_opt_2", 0, 4),
        XShortField("srv_id", 0),
        XShortField("inst_id", 0),
        XByteField("major_ver", 0),
        X3BytesField("ttl", 0)
    ]
class SDEntry_Service(_SDPacketBase):
    """SD service entry (FindService / OfferService): the common entry
    fields followed by a 32-bit minor version."""
    name = "Service Entry"

    fields_desc = _MAKE_SDENTRY_COMMON_FIELDS_DESC(
        SDENTRY_TYPE_SRV_FINDSERVICE)
    fields_desc += [
        XIntField("minor_ver", 0)
    ]
class SDEntry_EventGroup(_SDPacketBase):
    """SD eventgroup entry (Subscribe / SubscribeAck): the common entry
    fields followed by reserved bits, a counter and the eventgroup ID."""
    name = "Eventgroup Entry"

    fields_desc = _MAKE_SDENTRY_COMMON_FIELDS_DESC(
        SDENTRY_TYPE_EVTGRP_SUBSCRIBE)
    fields_desc += [
        XBitField("res", 0, 12),
        XBitField("cnt", 0, 4),
        XShortField("eventgroup_id", 0)
    ]
def _sdentry_class(payload, **kargs):
    """Dispatch raw SD entry bytes to the matching entry class.

    The first byte of an entry record is its type code. Unknown type
    codes now fall back to Raw (mirroring _sdoption_class) instead of
    leaving cls as None and raising TypeError on the call below.
    """
    TYPE_PAYLOAD_I = 0
    pl_type = orb(payload[TYPE_PAYLOAD_I])

    if pl_type in SDENTRY_TYPE_SRV:
        cls = SDEntry_Service
    elif pl_type in SDENTRY_TYPE_EVTGRP:
        cls = SDEntry_EventGroup
    else:
        # Keep undecodable entries as raw bytes rather than crashing.
        cls = Raw

    return cls(payload, **kargs)
def _sdoption_class(payload, **kargs):
    """Dispatch raw SD option bytes to the matching option class.

    The option type code lives at byte offset 2 (after the 16-bit
    length field); unknown types fall back to Raw."""
    pl_type = orb(payload[2])

    cls = {
        SDOPTION_CFG_TYPE: SDOption_Config,
        SDOPTION_LOADBALANCE_TYPE: SDOption_LoadBalance,
        SDOPTION_IP4_ENDPOINT_TYPE: SDOption_IP4_EndPoint,
        SDOPTION_IP4_MCAST_TYPE: SDOption_IP4_Multicast,
        SDOPTION_IP4_SDENDPOINT_TYPE: SDOption_IP4_SD_EndPoint,
        SDOPTION_IP6_ENDPOINT_TYPE: SDOption_IP6_EndPoint,
        SDOPTION_IP6_MCAST_TYPE: SDOption_IP6_Multicast,
        SDOPTION_IP6_SDENDPOINT_TYPE: SDOption_IP6_SD_EndPoint
    }.get(pl_type, Raw)

    return cls(payload, **kargs)
# SD Option
# Type codes and fixed on-wire lengths (value of the option's "len"
# field) for each SD option kind.
SDOPTION_CFG_TYPE = 0x01
SDOPTION_LOADBALANCE_TYPE = 0x02
SDOPTION_LOADBALANCE_LEN = 0x05
SDOPTION_IP4_ENDPOINT_TYPE = 0x04
SDOPTION_IP4_ENDPOINT_LEN = 0x0009
SDOPTION_IP4_MCAST_TYPE = 0x14
SDOPTION_IP4_MCAST_LEN = 0x0009
SDOPTION_IP4_SDENDPOINT_TYPE = 0x24
SDOPTION_IP4_SDENDPOINT_LEN = 0x0009
SDOPTION_IP6_ENDPOINT_TYPE = 0x06
SDOPTION_IP6_ENDPOINT_LEN = 0x0015
SDOPTION_IP6_MCAST_TYPE = 0x16
SDOPTION_IP6_MCAST_LEN = 0x0015
SDOPTION_IP6_SDENDPOINT_TYPE = 0x26
SDOPTION_IP6_SDENDPOINT_LEN = 0x0015
def _MAKE_COMMON_SDOPTION_FIELDS_DESC(type, length=None):
    """Return the header fields common to all SD options: length, type
    byte and a reserved byte. length=None leaves it to be computed."""
    return [
        ShortField("len", length),
        XByteField("type", type),
        XByteField("res_hdr", 0)
    ]
def _MAKE_COMMON_IP_SDOPTION_FIELDS_DESC():
    """Return the trailer fields shared by all IP endpoint options:
    reserved byte, L4 protocol selector and port number."""
    return [
        XByteField("res_tail", 0),
        ByteEnumField("l4_proto", 0x11, {0x06: "TCP", 0x11: "UDP"}),
        ShortField("port", 0)
    ]
class SDOption_Config(_SDPacketBase):
    """SD configuration option: carries a null-terminated list of
    length-prefixed "key=value" strings."""
    name = "Config Option"

    fields_desc = _MAKE_COMMON_SDOPTION_FIELDS_DESC(SDOPTION_CFG_TYPE) + [
        StrLenField("cfg_str", "\x00", length_from=lambda pkt: pkt.len - 1)
    ]

    def post_build(self, pkt, pay):
        """Fill in the 16-bit length field when it was left as None."""
        if self.len is None:
            length = len(self.cfg_str) + 1  # res_hdr field takes 1 byte
            pkt = struct.pack("!H", length) + pkt[2:]
        return pkt + pay

    @staticmethod
    def make_string(data):
        # Build a valid null-terminated configuration string from a dict or a
        # list with key-value pairs.
        #
        # Example:
        #  >>> SDOption_Config.make_string({ "hello": "world" })
        #  b'\x0bhello=world\x00'
        #
        #  >>> SDOption_Config.make_string([
        #  ...     ("x", "y"),
        #  ...     ("abc", "def"),
        #  ...     ("123", "456")
        #  ... ])
        #  b'\x03x=y\x07abc=def\x07123=456\x00'

        if isinstance(data, dict):
            data = data.items()

        # combine entries
        data = ("{}={}".format(k, v) for k, v in data)
        # prepend length
        data = ("{}{}".format(chr(len(v)), v) for v in data)
        # concatenate
        data = "".join(data)
        data += "\x00"

        return data.encode("utf8")
class SDOption_LoadBalance(_SDPacketBase):
    """SD load-balancing option: priority and weight for server
    selection among multiple offers."""
    name = "LoadBalance Option"

    fields_desc = _MAKE_COMMON_SDOPTION_FIELDS_DESC(
        SDOPTION_LOADBALANCE_TYPE, SDOPTION_LOADBALANCE_LEN)
    fields_desc += [
        ShortField("priority", 0),
        ShortField("weight", 0)
    ]
class SDOption_IP4_EndPoint(_SDPacketBase):
    """SD IPv4 endpoint option: unicast address, L4 protocol and port."""
    name = "IP4 EndPoint Option"

    fields_desc = _MAKE_COMMON_SDOPTION_FIELDS_DESC(
        SDOPTION_IP4_ENDPOINT_TYPE, SDOPTION_IP4_ENDPOINT_LEN)
    fields_desc += [
        IPField("addr", "0.0.0.0"),
    ] + _MAKE_COMMON_IP_SDOPTION_FIELDS_DESC()
class SDOption_IP4_Multicast(_SDPacketBase):
    """SD IPv4 multicast option: multicast address, L4 protocol and port."""
    name = "IP4 Multicast Option"

    fields_desc = _MAKE_COMMON_SDOPTION_FIELDS_DESC(
        SDOPTION_IP4_MCAST_TYPE, SDOPTION_IP4_MCAST_LEN)
    fields_desc += [
        IPField("addr", "0.0.0.0"),
    ] + _MAKE_COMMON_IP_SDOPTION_FIELDS_DESC()
class SDOption_IP4_SD_EndPoint(_SDPacketBase):
    """SD IPv4 SD-endpoint option: address/port of the SD service itself."""
    name = "IP4 SDEndPoint Option"

    fields_desc = _MAKE_COMMON_SDOPTION_FIELDS_DESC(
        SDOPTION_IP4_SDENDPOINT_TYPE, SDOPTION_IP4_SDENDPOINT_LEN)
    fields_desc += [
        IPField("addr", "0.0.0.0"),
    ] + _MAKE_COMMON_IP_SDOPTION_FIELDS_DESC()
class SDOption_IP6_EndPoint(_SDPacketBase):
    """SD IPv6 endpoint option: unicast address, L4 protocol and port."""
    name = "IP6 EndPoint Option"

    fields_desc = _MAKE_COMMON_SDOPTION_FIELDS_DESC(
        SDOPTION_IP6_ENDPOINT_TYPE, SDOPTION_IP6_ENDPOINT_LEN)
    fields_desc += [
        IP6Field("addr", "::"),
    ] + _MAKE_COMMON_IP_SDOPTION_FIELDS_DESC()
class SDOption_IP6_Multicast(_SDPacketBase):
    """SD IPv6 multicast option: multicast address, L4 protocol and port."""
    name = "IP6 Multicast Option"

    fields_desc = _MAKE_COMMON_SDOPTION_FIELDS_DESC(
        SDOPTION_IP6_MCAST_TYPE, SDOPTION_IP6_MCAST_LEN)
    fields_desc += [
        IP6Field("addr", "::"),
    ] + _MAKE_COMMON_IP_SDOPTION_FIELDS_DESC()
class SDOption_IP6_SD_EndPoint(_SDPacketBase):
    """SD IPv6 SD-endpoint option: address/port of the SD service itself."""
    name = "IP6 SDEndPoint Option"

    fields_desc = _MAKE_COMMON_SDOPTION_FIELDS_DESC(
        SDOPTION_IP6_SDENDPOINT_TYPE, SDOPTION_IP6_SDENDPOINT_LEN)
    fields_desc += [
        IP6Field("addr", "::"),
    ] + _MAKE_COMMON_IP_SDOPTION_FIELDS_DESC()
##
# SD PACKAGE DEFINITION
##
class SD(_SDPacketBase):
    """
    SD Packet

    NOTE :   when adding 'entries' or 'options', do not use list.append()
        method but create a new list
    e.g. :  p = SD()
            p.option_array = [SDOption_Config(),SDOption_IP6_EndPoint()]
    """
    # Fixed SOME/IP header values that identify an SD message; used by
    # the bind_top_down/bind_bottom_up calls below.
    SOMEIP_MSGID_SRVID = 0xffff
    SOMEIP_MSGID_SUBID = 0x1
    SOMEIP_MSGID_EVENTID = 0x100
    SOMEIP_CLIENT_ID = 0x0000
    SOMEIP_MINIMUM_SESSION_ID = 0x0001
    SOMEIP_PROTO_VER = 0x01
    SOMEIP_IFACE_VER = 0x01
    SOMEIP_MSG_TYPE = SOMEIP.TYPE_NOTIFICATION
    SOMEIP_RETCODE = SOMEIP.RET_E_OK

    # Bit mask and shift for each flag in the 8-bit "flags" field.
    _sdFlag = collections.namedtuple('Flag', 'mask offset')
    FLAGSDEF = {
        "REBOOT": _sdFlag(mask=0x80, offset=7),
        "UNICAST": _sdFlag(mask=0x40, offset=6)
    }

    name = "SD"
    fields_desc = [
        XByteField("flags", 0),
        X3BytesField("res", 0),
        FieldLenField("len_entry_array", None,
                      length_of="entry_array", fmt="!I"),
        PacketListField("entry_array", None, cls=_sdentry_class,
                        length_from=lambda pkt: pkt.len_entry_array),
        FieldLenField("len_option_array", None,
                      length_of="option_array", fmt="!I"),
        PacketListField("option_array", None, cls=_sdoption_class,
                        length_from=lambda pkt: pkt.len_option_array)
    ]

    def get_flag(self, name):
        """Return the value (0/1) of the named flag, or None if the
        name is not defined in FLAGSDEF. Name lookup is case-insensitive."""
        name = name.upper()
        if name in self.FLAGSDEF:
            return ((self.flags & self.FLAGSDEF[name].mask) >>
                    self.FLAGSDEF[name].offset)
        else:
            return None

    def set_flag(self, name, value):
        """Set the named flag to value (only bit 0 of value is used);
        unknown names are silently ignored."""
        name = name.upper()
        if name in self.FLAGSDEF:
            # ctypes.c_ubyte keeps the complemented mask in 8-bit range.
            self.flags = (self.flags &
                          (ctypes.c_ubyte(~self.FLAGSDEF[name].mask).value)) \
                | ((value & 0x01) << self.FLAGSDEF[name].offset)

    def set_entryArray(self, entry_list):
        """Replace the entry array; a single entry is wrapped in a list."""
        if isinstance(entry_list, list):
            self.entry_array = entry_list
        else:
            self.entry_array = [entry_list]

    def set_optionArray(self, option_list):
        """Replace the option array; a single option is wrapped in a list."""
        if isinstance(option_list, list):
            self.option_array = option_list
        else:
            self.option_array = [option_list]
# Associate SD with the SOME/IP header: building an SD layer fills in
# the well-known SD message ID / version / type defaults (top-down),
# and any SOME/IP packet carrying those values has its payload
# dissected as SD (bottom-up).
bind_top_down(SOMEIP, SD,
              srv_id=SD.SOMEIP_MSGID_SRVID,
              sub_id=SD.SOMEIP_MSGID_SUBID,
              client_id=SD.SOMEIP_CLIENT_ID,
              session_id=SD.SOMEIP_MINIMUM_SESSION_ID,
              event_id=SD.SOMEIP_MSGID_EVENTID,
              proto_ver=SD.SOMEIP_PROTO_VER,
              iface_ver=SD.SOMEIP_IFACE_VER,
              msg_type=SD.SOMEIP_MSG_TYPE,
              retcode=SD.SOMEIP_RETCODE)

bind_bottom_up(SOMEIP, SD,
               srv_id=SD.SOMEIP_MSGID_SRVID,
               sub_id=SD.SOMEIP_MSGID_SUBID,
               event_id=SD.SOMEIP_MSGID_EVENTID,
               proto_ver=SD.SOMEIP_PROTO_VER,
               iface_ver=SD.SOMEIP_IFACE_VER,
               msg_type=SD.SOMEIP_MSG_TYPE,
               retcode=SD.SOMEIP_RETCODE)

# FIXME: Service Discovery messages shall be transported over UDP
#        (TR_SOMEIP_00248)
# FIXME: The port 30490 (UDP and TCP as well) shall be only used for SOME/IP-SD
#        and not used for applications communicating over SOME/IP
#        (TR_SOMEIP_00020)
| [
"[email protected]"
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.