function (string, lengths 11–56k) | repo_name (string, lengths 5–60) | features (sequence) |
---|---|---|
def test_get_py3_forks(self):
    """Mocked GitHub forks endpoint: only the Python-3 fork is reported."""
    forks_json = ('[{"html_url": "https://github.com/coagulant/progressbar-python3", "name": "progressbar-python3"},'
                  '{"html_url": "https://github.com/mick/progressbar", "name": "progressbar"}]')
    HTTPretty.register_uri(HTTPretty.GET,
                           'https://api.github.com/repos/nick/progressbar/forks',
                           forks_json)
    expected = [{'name': 'progressbar-python3',
                 'html_url': 'https://github.com/coagulant/progressbar-python3'}]
    assert Github().get_py3_forks('nick/progressbar') == expected
# | futurecolors/gopython3 | [
2,
2,
2,
13,
1379656339
] |
def test_get_build_status(self):
    """Mocked Travis CI endpoint: slug and last build state are extracted."""
    HTTPretty.register_uri(HTTPretty.GET,
                           'https://api.travis-ci.org/repos/coagulant/cleanweb',
                           '{"repo":{"slug": "coagulant/cleanweb", "last_build_state": "passed"}}')
    expected = {'html_url': 'https://travis-ci.org/coagulant/cleanweb',
                'last_build_state': 'passed'}
    assert TravisCI().get_build_status('coagulant/cleanweb') == expected
# | futurecolors/gopython3 | [
2,
2,
2,
13,
1379656339
] |
def test_get_info_without_version(self):
# Mocked PyPI JSON: classifiers advertise both py2 and py3; no version requested.
json_string = """{"info":{
"name": "Django",
"home_page": "http://www.djangoproject.com/",
"classifiers": [
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3"]
}, "urls": [{"upload_time": "2013-09-15T06:30:37"}]}"""
# Register the canned response for the versionless PyPI endpoint.
HTTPretty.register_uri(HTTPretty.GET,
"http://pypi.python.org/pypi/Django/json", json_string
)
# Only the Python-3 classifier versions should be reported, date in UTC.
assert PyPI().get_info('Django') == {
'py3_versions': ['3', '3.2', '3.3'],
'last_release_date': datetime.datetime(2013, 9, 15, 6, 30, 37, tzinfo=pytz.utc),
'name': 'Django',
'url': 'http://www.djangoproject.com/'
} | futurecolors/gopython3 | [
2,
2,
2,
13,
1379656339
] |
def __init__(self, daemon=True):
    """Set up callback bookkeeping and start the property-client thread.

    daemon: if True, this thread will not keep the process alive.
    """
    # Local cache of the last value seen for each tracked property.
    self.properties = {}
    # property name -> set of (callback, valueonly) pairs.
    self.callbacks = collections.defaultdict(set)
    # Prefix trie for wildcard subscriptions (see subscribe_prefix).
    self.prefix_callbacks = trie.trie()
    super().__init__(name='PropertyClient', daemon=daemon)
    self.start()
# | zplab/rpc-scope | [
1,
3,
1,
2,
1461965724
] |
def run(self):
    """Thread target: do not call directly."""
    self.running = True
    while True:
        name, value = self._receive_update()
        self.properties[name] = value
        # Exact-name subscribers first, then every matching prefix group.
        groups = [self.callbacks[name]]
        groups.extend(self.prefix_callbacks.values(name))
        for group in groups:
            for callback, valueonly in group:
                try:
                    if valueonly:
                        callback(value)
                    else:
                        callback(name, value)
                except Exception as e:
                    # A broken callback must not kill the receive loop.
                    print('Caught exception in PropertyClient callback:')
                    traceback.print_exception(type(e), e, e.__traceback__)
# | zplab/rpc-scope | [
1,
3,
1,
2,
1461965724
] |
def subscribe(self, property_name, callback, valueonly=False):
    """Register `callback` for updates of `property_name`.

    It is invoked as callback(new_value) when valueonly is True, else as
    callback(property_name, new_value). Multiple callbacks may be
    registered for the same property.
    """
    self.callbacks[property_name].add((callback, valueonly))
# | zplab/rpc-scope | [
1,
3,
1,
2,
1461965724
] |
def subscribe_prefix(self, property_prefix, callback):
    """Register `callback` for every property name starting with the prefix.

    Example: prefix 'camera.' matches 'camera.foo', 'camera.bar', etc.;
    the empty prefix '' matches everything. The callback is invoked as
    callback(property_name, new_value). Multiple callbacks per prefix are
    allowed.
    """
    if property_prefix not in self.prefix_callbacks:
        self.prefix_callbacks[property_prefix] = set()
    # Stored with valueonly=False so prefix callbacks always get the name.
    self.prefix_callbacks[property_prefix].add((callback, False))
# | zplab/rpc-scope | [
1,
3,
1,
2,
1461965724
] |
def _receive_update(self):
    """Return one (property_name, value) update from the server.

    Abstract: subclasses must override; expected to raise an error when
    self.running goes False.
    """
    raise NotImplementedError()
# | zplab/rpc-scope | [
1,
3,
1,
2,
1461965724
] |
def __init__(self, addr, heartbeat_sec=None, context=None, daemon=True):
    """PropertyClient that receives updates over ZeroMQ PUB/SUB.

    addr: ZeroMQ endpoint string, e.g. 'tcp://127.0.0.1:5555'.
    heartbeat_sec: optional heartbeat interval in seconds.
    context: ZeroMQ context to share; a fresh one is created when None.
    daemon: exit the client when the foreground thread exits.
    """
    self.context = zmq.Context() if context is None else context
    self.addr = addr
    self.heartbeat_sec = heartbeat_sec
    # Set once the SUB socket is connected; (un)subscriptions wait on it.
    self.connected = threading.Event()
    super().__init__(daemon)
# | zplab/rpc-scope | [
1,
3,
1,
2,
1461965724
] |
def reconnect(self):
    """Drop the connected flag and block until the connection returns."""
    self.connected.clear()
    # The receive loop sets this event again once reconnected.
    self.connected.wait()
# | zplab/rpc-scope | [
1,
3,
1,
2,
1461965724
] |
def subscribe(self, property_name, callback, valueonly=False):
    """Subscribe on the ZeroMQ socket, then register the callback."""
    # Wait until the SUB socket exists before touching it.
    self.connected.wait()
    self.socket.subscribe(property_name)
    super().subscribe(property_name, callback, valueonly)
# | zplab/rpc-scope | [
1,
3,
1,
2,
1461965724
] |
def unsubscribe(self, property_name, callback, valueonly=False):
    """Deregister the callback, then drop the socket subscription."""
    # NOTE(review): base-class unsubscribe is not visible here — assumed symmetric.
    super().unsubscribe(property_name, callback, valueonly)
    self.connected.wait()
    self.socket.unsubscribe(property_name)
# | zplab/rpc-scope | [
1,
3,
1,
2,
1461965724
] |
def subscribe_prefix(self, property_prefix, callback):
    """Subscribe to the prefix on the socket, then register the callback."""
    self.connected.wait()
    self.socket.subscribe(property_prefix)
    super().subscribe_prefix(property_prefix, callback)
# | zplab/rpc-scope | [
1,
3,
1,
2,
1461965724
] |
def unsubscribe_prefix(self, property_prefix, callback):
    """Deregister the prefix callback, then drop the socket subscription."""
    # NOTE(review): base-class unsubscribe_prefix is not visible here — assumed symmetric.
    super().unsubscribe_prefix(property_prefix, callback)
    self.connected.wait()
    self.socket.unsubscribe(property_prefix)
# | zplab/rpc-scope | [
1,
3,
1,
2,
1461965724
] |
def connect(addr, family=socket.AF_INET, bind=None):
    """Open a client stream socket connected to `addr`.

    :param addr: server address; for TCP sockets a (host, port) tuple.
    :param family: socket family (see the :mod:`socket` docs); default AF_INET.
    :param bind: optional local address to bind before connecting.
    :return: the connected green socket object.
    """
    sock = socket.socket(family, socket.SOCK_STREAM)
    if bind is not None:
        sock.bind(bind)
    sock.connect(addr)
    return sock
# | inercia/evy | [
4,
1,
4,
5,
1352288573
] |
def _stop_checker(t, server_gt, conn):
    """Wait for the handler greenthread `t`; always close `conn`.

    GreenletExit is treated as a normal shutdown; any other exception is
    forwarded to the server greenthread via kill().
    """
    try:
        try:
            t.wait()
        finally:
            # The connection must be released whatever happened to the handler.
            conn.close()
    except greenlet.GreenletExit:
        pass
    except Exception:
        kill(server_gt, *sys.exc_info())
# | inercia/evy | [
4,
1,
4,
5,
1352288573
] |
def myhandle(client_sock, client_addr):
# Example connection handler; Python 2 code (note the print *statement*).
print "client connected", client_addr | inercia/evy | [
4,
1,
4,
5,
1352288573
] |
def __init__(self, plotly_name="font", parent_name="box.hoverlabel", **kwargs):
# Auto-generated compound validator: binds the Font data class (and its
# user-facing attribute docs) into plotly's validation machinery.
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
# data_docs is a runtime string consumed by plotly's doc generator;
# its content must stay exactly as generated upstream.
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size . | plotly/python-api | [
13052,
2308,
13052,
1319,
1385013188
] |
def __init__(self, *args, **kw):
    """Capture the arguments for a later enable_system_logging() call.

    :param args: positional arguments to :func:`enable_system_logging()`.
    :param kw: keyword arguments to :func:`enable_system_logging()`.
    """
    self.args = args
    self.kw = kw
    # No syslog handler exists until the context is entered.
    self.handler = None
# | xolox/python-coloredlogs | [
500,
39,
500,
31,
1369940232
] |
def __exit__(self, exc_type=None, exc_value=None, traceback=None):
    """Disable system logging when leaving the context.

    If an exception is propagating, a warning (including traceback) is
    logged *before* the handler is removed.
    """
    if self.handler is None:
        return
    if exc_type is not None:
        logger.warning("Disabling system logging due to unhandled exception!", exc_info=True)
    # Remove from the explicitly-configured logger, else the root logger.
    target = self.kw.get('logger') or logging.getLogger()
    target.removeHandler(self.handler)
    self.handler = None
# | xolox/python-coloredlogs | [
500,
39,
500,
31,
1369940232
] |
def connect_to_syslog(address=None, facility=None, level=None):
    """Create a :class:`~logging.handlers.SysLogHandler`, or None on failure.

    :param address: device file or network address of the syslog daemon
                    (defaults to find_syslog_address()).
    :param facility: SysLogHandler facility (defaults to LOG_USER).
    :param level: handler log level (defaults to DEFAULT_LOG_LEVEL),
                  coerced through level_to_number().
    :returns: a SysLogHandler, or None when the daemon is unavailable.

    Socket types are tried in decreasing preference: SOCK_RAW (avoids
    truncation but may be unsupported), SOCK_STREAM (TCP, longer messages
    than UDP), then the handler's own default.
    """
    if not address:
        address = find_syslog_address()
    if facility is None:
        facility = logging.handlers.SysLogHandler.LOG_USER
    if level is None:
        level = DEFAULT_LOG_LEVEL
    for socktype in (socket.SOCK_RAW, socket.SOCK_STREAM, None):
        options = dict(facility=facility, address=address)
        if socktype is not None:
            options['socktype'] = socktype
        try:
            handler = logging.handlers.SysLogHandler(**options)
        except IOError:
            # IOError is a superclass of socket.error, raised when the
            # system logging daemon is unavailable; try the next type.
            continue
        handler.setLevel(level_to_number(level))
        return handler
# | xolox/python-coloredlogs | [
500,
39,
500,
31,
1369940232
] |
def is_syslog_supported():
    """Return True when syslog-style system logging should be enabled.

    The $COLOREDLOGS_SYSLOG environment variable, coerced with
    humanfriendly's coerce_boolean(), overrides the platform detection.
    Otherwise syslog is assumed usable on anything that is not Windows
    (which historically spews verbose errno 10057 warnings) or MacOS
    (cPython issue #38780 can crash the interpreter at shutdown).
    """
    override = os.environ.get("COLOREDLOGS_SYSLOG")
    if override is None:
        # Linux / UNIX: either syslog exists or stray UDP packets are harmless.
        return not (on_windows() or on_macos())
    return coerce_boolean(override)
# | xolox/python-coloredlogs | [
500,
39,
500,
31,
1369940232
] |
def __init__(self, client, config, serializer, deserializer) -> None:
    """Store the pipeline client, configuration and (de)serializers."""
    self._client = client
    self._config = config
    self._serialize = serializer
    self._deserialize = deserializer
# | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def get_long_running_output(pipeline_response):
    """LRO completion hook: deserialize the result as a ScopeMap."""
    response = pipeline_response.http_response
    deserialized = self._deserialize('ScopeMap', pipeline_response)
    # A caller-supplied `cls` hook gets first crack at shaping the result.
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
# | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def get_long_running_output(pipeline_response):
    """LRO completion hook for a bodyless operation.

    Only invokes the caller-supplied `cls` hook; otherwise returns None.
    """
    if cls:
        return cls(pipeline_response, None, {})
# | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def get_long_running_output(pipeline_response):
    """LRO completion hook: deserialize the final body as a ScopeMap."""
    response = pipeline_response.http_response
    deserialized = self._deserialize('ScopeMap', pipeline_response)
    if cls:
        # Custom result-shaping hook provided by the caller.
        return cls(pipeline_response, deserialized, {})
    return deserialized
# | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def list(
self,
resource_group_name: str,
registry_name: str,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def prepare_request(next_link=None):
if not next_link: | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def __init__(self, aliases):
    """Store the alias mapping plus a name -> position priority index."""
    self.aliases = aliases
    priorities = {}
    for position, alias in enumerate(aliases):
        priorities[alias] = position
    self.priorities = priorities
# | openvenues/lieu | [
76,
23,
76,
15,
1496013990
] |
def get(self, key, default=None):
    """Dict-style lookup into the alias mapping."""
    return self.aliases.get(key, default)
# | openvenues/lieu | [
76,
23,
76,
15,
1496013990
] |
def from_geojson(cls, data):
    """Build a normalized field dict from one GeoJSON feature.

    Known alias keys are decoded to text, aliases are resolved through
    cls.field_map, and coordinates are normalized to decimal lat/lon when
    parseable (dropped otherwise).
    """
    raw = data.get('properties')
    raw = {k: safe_decode(v) if k in cls.field_map.aliases else v for k, v in six.iteritems(raw)}
    fields = cls.field_map.replace(raw)
    lon, lat = data.get('geometry', {}).get('coordinates', (None, None))
    try:
        lat, lon = latlon_to_decimal(lat, lon)
    except ValueError:
        # Unparseable coordinates: record neither.
        lat = lon = None
    if lat is not None:
        fields[Coordinates.LATITUDE] = lat
    if lon is not None:
        fields[Coordinates.LONGITUDE] = lon
    return fields
# | openvenues/lieu | [
76,
23,
76,
15,
1496013990
] |
def __init__(self, x):
    """Singly-linked list node holding value `x`."""
    self.next = None
    self.val = x
# | jiadaizhao/LeetCode | [
39,
21,
39,
2,
1502171846
] |
def __init__(self, name: str, title: str) -> None:
    """A named dataset with a human-readable title."""
    self.title = title
    self.name = name
# | pudo/nomenklatura | [
159,
36,
159,
5,
1342346008
] |
def __lt__(self, other: "Dataset") -> bool:
    """Order datasets alphabetically by name."""
    return self.name.__lt__(other.name)
# | pudo/nomenklatura | [
159,
36,
159,
5,
1342346008
] |
def __init__(self, id, web_url, timeout=None):
# Delegate straight to the UrlParser base (explicit py2-style super call).
UrlParser.__init__(self, id=id, web_url=web_url, timeout=timeout) | pgaref/HTTP_Request_Randomizer | [
140,
53,
140,
23,
1446231372
] |
def create_proxy_object(self, dataset):
    """Translate scraped (tag, value) rows into a ProxyObject.

    Each entry's field[0] is the column tag and field[1] its value.
    Returns None when the IP column does not contain a valid IP.
    """
    ip = ""
    port = None
    anonymity = AnonymityLevel.UNKNOWN
    country = None
    protocols = []
    for entry in dataset:
        tag = entry[0]
        if tag == 'IP Address':
            ip = entry[1].strip()
            # Reject rows whose IP column is malformed.
            if not UrlParser.valid_ip(ip):
                logger.debug("IP with Invalid format: {}".format(ip))
                return None
        elif tag == 'Port':
            port = entry[1].strip()
        elif tag == 'Anonymity':
            anonymity = AnonymityLevel.get(entry[1].strip())
        elif tag == 'Country':
            country = entry[1].strip()
        elif tag == 'Https':
            https = entry[1].strip().lower()
            if https == 'yes':
                protocols.extend([Protocol.HTTP, Protocol.HTTPS])
            elif https == 'no':
                protocols.append(Protocol.HTTP)
    return ProxyObject(source=self.id, ip=ip, port=port, anonymity_level=anonymity, country=country, protocols=protocols)
# | pgaref/HTTP_Request_Randomizer | [
140,
53,
140,
23,
1446231372
] |
def __init__(self, state, action, next_state, reward=None):
    """One (state, action, next_state, reward) transition.

    reward may be None when it is not (yet) known.
    """
    self.state = state
    self.action = action
    self.next_state = next_state
    self.reward = reward
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def bonus(self):
    """The exploration bonus currently added to the reward (float)."""
    return self._bonus
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def bonus(self, value):
    """Setter: store the new exploration bonus."""
    self._bonus = value
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __getstate__(self):
    """Pickle support: expose only the reward-model fields."""
    state = dict(reward=self.reward,
                 rmax=self.rmax,
                 bonus=self.bonus,
                 activate_bonus=self.activate_bonus)
    return state
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def set(self, value, *args, **kwargs):
    """Set the reward value.

    When cb_set is configured, the stored value is computed by the
    callback from *args/**kwargs instead of taken from `value`.
    Note: the assignment targets type(self), so the reward is shared by
    all instances of the class.
    """
    if self.cb_set is None:
        type(self).reward = value
    else:
        type(self).reward = self.cb_set(*args, **kwargs)
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __init__(self):
    """Fresh per-(state, action) model statistics."""
    self.transition_proba = ProbabilityDistribution()
    self.reward_func = RewardFunction()
    # Nothing observed yet.
    self.visits = 0
    self.known = False
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __setstate__(self, d):
    """Unpickle support: restore every attribute from the state dict.

    Uses items() — works on both Python 2 and 3 — instead of the
    py2-only iteritems(), which raises AttributeError on Python 3.
    """
    for name, value in d.items():
        setattr(self, name, value)
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __init__(self, state_id, actions):
# Per-state model container: one StateActionInfo and one q-value per action.
self.id = state_id
""":type: int"""
self.models = {a: StateActionInfo() for a in actions}
""":type: dict[Action, StateActionInfo]"""
# Randomizing the initial q-values impedes performance
# self.q = {a: ((0.01 - 0.0) * np.random.random() + 0.0) for a in actions}
self.q = {a: 0.0 for a in actions}
""":type: dict[Action, float]"""
# Large sentinel: state initially considered far from any known state.
self.steps_away = 100000
""":type: int""" | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __setstate__(self, d):
    """Unpickle support: restore every attribute from the state dict.

    items() replaces the py2-only iteritems(), which breaks on Python 3.
    """
    for name, value in d.items():
        setattr(self, name, value)
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def name(self):
    """The name of this MDP primitive (str)."""
    return self._name
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def set_nfeatures(cls, n):
    """Set the class-level feature count.

    Raises ValueError unless `n` is an int.
    """
    if not isinstance(n, int):
        raise ValueError("Attribute 'nfeatures' must be of <type 'int'>, got %s" % str(type(n)))
    cls.nfeatures = n
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def set_dtype(cls, value=DTYPE_FLOAT):
    """Set the feature data type.

    Only np.float64, np.int32 and np.object are accepted; anything else
    raises ValueError.
    """
    allowed = (np.float64, np.int32, np.object)
    if value not in allowed:
        raise ValueError("Attribute 'dtype' must be one of the allowed types, got %s" % str(type(value)))
    cls.dtype = value
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def set_description(cls, descr):
    """Set the feature description and infer/validate the feature count.

    `descr` is either a dict (optionally with a 'descr' index mapping or
    a 'value' list per feature group — a 'value' of '*' matches any
    feature array) or a list naming each feature position. The feature
    count implied by `descr` must agree with cls.nfeatures when that is
    already set; when cls.nfeatures is None it is set from `descr`.

    Fixes the py2-only descr.itervalues().next() idiom (AttributeError on
    Python 3) with next(iter(descr.values())), and factors the repeated
    dimension check into a helper.

    Raises ValueError on any dimension mismatch.
    """
    def _require(n, msg):
        # Shared mismatch check; original condition: nfeatures set and != n.
        if cls.nfeatures is not None and not cls.nfeatures == n:
            raise ValueError(msg % (n, cls.nfeatures))

    nfeatures = None
    if isinstance(descr, dict):
        # Inspect one (arbitrary) group's config to decide the layout.
        config = next(iter(descr.values()))
        if 'descr' in config:
            nfeatures = sum(len(v) for v in config['descr'].values())
            _require(nfeatures,
                     "Dimension mismatch: array described by 'descr' is a vector of length %d,"
                     " but attribute cls.nfeatures = %d")
        elif 'value' in config and not config['value'] == '*':
            nfeatures = len(config['value'])
            _require(nfeatures,
                     "Dimension mismatch: array described by 'value' is a vector of length %d,"
                     " but attribute cls.nfeatures = %d")
        else:
            nfeatures = sum(len(v) for v in descr.values())
            _require(nfeatures,
                     "Dimension mismatch: 'descr' is a vector of length %d,"
                     " but attribute cls.nfeatures = %d")
    elif isinstance(descr, list):
        nfeatures = len(descr)
        _require(nfeatures,
                 "Dimension mismatch: 'descr' is a vector of length %d,"
                 " but attribute cls.nfeatures = %d")
    if cls.nfeatures is None:
        cls.nfeatures = nfeatures
    cls.description = descr
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def set_discretized(cls, val=False):
    """Set the flag marking the features as discretized.

    Raises ValueError unless `val` is a bool (default False).
    """
    if not isinstance(val, bool):
        raise ValueError("Attribute 'nfeatures' must be of <type 'bool'>, got %s" % str(type(val)))
    cls.discretized = val
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def set_minmax_features(cls, _min, _max):
    """Set the per-feature minimum and maximum vectors.

    Both arguments are coerced to cls.dtype. Their common length defines
    cls.nfeatures when it is still unset; otherwise it must match it.

    Raises ValueError when the two vectors disagree in length, or when
    their length conflicts with cls.nfeatures.
    """
    lo = np.asarray(_min, dtype=cls.dtype)
    hi = np.asarray(_max, dtype=cls.dtype)
    # Promote scalars to 1-element vectors.
    if lo.size == 1:
        lo.shape = (1,)
    if hi.size == 1:
        hi.shape = (1,)
    if lo.shape[0] != hi.shape[0]:
        raise ValueError("Dimension mismatch: array '_min' is a vector of length %d,"
                         " but '_max' is of length %d" % (lo.shape[0], hi.shape[0]))
    if cls.nfeatures is None:
        cls.nfeatures = lo.shape[0]
    if lo.shape[0] != cls.nfeatures:
        raise ValueError("Arrays '_min' and '_max' must be of length %d." % cls.nfeatures)
    cls.min_features = lo
    cls.max_features = hi
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def set_states_per_dim(cls, nstates):
    """Set the number of discretization states per feature.

    The vector's length defines cls.nfeatures when still unset;
    otherwise the vector must be 1-D of length cls.nfeatures, else
    ValueError is raised.
    """
    bins = np.asarray(nstates, dtype=cls.dtype)
    # Promote a scalar to a 1-element vector.
    if bins.size == 1:
        bins.shape = (1,)
    if cls.nfeatures is None:
        cls.nfeatures = bins.shape[0]
    if bins.ndim != 1 or bins.shape[0] != cls.nfeatures:
        raise ValueError("Array 'nstates' must be a vector of length %d." % cls.nfeatures)
    cls.states_per_dim = bins
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __get__(self, instance, owner):
    """Descriptor access: expose the wrapped feature array."""
    return self._features
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __setitem__(self, index, value):
    """Assign one slot of the feature array.

    Raises IndexError for an out-of-range positive index. BUG FIX: the
    original guard used 'index > len(self)', an off-by-one that let
    index == len(self) slip past the explicit bounds check.
    """
    if index >= len(self):
        raise IndexError("Assignment index out of range")
    self._features[index] = value
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __contains__(self, item):
    """Membership test against the underlying feature array."""
    present = item in self._features
    return present
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __eq__(self, other):
    """Primitives are equal when their feature arrays match element-wise."""
    mine = self._features
    return np.array_equal(other.get(), mine)
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __mul__(self, other):
    """Element-wise multiplication of the feature array by `other`."""
    product = self._features * other
    return product
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __iter__(self):
    """Reset the iteration cursor and return self as the iterator."""
    # NOTE(review): a single shared cursor makes nested iteration unsafe — verify callers.
    self.ix = 0
    return self
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __repr__(self):
    """Readable form: optional quoted name tag plus encoded features."""
    features = np.array_str(self.encode())
    if self._name:
        return "\'" + self._name + "\':\t" + features
    return features
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __copy__(self, memo):
# NOTE(review): __copy__ normally takes no `memo` argument — this signature
# (and the memo[id(self)] bookkeeping) matches the __deepcopy__ protocol;
# confirm how copy.copy()/copy.deepcopy() actually reach this method.
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
# Copy each slot individually; unset slots raise AttributeError and are skipped.
for k in self.__slots__:
try:
setattr(result, k, copy.copy(getattr(self, k)))
except AttributeError:
pass
return result | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __setstate__(self, d):
    """Unpickle support: restore instance attributes.

    Class-level configuration names are skipped, then the class feature
    count is re-derived from the restored feature array. Uses items()
    (Python 2 and 3) instead of the py2-only iteritems().
    """
    for name, value in d.items():
        if name not in ['nfeatures', 'dtype', 'description', 'discretized',
                        'min_features', 'max_features', 'states_per_dim']:
            setattr(self, name, value)
    type(self).nfeatures = self._features.shape[0]
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def tolist(self):
    """Return the feature array converted to a plain Python list."""
    as_list = self._features.tolist()
    return as_list
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def discretize(self):
    """Snap each feature to the center of its bin, in place.

    No-op unless the class is flagged as discretized. Requires the class
    attributes min_features, max_features and states_per_dim to be
    vectors of length nfeatures (ValueError otherwise).
    """
    if not self.discretized:
        return
    klass = type(self)
    nfeatures = klass.nfeatures
    lo = klass.min_features
    hi = klass.max_features
    nbins = klass.states_per_dim
    for attr_name, vec in (('min_features', lo),
                           ('max_features', hi),
                           ('states_per_dim', nbins)):
        if vec is None or vec.shape[0] != nfeatures:
            raise ValueError("Attribute '%s' must be a vectors of length %d." % (attr_name, nfeatures))
    binned = []
    for i, feat in enumerate(self):
        # Bin width per feature; half-width shift rounds to nearest bin.
        width = math.ceil((hi[i] - lo[i]) / nbins[i])
        if feat > 0:
            bin_index = int((feat + width / 2) / width)
        else:
            bin_index = int((feat - width / 2) / width)
        binned.append(bin_index * width)
    self._features = np.asarray(binned)
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def decode(cls, _repr):
    """Reconstruct a primitive from its readable representation `_repr`.

    Default implementation simply feeds `_repr` to the constructor;
    subclasses (or runtime monkey-patching via
    MDPPrimitive.decode = classmethod(fn)) may override.
    """
    return cls(_repr)
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def key_to_index(key):
    """Map an internal feature name (e.g. 'x') to its group-local index.

    Used together with cls.description to split a flat feature vector
    into named sub-groups (for example extracting the x/y/z triplet of
    one limb from a whole-body state vector). Must be supplied by the
    subclass or patched in at runtime, e.g.
    State.key_to_index = staticmethod(my_mapping).

    Raises NotImplementedError by default.
    """
    raise NotImplementedError
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __init__(self, features, name=None):
    """Wrap a feature array as a State, delegating to MDPPrimitive."""
    super(State, self).__init__(features, name)
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def is_terminal(self):
    """Whether this state's name is registered as terminal.

    State.terminal_states may be None (nothing terminal), a list of
    names, or a single name.
    """
    terminal = State.terminal_states
    if terminal is None:
        return False
    if isinstance(terminal, list):
        return self.name in terminal
    return self.name == self.terminal_states
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def is_valid(self):
    """States are valid by default.

    Override in a subclass — or patch at runtime via
    MDPPrimitive.is_valid = fn — to add domain-specific checks.
    """
    return True
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def __init__(self, features, name=None):
    """Wrap a feature array as an Action; derive the name when not given."""
    super(Action, self).__init__(features, name)
    if name is None:
        self._name = Action.get_name(self._features)
    else:
        self._name = name
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def get_name(cls, features):
    """Return the action name that matches `features` via cls.description.

    Falls back to "no-op" when no description entry matches and every
    feature is zero, else to "".

    Fixes in this revision:
    - the dimension-mismatch ValueError was constructed but never raised;
    - element-wise ndarray '==' was used in a boolean context (ambiguous
      for vectors of length > 1) — replaced with np.array_equal;
    - '*' wildcard entries are matched before the shape check (the check
      is meaningless for them);
    - items() replaces the py2-only iteritems().
    """
    features = np.asarray(features, dtype=cls.dtype)
    if cls.description is not None:
        for action_name, config in cls.description.items():
            value = config["value"]
            if value == "*":
                # Wildcard: matches any feature vector.
                return action_name
            value = np.asarray(value, dtype=cls.dtype)
            if value.shape != features.shape:
                raise ValueError("Dimension mismatch: array 'config['value']' is vector of length %d,"
                                 " but 'features' is a vector of length %d." % (value.shape[0],
                                                                                features.shape[0]))
            if np.array_equal(value, features):
                return action_name
    if not features.any():
        return "no-op"
    return ""
# | evenmarbles/mlpy | [
7,
2,
7,
2,
1439328535
] |
def load(saved_model_dir: str) -> AutoTrackable:
"""Load a Tensorflow saved model"""
return tf.saved_model.load(saved_model_dir) | yoeo/guesslang | [
644,
78,
644,
27,
1495396528
] |
def train(estimator: Estimator, data_root_dir: str, max_steps: int) -> Any:
"""Train a Tensorflow estimator"""
train_spec = tf.estimator.TrainSpec(
input_fn=_build_input_fn(data_root_dir, ModeKeys.TRAIN),
max_steps=max_steps,
)
if max_steps > Training.LONG_TRAINING_STEPS:
throttle_secs = Training.LONG_DELAY
else:
throttle_secs = Training.SHORT_DELAY
eval_spec = tf.estimator.EvalSpec(
input_fn=_build_input_fn(data_root_dir, ModeKeys.EVAL),
start_delay_secs=Training.SHORT_DELAY,
throttle_secs=throttle_secs,
)
LOGGER.debug('Train the model')
results = tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
training_metrics = results[0]
return training_metrics | yoeo/guesslang | [
644,
78,
644,
27,
1495396528
] |
def test(
saved_model: AutoTrackable,
data_root_dir: str,
mapping: Dict[str, str], | yoeo/guesslang | [
644,
78,
644,
27,
1495396528
] |
def predict(
saved_model: AutoTrackable,
mapping: Dict[str, str],
text: str | yoeo/guesslang | [
644,
78,
644,
27,
1495396528
] |
def _build_input_fn(
data_root_dir: str,
mode: ModeKeys, | yoeo/guesslang | [
644,
78,
644,
27,
1495396528
] |
def input_function() -> tf.data.Dataset:
dataset = tf.data.Dataset
dataset = dataset.list_files(pattern, shuffle=True).map(_read_file)
if mode == ModeKeys.PREDICT:
return dataset.batch(1)
if mode == ModeKeys.TRAIN:
dataset = dataset.shuffle(Training.SHUFFLE_BUFFER).repeat()
return dataset.map(_preprocess).batch(HyperParameter.BATCH_SIZE) | yoeo/guesslang | [
644,
78,
644,
27,
1495396528
] |
def _serving_input_receiver_fn() -> tf.estimator.export.ServingInputReceiver:
"""Function to serve model for predictions."""
content = tf.compat.v1.placeholder(tf.string, [None])
receiver_tensors = {'content': content}
features = {'content': tf.map_fn(_preprocess_text, content)}
return tf.estimator.export.ServingInputReceiver(
receiver_tensors=receiver_tensors,
features=features,
) | yoeo/guesslang | [
644,
78,
644,
27,
1495396528
] |
def _preprocess(
data: tf.Tensor,
label: tf.Tensor, | yoeo/guesslang | [
644,
78,
644,
27,
1495396528
] |
def best_fit_slope_and_intercept(xs, ys):
m = (mean(xs) * mean(ys) - mean(xs*ys)) / ( mean(xs)*mean(xs) - mean(xs*xs) )
b = mean(ys) - m * mean(xs)
return m, b | aspiringguru/sentexTuts | [
1,
2,
1,
1,
1473483674
] |
def __init__(self):
self.parser = optparse.OptionParser()
self.set_options() | hcpss-banderson/py-tasc | [
1,
1,
1,
2,
1452191848
] |
def set_options(self):
"""Use optparser to manage options""" | hcpss-banderson/py-tasc | [
1,
1,
1,
2,
1452191848
] |
def parse(self):
"""Return the raw parsed user supplied values
:rtype: dict[str, str]
""" | hcpss-banderson/py-tasc | [
1,
1,
1,
2,
1452191848
] |
def manifest_location(self):
"""Return the location of the manifest file
:rtype: str
""" | hcpss-banderson/py-tasc | [
1,
1,
1,
2,
1452191848
] |
def manifest(self):
"""Get the parsed values from the manifest
:rtype: dict[str, mixed]
""" | hcpss-banderson/py-tasc | [
1,
1,
1,
2,
1452191848
] |
def destination(self):
"""Get the assembly location
:rtype: str
""" | hcpss-banderson/py-tasc | [
1,
1,
1,
2,
1452191848
] |
def extra_parameters(self):
"""Get extra parameters
:rtype: dict[str, str]
"""
params_string = self.parse().extra_parameters | hcpss-banderson/py-tasc | [
1,
1,
1,
2,
1452191848
] |
def tableAt(byte):
return crc32(chr(byte ^ 0xff)) & 0xffffffff ^ FINALXOR ^ (INITXOR >> 8) | tholum/PiBunny | [
198,
40,
198,
3,
1491360775
] |
def write_with_xml_declaration(self, file, encoding, xml_declaration):
assert xml_declaration is True # Support our use case only
file.write("<?xml version='1.0' encoding='utf-8'?>\n")
p26_write(self, file, encoding=encoding) | SciLifeLab/genologics | [
25,
39,
25,
9,
1346852014
] |
def __init__(self, baseuri, username, password, version=VERSION):
"""baseuri: Base URI for the GenoLogics server, excluding
the 'api' or version parts!
For example: https://genologics.scilifelab.se:8443/
username: The account name of the user to login as.
password: The password for the user account to login as.
version: The optional LIMS API version, by default 'v2'
"""
self.baseuri = baseuri.rstrip('/') + '/'
self.username = username
self.password = password
self.VERSION = version
self.cache = dict()
# For optimization purposes, enables requests to persist connections
self.request_session = requests.Session()
# The connection pool has a default size of 10
self.adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
self.request_session.mount('http://', self.adapter) | SciLifeLab/genologics | [
25,
39,
25,
9,
1346852014
] |
def get(self, uri, params=dict()):
"GET data from the URI. Return the response XML as an ElementTree."
try:
r = self.request_session.get(uri, params=params,
auth=(self.username, self.password),
headers=dict(accept='application/xml'),
timeout=TIMEOUT)
except requests.exceptions.Timeout as e:
raise type(e)("{0}, Error trying to reach {1}".format(str(e), uri))
else:
return self.parse_response(r) | SciLifeLab/genologics | [
25,
39,
25,
9,
1346852014
] |
def upload_new_file(self, entity, file_to_upload):
"""Upload a file and attach it to the provided entity."""
file_to_upload = os.path.abspath(file_to_upload)
if not os.path.isfile(file_to_upload):
raise IOError("{} not found".format(file_to_upload))
# Request the storage space on glsstorage
# Create the xml to describe the file
root = ElementTree.Element(nsmap('file:file'))
s = ElementTree.SubElement(root, 'attached-to')
s.text = entity.uri
s = ElementTree.SubElement(root, 'original-location')
s.text = file_to_upload
root = self.post(
uri=self.get_uri('glsstorage'),
data=self.tostring(ElementTree.ElementTree(root))
)
# Create the file object
root = self.post(
uri=self.get_uri('files'),
data=self.tostring(ElementTree.ElementTree(root))
)
file = File(self, uri=root.attrib['uri'])
# Actually upload the file
uri = self.get_uri('files', file.id, 'upload')
r = requests.post(uri, files={'file': (file_to_upload, open(file_to_upload, 'rb'))},
auth=(self.username, self.password))
self.validate_response(r)
return file | SciLifeLab/genologics | [
25,
39,
25,
9,
1346852014
] |
def post(self, uri, data, params=dict()):
"""POST the serialized XML to the given URI.
Return the response XML as an ElementTree.
"""
r = requests.post(uri, data=data, params=params,
auth=(self.username, self.password),
headers={'content-type': 'application/xml',
'accept': 'application/xml'})
return self.parse_response(r, accept_status_codes=[200, 201, 202]) | SciLifeLab/genologics | [
25,
39,
25,
9,
1346852014
] |
def check_version(self):
"""Raise ValueError if the version for this interface
does not match any of the versions given for the API.
"""
uri = urljoin(self.baseuri, 'api')
r = requests.get(uri, auth=(self.username, self.password))
root = self.parse_response(r)
tag = nsmap('ver:versions')
assert tag == root.tag
for node in root.findall('version'):
if node.attrib['major'] == self.VERSION: return
raise ValueError('version mismatch') | SciLifeLab/genologics | [
25,
39,
25,
9,
1346852014
] |
def parse_response(self, response, accept_status_codes=[200]):
"""Parse the XML returned in the response.
Raise an HTTP error if the response status is not 200.
"""
self.validate_response(response, accept_status_codes)
root = ElementTree.fromstring(response.content)
return root | SciLifeLab/genologics | [
25,
39,
25,
9,
1346852014
] |
def get_reagent_types(self, name=None, start_index=None):
"""Get a list of reqgent types, filtered by keyword arguments.
name: reagent type name, or list of names.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name,
start_index=start_index)
return self._get_instances(ReagentType, params=params) | SciLifeLab/genologics | [
25,
39,
25,
9,
1346852014
] |
def get_researchers(self, firstname=None, lastname=None, username=None,
last_modified=None,
udf=dict(), udtname=None, udt=dict(), start_index=None,
add_info=False):
"""Get a list of researchers, filtered by keyword arguments.
firstname: Researcher first name, or list of names.
lastname: Researcher last name, or list of names.
username: Researcher account name, or list of names.
last_modified: Since the given ISO format datetime.
udf: dictionary of UDFs with 'UDFNAME[OPERATOR]' as keys.
udtname: UDT name, or list of names.
udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(firstname=firstname,
lastname=lastname,
username=username,
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
return self._get_instances(Researcher, add_info=add_info, params=params) | SciLifeLab/genologics | [
25,
39,
25,
9,
1346852014
] |
def get_sample_number(self, name=None, projectname=None, projectlimsid=None,
udf=dict(), udtname=None, udt=dict(), start_index=None):
"""Gets the number of samples matching the query without fetching every
sample, so it should be faster than len(get_samples()"""
params = self._get_params(name=name,
projectname=projectname,
projectlimsid=projectlimsid,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
root = self.get(self.get_uri(Sample._URI), params=params)
total = 0
while params.get('start-index') is None: # Loop over all pages.
total += len(root.findall("sample"))
node = root.find('next-page')
if node is None: break
root = self.get(node.attrib['uri'], params=params)
return total | SciLifeLab/genologics | [
25,
39,
25,
9,
1346852014
] |
def get_artifacts(self, name=None, type=None, process_type=None,
artifact_flag_name=None, working_flag=None, qc_flag=None,
sample_name=None, samplelimsid=None, artifactgroup=None, containername=None,
containerlimsid=None, reagent_label=None,
udf=dict(), udtname=None, udt=dict(), start_index=None,
resolve=False):
"""Get a list of artifacts, filtered by keyword arguments.
name: Artifact name, or list of names.
type: Artifact type, or list of types.
process_type: Produced by the process type, or list of types.
artifact_flag_name: Tagged with the genealogy flag, or list of flags.
working_flag: Having the given working flag; boolean.
qc_flag: Having the given QC flag: UNKNOWN, PASSED, FAILED.
sample_name: Related to the given sample name.
samplelimsid: Related to the given sample id.
artifactgroup: Belonging to the artifact group (experiment in client).
containername: Residing in given container, by name, or list.
containerlimsid: Residing in given container, by LIMS id, or list.
reagent_label: having attached reagent labels.
udf: dictionary of UDFs with 'UDFNAME[OPERATOR]' as keys.
udtname: UDT name, or list of names.
udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name,
type=type,
process_type=process_type,
artifact_flag_name=artifact_flag_name,
working_flag=working_flag,
qc_flag=qc_flag,
sample_name=sample_name,
samplelimsid=samplelimsid,
artifactgroup=artifactgroup,
containername=containername,
containerlimsid=containerlimsid,
reagent_label=reagent_label,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
if resolve:
return self.get_batch(self._get_instances(Artifact, params=params))
else:
return self._get_instances(Artifact, params=params) | SciLifeLab/genologics | [
25,
39,
25,
9,
1346852014
] |
def get_containers(self, name=None, type=None,
state=None, last_modified=None,
udf=dict(), udtname=None, udt=dict(), start_index=None,
add_info=False):
"""Get a list of containers, filtered by keyword arguments.
name: Containers name, or list of names.
type: Container type, or list of types.
state: Container state: Empty, Populated, Discarded, Reagent-Only.
last_modified: Since the given ISO format datetime.
udf: dictionary of UDFs with 'UDFNAME[OPERATOR]' as keys.
udtname: UDT name, or list of names.
udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name,
type=type,
state=state,
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
return self._get_instances(Container, add_info=add_info, params=params) | SciLifeLab/genologics | [
25,
39,
25,
9,
1346852014
] |
def get_automations(self, name=None, add_info=False):
"""Get the list of configured automations on the system """
params = self._get_params(name=name)
return self._get_instances(Automation, add_info=add_info, params=params) | SciLifeLab/genologics | [
25,
39,
25,
9,
1346852014
] |
Subsets and Splits