language (stringclasses 2 values) | func_code_string (stringlengths 63 to 466k) |
---|---|
python
|
def get_diff(self, rev1, rev2, path=None, ignore_whitespace=False,
context=3):
"""
        Returns a (git-like) *diff* as plain text. Shows changes introduced by
        ``rev2`` since ``rev1``.
        :param rev1: Entry point from which the diff is shown. Can be
            ``self.EMPTY_CHANGESET`` - in this case a patch showing all
            changes since the empty state of the repository up to ``rev2``
            is generated.
        :param rev2: Revision until which changes should be shown.
        :param path: Optional path; if given, the diff is limited to it.
        :param ignore_whitespace: If set to ``True``, whitespace changes are
            not shown. Defaults to ``False``.
        :param context: How many lines before/after changed lines should be
            shown. Defaults to ``3``.
"""
flags = ['-U%s' % context, '--full-index', '--binary', '-p', '-M', '--abbrev=40']
if ignore_whitespace:
flags.append('-w')
if hasattr(rev1, 'raw_id'):
rev1 = getattr(rev1, 'raw_id')
if hasattr(rev2, 'raw_id'):
rev2 = getattr(rev2, 'raw_id')
if rev1 == self.EMPTY_CHANGESET:
rev2 = self.get_changeset(rev2).raw_id
cmd = ' '.join(['show'] + flags + [rev2])
else:
rev1 = self.get_changeset(rev1).raw_id
rev2 = self.get_changeset(rev2).raw_id
cmd = ' '.join(['diff'] + flags + [rev1, rev2])
if path:
cmd += ' -- "%s"' % path
stdout, stderr = self.run_git_command(cmd)
# If we used 'show' command, strip first few lines (until actual diff
# starts)
if rev1 == self.EMPTY_CHANGESET:
lines = stdout.splitlines()
x = 0
for line in lines:
if line.startswith('diff'):
break
x += 1
            # Append a trailing newline just like the 'diff' command does
stdout = '\n'.join(lines[x:]) + '\n'
return stdout
|
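A minimal usage sketch for the method above (hedged: `repo` and the revision ids are hypothetical placeholders, not part of the original snippet):

# Assumed: `repo` is an instance of the repository class defining get_diff().
diff_text = repo.get_diff('rev-a-id', 'rev-b-id', path='setup.py',
                          ignore_whitespace=True, context=5)
print(diff_text)  # plain-text git-style diff limited to setup.py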
java
|
boolean registerAddOn(AddOn pAddOn) {
if (!mRegisteredAddOns.containsKey(pAddOn.getAddOnId())) {
mRegisteredAddOns.put(pAddOn.getAddOnId(), pAddOn);
if (pAddOn.hasConfiguration()) {
addConfiguration(pAddOn.getAddOnId(), pAddOn.getConfigurationPane());
}
LOGGER.info("The add-on " + pAddOn.getAddOnId() + " has been registered.");
return true;
} else {
        LOGGER.warn("The add-on " + pAddOn.getAddOnId() + " is already registered.");
return false;
}
}
|
java
|
private void setChildObjects(Object pParent, ObjectMapper pOM)
throws SQLException {
if (pOM == null) {
throw new NullPointerException("ObjectMapper in readChildObjects "
+ "cannot be null!!");
}
for (Enumeration keys = pOM.mMapTypes.keys(); keys.hasMoreElements();) {
String property = (String) keys.nextElement();
String mapType = (String) pOM.mMapTypes.get(property);
if (property.length() <= 0 || mapType == null) {
continue;
}
// Get the id of the parent
Object id = getPropertyValue(pParent,
pOM.getProperty(pOM.getPrimaryKey()));
if (mapType.equals(ObjectMapper.OBJECTMAP)) {
// OBJECT Mapping
// Get the class for this property
Class objectClass = (Class) pOM.mClasses.get(property);
            DatabaseReadable dbr = null;
            try {
                dbr = (DatabaseReadable) objectClass.newInstance();
            }
            catch (Exception e) {
                mLog.logError(e);
                // Could not instantiate the child type; skip this property
                // instead of dereferencing a null reference below.
                continue;
            }
            /*
            Properties mapping = readMapping(objectClass);
            */
            // Get property mapping for child object
            if (pOM.mJoins.containsKey(property)) {
                // mapping.setProperty(".join", (String) pOM.joins.get(property));
                dbr.getMapping().put(".join", pOM.mJoins.get(property));
            }
// Find id and put in where hash
Hashtable where = new Hashtable();
// String foreignKey = mapping.getProperty(".foreignKey");
String foreignKey = (String)
dbr.getMapping().get(".foreignKey");
if (foreignKey != null) {
where.put(".foreignKey", id);
}
Object[] child = readObjects(dbr, where);
// Object[] child = readObjects(objectClass, mapping, where);
if (child.length < 1)
throw new SQLException("No child object with foreign key "
+ foreignKey + "=" + id);
else if (child.length != 1)
throw new SQLException("More than one object with foreign "
+ "key " + foreignKey + "=" + id);
// Set child object to the parent
setPropertyValue(pParent, property, child[0]);
}
else if (mapType.equals(ObjectMapper.COLLECTIONMAP)) {
// COLLECTION Mapping
// Get property mapping for child object
Hashtable mapping = pOM.getPropertyMapping(property);
// Find id and put in where hash
Hashtable where = new Hashtable();
String foreignKey = (String) mapping.get(".foreignKey");
if (foreignKey != null) {
where.put(".foreignKey", id);
}
DBObject dbr = new DBObject();
dbr.mapping = mapping; // ugh...
// Read the objects
Object[] objs = readObjects(dbr, where);
// Put the objects in a hash
Hashtable children = new Hashtable();
for (int i = 0; i < objs.length; i++) {
children.put(((DBObject) objs[i]).getId(),
((DBObject) objs[i]).getObject());
}
// Set child properties to parent object
setPropertyValue(pParent, property, children);
}
}
}
|
python
|
def configure(self, sbi_config: str):
"""Configure an SBI for this subarray.
Args:
sbi_config (str): SBI configuration JSON
Returns:
str,
"""
# print(sbi_config)
config_dict = json.loads(sbi_config)
self.debug_stream('SBI configuration:\n%s',
json.dumps(config_dict, indent=2))
try:
sbi = Subarray(self.get_name()).configure_sbi(config_dict)
except jsonschema.exceptions.ValidationError as error:
return json.dumps(dict(path=error.absolute_path.__str__(),
schema_path=error.schema_path.__str__(),
message=error.message), indent=2)
except RuntimeError as error:
return json.dumps(dict(error=str(error)), indent=2)
return 'Accepted SBI: {}'.format(sbi.id)
|
python
|
def lookup(command):
"""
Lookup a command in PATH. For example::
>>> lookup('ls')
'/usr/bin/ls'
>>> lookup('usermod')
'/usr/sbin/usermod'
>>> lookup('foobar')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: Invalid command
    This function is incredibly dumb: it only checks that the file exists,
    not that it is executable.
"""
for path in os.environ.get('PATH', '').split(':'):
fname = os.path.join(path, command)
if os.path.exists(fname):
return fname
raise ValueError("Invalid command")
|
java
|
public void setAddLicenseSpecifications(java.util.Collection<LicenseSpecification> addLicenseSpecifications) {
if (addLicenseSpecifications == null) {
this.addLicenseSpecifications = null;
return;
}
this.addLicenseSpecifications = new java.util.ArrayList<LicenseSpecification>(addLicenseSpecifications);
}
|
java
|
@Override
public int read(long pos, byte []buf, int offset, int length)
{
if (pos < 0) {
throw new IllegalArgumentException();
}
if (_length <= pos) {
return -1;
}
int bufOffset = (int) (_offset + pos);
int sublen = (int) Math.min(length, _length - pos);
System.arraycopy(_pageBuffer, bufOffset, buf, offset, sublen);
return sublen;
}
|
java
|
public void attachUserData(String connId, KeyValueCollection userData) throws WorkspaceApiException {
try {
VoicecallsidcompleteData completeData = new VoicecallsidcompleteData();
completeData.setUserData(Util.toKVList(userData));
UserDataOperationId data = new UserDataOperationId();
data.data(completeData);
ApiSuccessResponse response = this.voiceApi.attachUserData(connId, data);
throwIfNotOk("attachUserData", response);
} catch (ApiException e) {
throw new WorkspaceApiException("attachUserData failed.", e);
}
}
|
python
|
def humansorted(seq, key=None, reverse=False, alg=ns.DEFAULT):
"""
Convenience function to properly sort non-numeric characters.
This is a wrapper around ``natsorted(seq, alg=ns.LOCALE)``.
Parameters
----------
seq : iterable
The input to sort.
key : callable, optional
A key used to determine how to sort each element of the sequence.
It is **not** applied recursively.
It should accept a single argument and return a single value.
    reverse : {True, False}, optional
        Return the list in reversed sorted order. The default is
        `False`.
alg : ns enum, optional
This option is used to control which algorithm `natsort`
uses when sorting. For details into these options, please see
the :class:`ns` class documentation. The default is `ns.LOCALE`.
Returns
-------
out : list
The sorted input.
See Also
--------
index_humansorted : Returns the sorted indexes from `humansorted`.
Notes
-----
Please read :ref:`locale_issues` before using `humansorted`.
Examples
--------
Use `humansorted` just like the builtin `sorted`::
        >>> a = ['Apple', 'Banana', 'apple', 'banana']
        >>> natsorted(a)
        ['Apple', 'Banana', 'apple', 'banana']
        >>> humansorted(a)
        ['apple', 'Apple', 'banana', 'Banana']
"""
return natsorted(seq, key, reverse, alg | ns.LOCALE)
|
python
|
def _combine_attr_fast_update(self, attr, typ):
'''Avoids having to call _update for each intermediate base. Only
works for class attr of type UpdateDict.
'''
values = dict(getattr(self, attr, {}))
for base in self._class_data.bases:
vals = dict(getattr(base, attr, {}))
preserve_attr_data(vals, values)
values = combine(vals, values)
setattr(self, attr, typ(values))
|
python
|
def get_departures(self, stop_id, route, destination, api_key):
"""Get the latest data from Transport NSW."""
self.stop_id = stop_id
self.route = route
self.destination = destination
self.api_key = api_key
# Build the URL including the STOP_ID and the API key
url = \
'https://api.transport.nsw.gov.au/v1/tp/departure_mon?' \
'outputFormat=rapidJSON&coordOutputFormat=EPSG%3A4326&' \
'mode=direct&type_dm=stop&name_dm=' \
+ self.stop_id \
+ '&departureMonitorMacro=true&TfNSWDM=true&version=10.2.1.42'
auth = 'apikey ' + self.api_key
header = {'Accept': 'application/json', 'Authorization': auth}
# Send query or return error
try:
response = requests.get(url, headers=header, timeout=10)
        except requests.exceptions.RequestException:
logger.warning("Network or Timeout error")
return self.info
# If there is no valid request
if response.status_code != 200:
logger.warning("Error with the request sent; check api key")
return self.info
# Parse the result as a JSON object
result = response.json()
        # If there are no stop events for the query
try:
result['stopEvents']
except KeyError:
logger.warning("No stop events for this query")
return self.info
# Set variables
maxresults = 1
monitor = []
        if self.destination != '':
            for i in range(len(result['stopEvents'])):
                destination = result['stopEvents'][i]['transportation']['destination']['name']
                if destination == self.destination:
                    event = self.parseEvent(result, i)
                    if event is not None:
                        monitor.append(event)
                    if len(monitor) >= maxresults:
                        # We found enough results, let's stop
                        break
        elif self.route != '':
            # Find the next stop events for a specific route
            for i in range(len(result['stopEvents'])):
                number = result['stopEvents'][i]['transportation']['number']
                if number == self.route:
                    event = self.parseEvent(result, i)
                    if event is not None:
                        monitor.append(event)
                    if len(monitor) >= maxresults:
                        # We found enough results, let's stop
                        break
        else:
            # No route defined, find any route leaving next
            for i in range(0, maxresults):
                event = self.parseEvent(result, i)
                if event is not None:
                    monitor.append(event)
if monitor:
self.info = {
ATTR_STOP_ID: self.stop_id,
ATTR_ROUTE: monitor[0][0],
ATTR_DUE_IN: monitor[0][1],
ATTR_DELAY: monitor[0][2],
ATTR_REALTIME: monitor[0][5],
ATTR_DESTINATION: monitor[0][6],
ATTR_MODE: monitor[0][7]
}
return self.info
|
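A hedged usage sketch for the method above; the wrapper class name, stop id, route and API key below are placeholder assumptions:

# Assumed: TransportNSW is the class defining get_departures().
tnsw = TransportNSW()
info = tnsw.get_departures('10101100', route='T1', destination='',
                           api_key='YOUR_API_KEY')
print(info)  # dict keyed by the ATTR_* constants (stop id, route, due-in, ...)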
python
|
def encode(self, val):
"""
Encode the python datetime value into the bytes needed for database format.
:param val: The datetime object.
:type val: :class:`datetime.datetime`
:returns: Bytes for data.
:rtype: str
"""
# Just copied from original KeePassX source
y, mon, d, h, min_, s = val.timetuple()[:6]
dw1 = 0x0000FFFF & ((y >> 6) & 0x0000003F)
dw2 = 0x0000FFFF & ((y & 0x0000003F) << 2 | ((mon >> 2) & 0x00000003))
dw3 = 0x0000FFFF & (((mon & 0x0000003) << 6) | ((d & 0x0000001F) << 1) \
| ((h >> 4) & 0x00000001))
dw4 = 0x0000FFFF & (((h & 0x0000000F) << 4) | ((min_ >> 2) & 0x0000000F))
dw5 = 0x0000FFFF & (((min_ & 0x00000003) << 6) | (s & 0x0000003F))
return struct.pack('<5B', dw1, dw2, dw3, dw4, dw5)
|
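The five packed bytes follow the KeePass 1.x layout: 12-bit year, 4-bit month, 5-bit day, 5-bit hour, 6-bit minute, 6-bit second. A small self-contained sketch of the inverse unpacking, useful for checking the bit layout (not part of the original class):

import struct

def decode_packed_time(data):
    """Inverse of the 5-byte packing above (sketch, not the library API)."""
    b1, b2, b3, b4, b5 = struct.unpack('<5B', data)
    year = (b1 << 6) | (b2 >> 2)                 # 6 high + 6 low bits
    month = ((b2 & 0x03) << 2) | (b3 >> 6)       # 2 + 2 bits
    day = (b3 >> 1) & 0x1F                       # 5 bits
    hour = ((b3 & 0x01) << 4) | (b4 >> 4)        # 1 + 4 bits
    minute = ((b4 & 0x0F) << 2) | (b5 >> 6)      # 4 + 2 bits
    second = b5 & 0x3F                           # 6 bits
    return year, month, day, hour, minute, second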
java
|
protected void bindWebSecurityManager(AnnotatedBindingBuilder<? super WebSecurityManager> bind) {
try {
bind.toConstructor(DefaultWebSecurityManager.class.getConstructor(Collection.class)).asEagerSingleton();
} catch (NoSuchMethodException e) {
throw new ConfigurationException("This really shouldn't happen. Either something has changed in Shiro, or there's a bug in ShiroModule.", e);
}
}
|
python
|
def getLogger(*args, **kwargs):
"""
Summary:
custom format logger
Args:
        mode (str): The Logger module supports the following log modes:
- log to console / stdout. Log_mode = 'stream'
- log to file
- log to system logger (syslog)
Returns:
logger object | TYPE: logging
"""
log_mode = local_config['LOGGING']['LOG_MODE']
# log format - file
file_format = '%(asctime)s - %(pathname)s - %(name)s - [%(levelname)s]: %(message)s'
# log format - stream
stream_format = '%(pathname)s - %(name)s - [%(levelname)s]: %(message)s'
# log format - syslog
syslog_format = '- %(pathname)s - %(name)s - [%(levelname)s]: %(message)s'
# set facility for syslog:
if local_config['LOGGING']['SYSLOG_FILE']:
syslog_facility = 'local7'
else:
syslog_facility = 'user'
# all formats
asctime_format = "%Y-%m-%d %H:%M:%S"
# objects
logger = logging.getLogger(*args, **kwargs)
logger.propagate = False
try:
if not logger.handlers:
# branch on output format, default to stream
if mode_assignment(log_mode) == 'FILE':
# file handler
f_handler = logging.FileHandler(local_config['LOGGING']['LOG_PATH'])
f_formatter = logging.Formatter(file_format, asctime_format)
#f_formatter = logging.Formatter('%(asctime)s %(processName)s %(name)s [%(levelname)-5s]: %(message)s', asctime_format)
f_handler.setFormatter(f_formatter)
logger.addHandler(f_handler)
logger.setLevel(logging.DEBUG)
elif mode_assignment(log_mode) == 'STREAM':
# stream handlers
s_handler = logging.StreamHandler()
s_formatter = logging.Formatter(stream_format)
s_handler.setFormatter(s_formatter)
logger.addHandler(s_handler)
logger.setLevel(logging.DEBUG)
elif mode_assignment(log_mode) == 'SYSLOG':
sys_handler = logging.handlers.SysLogHandler(address='/dev/log', facility=syslog_facility)
sys_formatter = logging.Formatter(syslog_format)
sys_handler.setFormatter(sys_formatter)
logger.addHandler(sys_handler)
logger.setLevel(logging.DEBUG)
else:
syslog.warning(
'%s: [WARNING]: log_mode value of (%s) unrecognized - not supported' %
(inspect.stack()[0][3], str(log_mode))
)
ex = Exception(
'%s: Unsupported mode indicated by log_mode value: %s' %
(inspect.stack()[0][3], str(log_mode))
)
raise ex
except OSError as e:
raise e
return logger
|
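A hedged usage sketch for getLogger(); the `local_config` keys are taken from the function body, while the values (and the behaviour of the external `mode_assignment` helper) are assumptions:

# Assumed config shape; only the keys referenced by getLogger() are set.
local_config = {'LOGGING': {'LOG_MODE': 'stream',
                            'LOG_PATH': '/tmp/app.log',
                            'SYSLOG_FILE': False}}
log = getLogger(__name__)  # assumes mode_assignment('stream') == 'STREAM'
log.info('logger initialised in stream mode')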
python
|
def search(self, read_cache=True, ssl_verify=True, recid=None, **kwparams):
"""
Returns records corresponding to the given search query.
See docstring of invenio.legacy.search_engine.perform_request_search()
for an overview of available parameters.
"""
parse_results = False
of = kwparams.get('of', "")
if of == "":
parse_results = True
of = "xm"
kwparams['of'] = of
params = kwparams
cache_key = (json.dumps(params), parse_results)
if cache_key not in self.cached_queries or \
not read_cache:
if recid:
results = requests.get(self.server_url + '/record/' + recid,
params=params, cookies=self.cookies,
stream=True, verify=ssl_verify,
allow_redirects=True)
if results.history:
new_recid = urlparse(results.url).path.split('/')[-1]
                    raise InvenioConnectorServerError('The record has been '
                                                      'merged with recid ' +
                                                      new_recid)
else:
results = requests.get(self.server_url + "/search",
params=params, cookies=self.cookies,
stream=True, verify=ssl_verify)
if 'youraccount/login' in results.url:
# Current user not able to search collection
raise InvenioConnectorAuthError(
"You are trying to search a restricted collection. "
"Please authenticate yourself.\n")
else:
return self.cached_queries[cache_key]
if parse_results:
# FIXME: we should not try to parse if results is string
parsed_records = self._parse_results(results.raw,
self.cached_records)
self.cached_queries[cache_key] = parsed_records
return parsed_records
else:
# pylint: disable=E1103
# The whole point of the following code is to make sure we can
# handle two types of variable.
try:
res = results.content
except AttributeError:
res = results
# pylint: enable=E1103
if of == "id":
try:
if isinstance(res, binary_type):
# Transform to list
                    res = [int(record_id.strip()) for record_id in
                           res.decode('utf-8').strip("[]").split(",")
                           if record_id.strip() != ""]
res.reverse()
except (ValueError, AttributeError):
res = []
self.cached_queries[cache_key] = res
return res
|
python
|
def resend_invitations(self):
"""
Resends invites for an event. ::
event = service.calendar().get_event(id='KEY HERE')
event.resend_invitations()
Anybody who has not declined this meeting will get a new invite.
"""
if not self.id:
raise TypeError(u"You can't send invites for an event that hasn't been created yet.")
# Under the hood, this is just an .update() but with no attributes changed.
# We're going to enforce that by checking if there are any changed attributes and bail if there are
if self._dirty_attributes:
raise ValueError(u"There are unsaved changes to this invite - please update it first: %r" % self._dirty_attributes)
self.refresh_change_key()
body = soap_request.update_item(self, [], calendar_item_update_operation_type=u'SendOnlyToAll')
self.service.send(body)
return self
|
java
|
static Token complementRanges(Token token) {
if (token.type != RANGE && token.type != NRANGE)
throw new IllegalArgumentException(
"Token#complementRanges(): must be RANGE: " + token.type);
RangeToken tok = (RangeToken) token;
tok.sortRanges();
tok.compactRanges();
int len = tok.ranges.length + 2;
if (tok.ranges[0] == 0)
len -= 2;
int last = tok.ranges[tok.ranges.length - 1];
if (last == UTF16_MAX)
len -= 2;
RangeToken ret = Token.createRange();
ret.ranges = new int[len];
int wp = 0;
if (tok.ranges[0] > 0) {
ret.ranges[wp++] = 0;
ret.ranges[wp++] = tok.ranges[0] - 1;
}
for (int i = 1; i < tok.ranges.length - 2; i += 2) {
ret.ranges[wp++] = tok.ranges[i] + 1;
ret.ranges[wp++] = tok.ranges[i + 1] - 1;
}
if (last != UTF16_MAX) {
ret.ranges[wp++] = last + 1;
ret.ranges[wp] = UTF16_MAX;
}
ret.setCompacted();
return ret;
}
|
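The same complement-of-sorted-ranges idea as a short, self-contained Python sketch (inclusive [lo, hi] pairs over 0..MAX, mirroring the Java logic above; not the library's API):

MAX = 0x10FFFF  # stand-in for UTF16_MAX

def complement_ranges(ranges):
    # ranges: sorted, compacted flat list [lo0, hi0, lo1, hi1, ...], inclusive.
    out = []
    if ranges[0] > 0:
        out += [0, ranges[0] - 1]
    for i in range(1, len(ranges) - 2, 2):
        out += [ranges[i] + 1, ranges[i + 1] - 1]
    if ranges[-1] != MAX:
        out += [ranges[-1] + 1, MAX]
    return out

print(complement_ranges([5, 10, 20, 30]))  # [0, 4, 11, 19, 31, 1114111]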
java
|
private static String key2Str(Key k, byte type) {
String s = key2Str_impl(k, type);
Key x;
assert (x = str2Key_impl(s)).equals(k) : "bijection fail " + k + "." + (char) type + " <-> " + s + " <-> " + x;
return s;
}
|
java
|
private static String mapEnglishPennTagSetToNAF(final String postag) {
if (postag.startsWith("RB")) {
return "A"; // adverb
} else if (postag.equalsIgnoreCase("CC")) {
return "C"; // conjunction
} else if (postag.startsWith("D") || postag.equalsIgnoreCase("PDT")) {
return "D"; // determiner and predeterminer
} else if (postag.startsWith("J")) {
return "G"; // adjective
} else if (postag.equalsIgnoreCase("NN")
|| postag.equalsIgnoreCase("NNS")) {
return "N"; // common noun
} else if (postag.startsWith("NNP")) {
return "R"; // proper noun
} else if (postag.equalsIgnoreCase("TO") || postag.equalsIgnoreCase("IN")) {
return "P"; // preposition
} else if (postag.startsWith("PRP") || postag.startsWith("WP")) {
return "Q"; // pronoun
} else if (postag.startsWith("V")) {
return "V"; // verb
} else {
return "O"; // other
}
}
|
java
|
public final <T> TcpServer attr(AttributeKey<T> key, @Nullable T value) {
Objects.requireNonNull(key, "key");
return bootstrap(b -> b.childAttr(key, value));
}
|
java
|
public static List<? extends IResource> chain(IResource resource)
{
List<IResource> chain = new ArrayList<IResource>();
while (true)
{
chain.add(resource);
if (!resource.hasParent())
break;
resource = resource.getParent();
}
return chain;
}
|
java
|
protected void updateRoles(final LoginContext _login,
final Person _person)
throws EFapsException
{
for (final JAASSystem system : JAASSystem.getAllJAASSystems()) {
if (system.getRoleJAASPrincipleClass() != null) {
final Set<?> rolesJaas = _login.getSubject().getPrincipals(system.getRoleJAASPrincipleClass());
final Set<Role> rolesEfaps = new HashSet<>();
for (final Object roleObj : rolesJaas) {
try {
final String roleKey = (String) system.getRoleMethodKey().invoke(roleObj);
final Role roleEfaps = Role.getWithJAASKey(system, roleKey);
if (roleEfaps != null) {
rolesEfaps.add(roleEfaps);
}
                    } catch (final IllegalAccessException | IllegalArgumentException
                                    | InvocationTargetException e) {
                        LoginHandler.LOG.error("could not execute role key method for system " + system.getName(), e);
                    }
}
_person.setRoles(system, rolesEfaps);
}
}
}
|
java
|
public static int writeString(DataOutput out, String s) throws IOException {
if (s.length() > 0xffff/3) { // maybe too long
LOG.warn("truncating long string: " + s.length()
+ " chars, starting with " + s.substring(0, 20));
s = s.substring(0, 0xffff/3);
}
int len = utf8Length(s);
if (len > 0xffff) // double-check length
throw new IOException("string too long!");
out.writeShort(len);
writeChars(out, s, 0, s.length());
return len;
}
|
java
|
public double getDoublePropertyFallback(T property) throws NumberFormatException
{
try
{
return Double.parseDouble(getProperty(property));
}
catch (NumberFormatException e)
{
return Double.parseDouble(getDefaultProperty(property));
}
}
|
python
|
def get_film_id(self, title, three_dimensional=False):
"""get the film id using the title in conjunction with the searching function"""
films = self.film_search(title)
for film in films:
        if ('3D' in film['title']) == three_dimensional:
return film['edi']
return -1
|
java
|
@Override
public double getPercentageOfIdentity(boolean countGaps) {
double seqid = getNumIdenticals();
double length = getLength();
if (!countGaps) {
length = length - getAlignedSequence(1).getNumGapPositions()
- getAlignedSequence(2).getNumGapPositions();
}
return seqid / length;
}
|
java
|
@Override
public Future<Void> registerProvider(final String domain,
Object provider,
ProviderQos providerQos,
boolean awaitGlobalRegistration) {
if (providerQos == null) {
throw new JoynrRuntimeException("providerQos == null. It must not be null");
}
ProviderContainer providerContainer = providerContainerFactory.create(provider);
String participantId = participantIdStorage.getProviderParticipantId(domain,
providerContainer.getInterfaceName(),
providerContainer.getMajorVersion());
String defaultPublicKeyId = "";
DiscoveryEntry discoveryEntry = new DiscoveryEntry(getVersionFromAnnotation(provider.getClass()),
domain,
providerContainer.getInterfaceName(),
participantId,
providerQos,
System.currentTimeMillis(),
System.currentTimeMillis() + defaultExpiryTimeMs,
defaultPublicKeyId);
final boolean isGloballyVisible = (discoveryEntry.getQos().getScope() == ProviderScope.GLOBAL);
messageRouter.addNextHop(participantId, libjoynrMessagingAddress, isGloballyVisible);
providerDirectory.add(participantId, providerContainer);
Callback<Void> callback = new Callback<Void>() {
@Override
public void onSuccess(@CheckForNull Void result) {
}
@Override
public void onFailure(JoynrRuntimeException runtimeException) {
logger.error("Unexpected Error while registering Provider:", runtimeException);
}
};
return localDiscoveryAggregator.add(callback, discoveryEntry, awaitGlobalRegistration);
}
|
java
|
protected JsonToken handleBinary() throws IOException {
int size = _in.readInt();
byte subtype = _in.readByte();
Context ctx = getContext();
switch (subtype) {
case BsonConstants.SUBTYPE_BINARY_OLD:
int size2 = _in.readInt();
byte[] buf2 = new byte[size2];
_in.readFully(buf2);
ctx.value = buf2;
break;
case BsonConstants.SUBTYPE_UUID:
long l1 = _in.readLong();
long l2 = _in.readLong();
ctx.value = new UUID(l1, l2);
break;
default:
byte[] buf = new byte[size];
_in.readFully(buf);
ctx.value = buf;
break;
}
return JsonToken.VALUE_EMBEDDED_OBJECT;
}
|
java
|
public List<Integer> getDogmaAttributes(String datasource, String ifNoneMatch) throws ApiException {
ApiResponse<List<Integer>> resp = getDogmaAttributesWithHttpInfo(datasource, ifNoneMatch);
return resp.getData();
}
|
python
|
def register_actions(self, shortcut_manager):
"""Register callback methods fot triggered actions.
:param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager: Shortcut Manager Object holding mappings
between shortcuts and actions.
"""
shortcut_manager.add_callback_for_action('close', self.on_close_shortcut)
# Call register_action of parent in order to register actions for child controllers
super(StateMachinesEditorController, self).register_actions(shortcut_manager)
|
python
|
def burst_run(self):
""" Run CPU as fast as Python can... """
# https://wiki.python.org/moin/PythonSpeed/PerformanceTips#Avoiding_dots...
get_and_call_next_op = self.get_and_call_next_op
for __ in range(self.outer_burst_op_count):
for __ in range(self.inner_burst_op_count):
get_and_call_next_op()
self.call_sync_callbacks()
|
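The `get_and_call_next_op = self.get_and_call_next_op` line is the attribute-lookup-avoidance trick the linked wiki page describes; a tiny self-contained illustration of the same idea:

import timeit

class C:
    def op(self):
        pass

c = C()

def with_dots(n=100_000):
    for _ in range(n):
        c.op()          # attribute lookup on every iteration

def with_alias(n=100_000):
    op = c.op           # one lookup, then plain local-variable access
    for _ in range(n):
        op()

print(timeit.timeit(with_dots, number=10))
print(timeit.timeit(with_alias, number=10))  # usually measurably faster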
java
|
public final void entryRuleLiteralCondition() throws RecognitionException {
try {
// InternalXtext.g:596:1: ( ruleLiteralCondition EOF )
// InternalXtext.g:597:1: ruleLiteralCondition EOF
{
before(grammarAccess.getLiteralConditionRule());
pushFollow(FollowSets000.FOLLOW_1);
ruleLiteralCondition();
state._fsp--;
after(grammarAccess.getLiteralConditionRule());
match(input,EOF,FollowSets000.FOLLOW_2);
}
}
catch (RecognitionException re) {
reportError(re);
recover(input,re);
}
finally {
}
return ;
}
|
java
|
private String resolveUri(String rawUri, EntityContext context, boolean handleURIOverride) throws Siren4JException {
String resolvedUri = rawUri;
String baseUri = null;
boolean fullyQualified = false;
if (context.getCurrentObject() instanceof Resource) {
Resource resource = (Resource) context.getCurrentObject();
baseUri = resource.getBaseUri();
fullyQualified = resource.isFullyQualifiedLinks() == null ? false : resource.isFullyQualifiedLinks();
String override = resource.getOverrideUri();
if (handleURIOverride && StringUtils.isNotBlank(override)) {
resolvedUri = override;
}
}
resolvedUri = handleTokenReplacement(resolvedUri, context);
if (fullyQualified && StringUtils.isNotBlank(baseUri)
&& !isAbsoluteUri(resolvedUri)) {
StringBuffer sb = new StringBuffer();
sb.append(baseUri.endsWith("/") ? baseUri.substring(0, baseUri.length() - 1) : baseUri);
sb.append(resolvedUri.startsWith("/") ? resolvedUri : "/" + resolvedUri);
resolvedUri = sb.toString();
}
return resolvedUri;
}
|
python
|
def Parse(self, rdf_data):
"""Process rdf data through filters. Test if results match expectations.
Processing of rdf data is staged by a filter handler, which manages the
processing of host data. The output of the filters are compared against
expected results.
Args:
      rdf_data: A list containing 0 or more rdf values.
Returns:
An anomaly if data didn't match expectations.
Raises:
ProcessingError: If rdf_data is not a handled type.
"""
if not isinstance(rdf_data, (list, set)):
raise ProcessingError("Bad host data format: %s" % type(rdf_data))
if self.baseline:
comparison = self.baseliner.Parse(rdf_data)
else:
comparison = rdf_data
found = self.handler.Parse(comparison)
results = self.hint.Render(found)
return self.matcher.Detect(comparison, results)
|
python
|
def multiply(self, matrix):
"""
Multiply this matrix by a local dense matrix on the right.
:param matrix: a local dense matrix whose number of rows must match the number of columns
of this matrix
:returns: :py:class:`IndexedRowMatrix`
>>> mat = IndexedRowMatrix(sc.parallelize([(0, (0, 1)), (1, (2, 3))]))
>>> mat.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect()
[IndexedRow(0, [2.0,3.0]), IndexedRow(1, [6.0,11.0])]
"""
if not isinstance(matrix, DenseMatrix):
raise ValueError("Only multiplication with DenseMatrix "
"is supported.")
return IndexedRowMatrix(self._java_matrix_wrapper.call("multiply", matrix))
|
java
|
public SimplifySpanBuild append(String text) {
if (TextUtils.isEmpty(text)) return this;
mNormalSizeText.append(text);
mStringBuilder.append(text);
return this;
}
|
java
|
public void marshallAsAttribute(final AttributeDefinition attribute,final ModelNode resourceModel, final boolean marshallDefault, final XMLStreamWriter writer) throws XMLStreamException{
throw ControllerLogger.ROOT_LOGGER.couldNotMarshalAttributeAsAttribute(attribute.getName());
}
|
java
|
protected ResponseFuture doInvokeAsync(SofaRequest request, RpcInternalContext rpcContext, int timeoutMillis) {
SofaResponseCallback listener = request.getSofaResponseCallback();
if (listener != null) {
AbstractHttpClientHandler callback = new CallbackInvokeClientHandler(transportConfig.getConsumerConfig(),
transportConfig.getProviderInfo(), listener, request, rpcContext,
ClassLoaderUtils.getCurrentClassLoader());
doSend(request, callback, timeoutMillis);
return null;
} else {
HttpResponseFuture future = new HttpResponseFuture(request, timeoutMillis);
AbstractHttpClientHandler callback = new FutureInvokeClientHandler(transportConfig.getConsumerConfig(),
transportConfig.getProviderInfo(), future, request, rpcContext,
ClassLoaderUtils.getCurrentClassLoader());
doSend(request, callback, timeoutMillis);
future.setSentTime();
return future;
}
}
|
java
|
public ExternalAutoCommitTransaction createAutoCommitTransaction()
{
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(tc, "createAutoCommitTransaction");
ExternalAutoCommitTransaction instance = new MSAutoCommitTransaction(_ms, _persistence, getMaximumTransactionSize());
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(tc, "createAutoCommitTransaction", "return="+instance);
return instance;
}
|
java
|
public static synchronized Map<String, Statistics> getStatistics() {
Map<String, Statistics> result = new HashMap<String, Statistics>();
for(Statistics stat: statisticsTable.values()) {
result.put(stat.getScheme(), stat);
}
return result;
}
|
python
|
def _is_in_try_again(self, x, y):
"""Checks if the game is to be restarted."""
if self.won == 1:
# Checks if in try button on won screen.
x1, y1, x2, y2 = self._won_try_again
return x1 <= x < x2 and y1 <= y < y2
elif self.lost:
# Checks if in try button on lost screen.
x1, y1, x2, y2 = self._lost_try_again
return x1 <= x < x2 and y1 <= y < y2
# Otherwise just no.
return False
|
java
|
public static double elementDiagonalMaxAbs( DMatrixD1 a ) {
final int size = Math.min(a.numRows,a.numCols);
double max = 0;
for( int i = 0; i < size; i++ ) {
double val = Math.abs(a.get( i,i ));
if( val > max ) {
max = val;
}
}
return max;
}
|
python
|
def indent(text: str, num: int = 2) -> str:
"""Indent a piece of text."""
lines = text.splitlines()
return "\n".join(indent_iterable(lines, num=num))
|
python
|
def create_head(nf:int, nc:int, lin_ftrs:Optional[Collection[int]]=None, ps:Floats=0.5,
concat_pool:bool=True, bn_final:bool=False):
"Model head that takes `nf` features, runs through `lin_ftrs`, and about `nc` classes."
lin_ftrs = [nf, 512, nc] if lin_ftrs is None else [nf] + lin_ftrs + [nc]
ps = listify(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += bn_drop_lin(ni, no, True, p, actn)
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
return nn.Sequential(*layers)
|
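A hedged usage sketch, assuming the fastai-style helpers the function references (`bn_drop_lin`, `AdaptiveConcatPool2d`, `Flatten`, `listify`) are in scope:

# Hypothetical call: head for a backbone yielding 1024 pooled features, 10 classes.
head = create_head(nf=1024, nc=10, lin_ftrs=[512], ps=0.5)
# -> Sequential(pool, Flatten, then BN/Dropout/Linear blocks: 1024 -> 512 -> 10)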
python
|
def _signal_handler(self, *_) -> None:
"""
On the first signal, increase the ``self._num_signals`` counter.
Call ``sys.exit`` on any subsequent signal.
"""
if self._num_signals == 0:
logging.warning('Interrupt signal caught - training will be terminated')
logging.warning('Another interrupt signal will terminate the program immediately')
self._num_signals += 1
else:
logging.error('Another interrupt signal caught - terminating program immediately')
sys.exit(2)
|
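A sketch of how such a handler is typically wired up (the owning `Trainer` object is an assumption; only the registration call is shown):

import signal

trainer = Trainer()  # hypothetical object owning _signal_handler and _num_signals
signal.signal(signal.SIGINT, trainer._signal_handler)  # 1st Ctrl+C warns, 2nd exits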
python
|
def performXpath(parent, xpath):
""" Perform an XPath on an element and indicate if we need to loop over it to find something
:param parent: XML Node on which to perform XPath
:param xpath: XPath to run
:return: (Result, Need to loop Indicator)
"""
loop = False
if xpath.startswith(".//"):
result = parent.xpath(
xpath.replace(".//", "./", 1),
namespaces=XPATH_NAMESPACES
)
if len(result) == 0:
result = parent.xpath(
"*[{}]".format(xpath),
namespaces=XPATH_NAMESPACES
)
loop = True
else:
result = parent.xpath(
xpath,
namespaces=XPATH_NAMESPACES
)
return result[0], loop
|
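A minimal usage sketch with lxml; the `XPATH_NAMESPACES` mapping shown is an assumption (the function only requires that such a module-level dict exists):

from lxml import etree

XPATH_NAMESPACES = {'tei': 'http://www.tei-c.org/ns/1.0'}  # assumed mapping
root = etree.fromstring('<root><child>text</child></root>')
node, need_loop = performXpath(root, './child')
print(node.text, need_loop)  # -> text False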
java
|
private Assignment doSamplingRound(FactorGraph factorGraph, Assignment curAssignment) {
Assignment assignment = curAssignment;
int[] variableNums = factorGraph.getVariables().getVariableNumsArray();
for (int i = 0; i < variableNums.length; i++) {
assignment = doSample(factorGraph, assignment, variableNums[i]);
}
return assignment;
}
|
python
|
def recalculate_concepts(self, concepts, lang=None):
"""
        Recalculate given concepts for given users.
        Args:
            concepts (dict): mapping of user id (int) to the set of concepts
                to recalculate
            lang (Optional[str]): language used to get items in all concepts
                (cached). Defaults to None; in that case items are fetched
                only for the used concepts.
"""
if len(concepts) == 0:
return
if lang is None:
items = Concept.objects.get_concept_item_mapping(concepts=Concept.objects.filter(pk__in=set(flatten(concepts.values()))))
else:
items = Concept.objects.get_concept_item_mapping(lang=lang)
environment = get_environment()
mastery_threshold = get_mastery_trashold()
for user, concepts in concepts.items():
all_items = list(set(flatten([items[c] for c in concepts])))
answer_counts = environment.number_of_answers_more_items(all_items, user)
correct_answer_counts = environment.number_of_correct_answers_more_items(all_items, user)
predictions = dict(list(zip(all_items, get_predictive_model().
predict_more_items(environment, user, all_items, time=get_time_for_knowledge_overview()))))
new_user_stats = []
stats_to_delete_condition = Q()
for concept in concepts:
answer_aggregates = Answer.objects.filter(user=user, item__in=items[concept]).aggregate(
time_spent=Sum("response_time"),
sessions=Count("session", True),
time_first=Min("time"),
time_last=Max("time"),
)
stats = {
"answer_count": sum(answer_counts[i] for i in items[concept]),
"correct_answer_count": sum(correct_answer_counts[i] for i in items[concept]),
"item_count": len(items[concept]),
"practiced_items_count": sum([answer_counts[i] > 0 for i in items[concept]]),
"mastered_items_count": sum([predictions[i] >= mastery_threshold for i in items[concept]]),
"prediction": sum([predictions[i] for i in items[concept]]) / len(items[concept]),
"time_spent": answer_aggregates["time_spent"] / 1000,
"session_count": answer_aggregates["sessions"],
"time_first": answer_aggregates["time_first"].timestamp(),
"time_last": answer_aggregates["time_last"].timestamp(),
}
stats_to_delete_condition |= Q(user=user, concept=concept)
for stat_name, value in stats.items():
new_user_stats.append(UserStat(user_id=user, concept_id=concept, stat=stat_name, value=value))
self.filter(stats_to_delete_condition).delete()
self.bulk_create(new_user_stats)
|
java
|
public ListenerUtils putListener(String tag, SuperToast.OnDismissListener onDismissListener) {
this.mOnDismissListenerHashMap.put(tag, onDismissListener);
return this;
}
|
python
|
def all_terminated():
"""For each remote shell determine if its terminated"""
instances_found = False
for i in all_instances():
instances_found = True
if i.state not in (remote_dispatcher.STATE_TERMINATED,
remote_dispatcher.STATE_DEAD):
return False
return instances_found
|
java
|
public void releaseMe() {
Ancestor eot = Stapler.getCurrentRequest().findAncestor(BoundObjectTable.class);
if (eot==null)
        throw new IllegalStateException("The thread is not handling a request to a bound object");
String id = eot.getNextToken(0);
resolve(false).release(id); // resolve(false) can't fail because we are processing this request now.
}
|
java
|
@Override
public Response toResponse(final AccessDeniedException e) {
debugException(this, e, LOGGER);
return status(FORBIDDEN).build();
}
|
python
|
def moment1(self):
"""The first time delay weighted statistical moment of the
instantaneous unit hydrograph."""
delays, response = self.delay_response_series
return statstools.calc_mean_time(delays, response)
|
python
|
def get_downsample_pct(in_bam, target_counts, data):
"""Retrieve percentage of file to downsample to get to target counts.
Avoids minimal downsample which is not especially useful for
improving QC times; 90& or more of reads.
"""
total = sum(x.aligned for x in idxstats(in_bam, data))
with pysam.Samfile(in_bam, "rb") as work_bam:
n_rgs = max(1, len(work_bam.header.get("RG", [])))
rg_target = n_rgs * target_counts
if total > rg_target:
pct = float(rg_target) / float(total)
if pct < 0.9:
return pct
|
java
|
public void marshall(AssociateIpGroupsRequest associateIpGroupsRequest, ProtocolMarshaller protocolMarshaller) {
if (associateIpGroupsRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(associateIpGroupsRequest.getDirectoryId(), DIRECTORYID_BINDING);
protocolMarshaller.marshall(associateIpGroupsRequest.getGroupIds(), GROUPIDS_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
}
|
java
|
public static <K, V, NV> Map<K, NV> newFilteredChangedValueWithEntryMap(
Map<K, V> map, Predicate<Entry<K, V>> filter,
Function<Entry<K, V>, NV> changingValueFunction) {
return getEntryStreamWithFilter(map, filter).collect(
toMap(Entry::getKey, changingValueFunction::apply));
}
|
python
|
def quote_edge(identifier):
"""Return DOT edge statement node_id from string, quote if needed.
>>> quote_edge('spam')
'spam'
>>> quote_edge('spam spam:eggs eggs')
'"spam spam":"eggs eggs"'
>>> quote_edge('spam:eggs:s')
'spam:eggs:s'
"""
node, _, rest = identifier.partition(':')
parts = [quote(node)]
if rest:
port, _, compass = rest.partition(':')
parts.append(quote(port))
if compass:
parts.append(compass)
return ':'.join(parts)
|
python
|
def __bad_transition(self, state):
"""
Raises an C{AssertionError} exception for an invalid state transition.
@see: L{stateNames}
@type state: int
@param state: Intended breakpoint state.
@raise Exception: Always.
"""
statemsg = ""
oldState = self.stateNames[ self.get_state() ]
newState = self.stateNames[ state ]
msg = "Invalid state transition (%s -> %s)" \
" for breakpoint at address %s"
msg = msg % (oldState, newState, HexDump.address(self.get_address()))
raise AssertionError(msg)
|
java
|
public URL asDirectoryURL() throws MalformedURLException {
final String pathName = getPathName(false);
return new URL(VFSUtils.VFS_PROTOCOL, "", -1, parent == null ? pathName : pathName + "/", VFSUtils.VFS_URL_HANDLER);
}
|
java
|
@Override
public void removeSection(int identity) throws InvalidRecoverableUnitSectionException, InternalLogException
{
if (tc.isEntryEnabled())
Tr.entry(tc, "removeSection", new java.lang.Object[] { this, new Integer(identity) });
// REQD: Implementation not yet provided. No users of the RLS currently require this operation.
if (tc.isEntryEnabled())
Tr.exit(tc, "removeSection", "UnsupportedOperationException");
throw new java.lang.UnsupportedOperationException();
}
|
java
|
public <OUT> IPromise serve(Publisher<OUT> source, ActorPublisher networRxPublisher, boolean closeConnectionOnCompleteOrError, Consumer<Actor> disconCB) {
if ( networRxPublisher.getClass().getSimpleName().equals("HttpPublisher") ) {
throw new RuntimeException("Http long poll cannot be supported. Use WebSockets instead.");
}
if (source instanceof KxPublisherActor == false || source instanceof ActorProxy == false ) {
Processor<OUT, OUT> proc = newAsyncProcessor(a -> a); // we need a queue before going to network
source.subscribe(proc);
source = proc;
}
((KxPublisherActor)source).setCloseOnComplete(closeConnectionOnCompleteOrError);
return networRxPublisher.facade((Actor) source).publish(disconCB);
}
|
java
|
@Nullable
private TableReportEntryTable convertToTableReportEntryTable(
String tableId, Map<String, Object> map, Predicate<String> placementFilter,
Predicate<Boolean> droppedFilter, Predicate<Boolean> facadeFilter) {
// Check the filters for placement, dropped, and facade
String placement = (String) map.get("placement");
if (!placementFilter.apply(placement)) {
return null;
}
Boolean dropped = Objects.firstNonNull((Boolean) map.get("dropped"), false);
if (!droppedFilter.apply(dropped)) {
return null;
}
Boolean facade = Objects.firstNonNull((Boolean) map.get("facade"), false);
if (!facadeFilter.apply(facade)) {
return null;
}
List<Integer> shards = Lists.newArrayList();
// Aggregate the column, size and update statistics across all shards.
TableStatistics.Aggregator aggregator = TableStatistics.newAggregator();
Object shardJson = map.get("shards");
if (shardJson != null) {
Map<String, TableStatistics> shardMap = JsonHelper.convert(
shardJson, new TypeReference<Map<String, TableStatistics>>() {
});
for (Map.Entry<String, TableStatistics> entry : shardMap.entrySet()) {
Integer shardId = Integer.parseInt(entry.getKey());
shards.add(shardId);
aggregator.add(entry.getValue());
}
}
TableStatistics tableStatistics = aggregator.aggregate();
Collections.sort(shards);
return new TableReportEntryTable(tableId, placement, shards, dropped, facade, tableStatistics.getRecordCount(),
tableStatistics.getColumnStatistics().toStatistics(),
tableStatistics.getSizeStatistics().toStatistics(),
tableStatistics.getUpdateTimeStatistics().toStatistics());
}
|
python
|
def get_dynamical_matrix_at_q(self, q):
"""Calculate dynamical matrix at a given q-point
Parameters
----------
q: array_like
A q-vector.
shape=(3,), dtype='double'
Returns
-------
dynamical_matrix: ndarray
Dynamical matrix.
shape=(bands, bands), dtype='complex'
"""
self._set_dynamical_matrix()
if self._dynamical_matrix is None:
msg = ("Dynamical matrix has not yet built.")
raise RuntimeError(msg)
self._dynamical_matrix.set_dynamical_matrix(q)
return self._dynamical_matrix.get_dynamical_matrix()
|
java
|
public void advanceNodeStage() {
this.setQueryStageTimeStamp(Calendar.getInstance().getTime());
switch (this.nodeStage) {
case NODEBUILDINFO_EMPTYNODE:
try {
this.setNodeStage(ZWaveNode.NodeStage.NODEBUILDINFO_PROTOINFO);
this.controller.identifyNode(this.nodeId);
} catch (SerialInterfaceException e) {
logger.error("Got error: {}, while identifying node {}", e.getLocalizedMessage(), this.nodeId);
}
break;
case NODEBUILDINFO_PROTOINFO:
if (nodeId != this.controller.getOwnNodeId())
{
ZWaveNoOperationCommandClass zwaveCommandClass = (ZWaveNoOperationCommandClass)supportedCommandClasses.get(ZWaveCommandClass.CommandClass.NO_OPERATION);
if (zwaveCommandClass == null)
break;
this.setNodeStage(ZWaveNode.NodeStage.NODEBUILDINFO_PING);
this.controller.sendData(zwaveCommandClass.getNoOperationMessage());
} else
{
this.setNodeStage(ZWaveNode.NodeStage.NODEBUILDINFO_DONE); // nothing more to do for this node.
}
break;
case NODEBUILDINFO_PING:
case NODEBUILDINFO_WAKEUP:
this.setNodeStage(ZWaveNode.NodeStage.NODEBUILDINFO_DETAILS);
this.controller.requestNodeInfo(nodeId);
break;
case NODEBUILDINFO_DETAILS:
// try and get the manufacturerSpecific command class.
ZWaveManufacturerSpecificCommandClass manufacturerSpecific = (ZWaveManufacturerSpecificCommandClass)this.getCommandClass(ZWaveCommandClass.CommandClass.MANUFACTURER_SPECIFIC);
if (manufacturerSpecific != null) {
// if this node implements the Manufacturer Specific command class, we use it to get manufacturer info.
this.setNodeStage(ZWaveNode.NodeStage.NODEBUILDINFO_MANSPEC01);
this.controller.sendData(manufacturerSpecific.getManufacturerSpecificMessage());
break;
}
logger.warn("Node {} does not support MANUFACTURER_SPECIFIC, proceeding to version node stage.", this.getNodeId());
case NODEBUILDINFO_MANSPEC01:
                this.setNodeStage(ZWaveNode.NodeStage.NODEBUILDINFO_VERSION); // proceed to the version stage.
// try and get the version command class.
ZWaveVersionCommandClass version = (ZWaveVersionCommandClass)this.getCommandClass(ZWaveCommandClass.CommandClass.VERSION);
boolean checkVersionCalled = false;
for (ZWaveCommandClass zwaveCommandClass : this.getCommandClasses()) {
if (version != null && zwaveCommandClass.getMaxVersion() > 1) {
version.checkVersion(zwaveCommandClass); // check version for this command class.
checkVersionCalled = true;
} else
zwaveCommandClass.setVersion(1);
}
if (checkVersionCalled) // wait for another call of advanceNodeStage before continuing.
break;
case NODEBUILDINFO_VERSION:
                this.setNodeStage(ZWaveNode.NodeStage.NODEBUILDINFO_INSTANCES); // proceed to the instances stage.
// try and get the multi instance / channel command class.
ZWaveMultiInstanceCommandClass multiInstance = (ZWaveMultiInstanceCommandClass)this.getCommandClass(ZWaveCommandClass.CommandClass.MULTI_INSTANCE);
if (multiInstance != null) {
multiInstance.initEndpoints();
break;
}
logger.trace("Node {} does not support MULTI_INSTANCE, proceeding to static node stage.", this.getNodeId());
case NODEBUILDINFO_INSTANCES:
this.setNodeStage(ZWaveNode.NodeStage.NODEBUILDINFO_STATIC);
if (queriesPending == -1) {
queriesPending = 0;
for (ZWaveCommandClass zwaveCommandClass : this.getCommandClasses()) {
logger.trace("Inspecting command class {}", zwaveCommandClass.getCommandClass().getLabel());
if (zwaveCommandClass instanceof ZWaveCommandClassInitialization) {
logger.debug("Found initializable command class {}", zwaveCommandClass.getCommandClass().getLabel());
ZWaveCommandClassInitialization zcci = (ZWaveCommandClassInitialization)zwaveCommandClass;
int instances = zwaveCommandClass.getInstances();
if (instances == 0)
{
Collection<SerialMessage> initqueries = zcci.initialize();
for (SerialMessage serialMessage : initqueries) {
this.controller.sendData(serialMessage);
queriesPending++;
}
} else {
for (int i = 1; i <= instances; i++) {
Collection<SerialMessage> initqueries = zcci.initialize();
for (SerialMessage serialMessage : initqueries) {
this.controller.sendData(this.encapsulate(serialMessage, zwaveCommandClass, i));
queriesPending++;
}
}
}
} else if (zwaveCommandClass instanceof ZWaveMultiInstanceCommandClass) {
ZWaveMultiInstanceCommandClass multiInstanceCommandClass = (ZWaveMultiInstanceCommandClass)zwaveCommandClass;
for (ZWaveEndpoint endpoint : multiInstanceCommandClass.getEndpoints()) {
for (ZWaveCommandClass endpointCommandClass : endpoint.getCommandClasses()) {
logger.trace("Inspecting command class {} for endpoint {}", endpointCommandClass.getCommandClass().getLabel(), endpoint.getEndpointId());
if (endpointCommandClass instanceof ZWaveCommandClassInitialization) {
logger.debug("Found initializable command class {}", endpointCommandClass.getCommandClass().getLabel());
ZWaveCommandClassInitialization zcci2 = (ZWaveCommandClassInitialization)endpointCommandClass;
Collection<SerialMessage> initqueries = zcci2.initialize();
for (SerialMessage serialMessage : initqueries) {
this.controller.sendData(this.encapsulate(serialMessage, endpointCommandClass, endpoint.getEndpointId()));
queriesPending++;
}
}
}
}
}
}
}
if (queriesPending-- > 0) // there is still something to be initialized.
break;
logger.trace("Done getting static values, proceeding to done node stage.", this.getNodeId());
case NODEBUILDINFO_STATIC:
this.setNodeStage(ZWaveNode.NodeStage.NODEBUILDINFO_DONE); // nothing more to do for this node.
initializationComplete = true;
if (this.isListening())
return;
ZWaveWakeUpCommandClass wakeup = (ZWaveWakeUpCommandClass)this.getCommandClass(ZWaveCommandClass.CommandClass.WAKE_UP);
if (wakeup == null)
return;
logger.debug("Node {} is a battery operated device. Tell it to go to sleep.", this.getNodeId());
this.controller.sendData(wakeup.getNoMoreInformationMessage());
break;
case NODEBUILDINFO_DONE:
case NODEBUILDINFO_DEAD:
break;
default:
logger.error("Unknown node state {} encountered on Node {}", this.nodeStage.getLabel(), this.getNodeId());
}
}
|
java
|
@Deprecated
public Add add(long projectId, String deviceId, String deviceName, String deviceType,
Date created) {
return add(projectId, new Device.Spec(deviceId, deviceName, deviceType), created);
}
|
java
|
public GenericsContext parameterTypeAs(final int pos, final Class<?> asType) {
checkParameter(pos);
return inlyingTypeAs(meth.getGenericParameterTypes()[pos], asType);
}
|
java
|
@SuppressWarnings("unchecked") // cache ensures types match
@Override public <T> ModuleAdapter<T> getModuleAdapter(Class<T> type) {
return (ModuleAdapter<T>) loadedAdapters.get(type);
}
|
java
|
private void setWeekDays(SortedSet<WeekDay> weekDays) {
List<CmsCheckBox> checked = new ArrayList<CmsCheckBox>();
for (WeekDay day : weekDays) {
for (CmsCheckBox box : m_checkboxes) {
if (box.getInternalValue().equals(day.toString())) {
checked.add(box);
}
}
}
        for (CmsCheckBox box : m_checkboxes) {
            box.setChecked(checked.contains(box));
        }
}
|
java
|
public int getTotalPage() {
    // Ceiling division: total pages = totalNum / pageSize, rounded up.
    int i = totalNum % pageSize;
    int j = totalNum / pageSize;
    return i == 0 ? j : j + 1;
}
|
python
|
def separation_from(self, another_icrf):
"""Return the angle between this position and another.
>>> print(ICRF([1,0,0]).separation_from(ICRF([1,1,0])))
45deg 00' 00.0"
You can also compute separations across an array of positions.
>>> directions = ICRF([[1,0,-1,0], [0,1,0,-1], [0,0,0,0]])
>>> directions.separation_from(ICRF([0,1,0])).degrees
array([ 90., 0., 90., 180.])
"""
p1 = self.position.au
p2 = another_icrf.position.au
u1 = p1 / length_of(p1)
u2 = p2 / length_of(p2)
if u2.ndim > 1:
if u1.ndim == 1:
u1 = u1[:,None]
elif u1.ndim > 1:
u2 = u2[:,None]
c = dots(u1, u2)
return Angle(radians=arccos(clip(c, -1.0, 1.0)))
|
java
|
public long getDateLastVisitedBy(CmsRequestContext context, String poolName, CmsUser user, CmsResource resource)
throws CmsException {
CmsDbContext dbc = m_dbContextFactory.getDbContext(context);
long result = 0;
try {
result = m_driverManager.getDateLastVisitedBy(dbc, poolName, user, resource);
} catch (Exception e) {
dbc.report(
null,
Messages.get().container(
Messages.ERR_GET_DATE_LASTVISITED_2,
user.getName(),
context.getSitePath(resource)),
e);
} finally {
dbc.clear();
}
return result;
}
|
java
|
@Override
public Date computeFirstFireTime (final ICalendar calendar)
{
m_aNextFireTime = getFireTimeAfter (new Date (getStartTime ().getTime () - 1000L));
// Check calendar for date-time exclusion
while (m_aNextFireTime != null && calendar != null && !calendar.isTimeIncluded (m_aNextFireTime.getTime ()))
{
m_aNextFireTime = getFireTimeAfter (m_aNextFireTime);
if (m_aNextFireTime == null)
break;
// avoid infinite loop
final Calendar c = PDTFactory.createCalendar ();
c.setTime (m_aNextFireTime);
if (c.get (Calendar.YEAR) > CQuartz.MAX_YEAR)
{
return null;
}
}
return m_aNextFireTime;
}
|
java
|
public DataLakeAnalyticsAccountInner create(String resourceGroupName, String name, DataLakeAnalyticsAccountInner parameters) {
return createWithServiceResponseAsync(resourceGroupName, name, parameters).toBlocking().last().body();
}
|
java
|
protected Object getProxyFromResultSet() throws PersistenceBrokerException
{
// 1. get Identity of current row:
Identity oid = getIdentityFromResultSet();
// 2. return a Proxy instance:
return getBroker().createProxy(getItemProxyClass(), oid);
}
|
python
|
def _save_to_history(word, data_dir):
"""Write word to history file.
Parameters
----------
word : str
Word to save to history.
data_dir : pathlib.Path
Directory where history file should be saved.
        data_dir and its parent directories will be created if needed.
"""
if not data_dir.exists():
logger.debug("Creating DATA DIR: %s", data_dir.as_posix())
data_dir.mkdir(parents=True)
if word not in _get_words(data_dir):
with open(data_dir.joinpath("words.txt"), mode="a+") as f:
logger.debug("Adding to history: %s", word)
f.write(word + "\n")
|
python
|
def name(self):
"""str: name of the file entry, which does not include the full path."""
if self._name is None:
location = getattr(self.path_spec, 'location', None)
if location is not None:
self._name = self._file_system.BasenamePath(location)
else:
volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(
self.path_spec)
if volume_index is not None:
self._name = 'apfs{0:d}'.format(volume_index + 1)
else:
self._name = ''
return self._name
|
java
|
public CloseableIterator<Entry<KeyType>> combine(
List<? extends CloseableIterator<Entry<KeyType>>> sortedIterators,
List<String> mergedDictionary
)
{
// CombineBuffer is initialized when this method is called and closed after the result iterator is done
final Closer closer = Closer.create();
try {
final ByteBuffer combineBuffer = combineBufferHolder.get();
final int minimumRequiredBufferCapacity = StreamingMergeSortedGrouper.requiredBufferCapacity(
combineKeySerdeFactory.factorizeWithDictionary(mergedDictionary),
combiningFactories
);
// We want to maximize the parallelism while the size of buffer slice is greater than the minimum buffer size
      // required by StreamingMergeSortedGrouper. Here, we find the leafCombineDegree of the combining tree and the
// required number of buffers maximizing the parallelism.
final Pair<Integer, Integer> degreeAndNumBuffers = findLeafCombineDegreeAndNumBuffers(
combineBuffer,
minimumRequiredBufferCapacity,
concurrencyHint,
sortedIterators.size()
);
final int leafCombineDegree = degreeAndNumBuffers.lhs;
final int numBuffers = degreeAndNumBuffers.rhs;
final int sliceSize = combineBuffer.capacity() / numBuffers;
final Supplier<ByteBuffer> bufferSupplier = createCombineBufferSupplier(combineBuffer, numBuffers, sliceSize);
final Pair<List<CloseableIterator<Entry<KeyType>>>, List<Future>> combineIteratorAndFutures = buildCombineTree(
sortedIterators,
bufferSupplier,
combiningFactories,
leafCombineDegree,
mergedDictionary
);
final CloseableIterator<Entry<KeyType>> combineIterator = Iterables.getOnlyElement(combineIteratorAndFutures.lhs);
final List<Future> combineFutures = combineIteratorAndFutures.rhs;
closer.register(() -> checkCombineFutures(combineFutures));
return CloseableIterators.wrap(combineIterator, closer);
}
catch (Throwable t) {
try {
closer.close();
}
catch (Throwable t2) {
t.addSuppressed(t2);
}
throw t;
}
}
|
python
|
def _mark_started(self):
"""
Set the state information for a task once it has completely started.
        In particular, the time limit is applied as of this time (i.e. after
        any start delay has been taken).
"""
log = self._params.get('log', self._discard)
now = time.time()
self._started = now
limit = self._config_running.get('time_limit')
try:
limit = float(_fmt_context(self._get(limit, default='0'), self._context))
if limit > 0:
log.debug("Applying task '%s' time limit of %s", self._name, deltafmt(limit))
self._limit = now + limit
except Exception as e:
log.warn("Task '%s' time_limit value '%s' invalid -- %s",
self._name, limit, e, exc_info=log.isEnabledFor(logging.DEBUG))
|
python
|
def addWatchedDirectory(self, dirname, watching=Purr.WATCHED, save_config=True):
"""Starts watching the specified directories for changes"""
        # see if we're already watching this exact set of directories -- do nothing if so
dirname = Purr.canonizePath(dirname)
# do nothing if already watching
if dirname in self.watched_dirs:
dprint(1, "addWatchDirectory(): already watching %s\n", dirname)
# watching=None means do not change the watch-state
if watching is None:
return
else:
if watching is None:
watching = Purr.WATCHED
# make watcher object
wdir = Purrer.WatchedDir(dirname, mtime=self.timestamp,
watch_patterns=self._watch_patterns, ignore_patterns=self._ignore_patterns)
# fileset=None indicates error reading directory, so ignore it
if wdir.fileset is None:
print("There was an error reading the directory %s, will stop watching it." % dirname)
self.setWatchingState(dirname, Purr.REMOVED, save_config=True)
return
self.watchers[dirname] = wdir
self.watched_dirs.append(dirname)
dprintf(2, "watching directory %s, mtime %s, %d files\n",
dirname, time.strftime("%x %X", time.localtime(wdir.mtime)), len(wdir.fileset))
# find files in this directory matching the watch_patterns, and watch them for changes
watchset = set()
for patt in self._watch_patterns:
watchset.update(fnmatch.filter(wdir.fileset, patt))
for fname in watchset:
quiet = matches_patterns(fname, self._quiet_patterns)
fullname = Purr.canonizePath(os.path.join(dirname, fname))
if fullname not in self.watchers:
wfile = Purrer.WatchedFile(fullname, quiet=quiet, mtime=self.timestamp)
self.watchers[fullname] = wfile
dprintf(3, "watching file %s, timestamp %s, quiet %d\n",
fullname, time.strftime("%x %X", time.localtime(wfile.mtime)), quiet)
# find subdirectories matching the subdir_patterns, and watch them for changes
for fname in wdir.fileset:
fullname = Purr.canonizePath(os.path.join(dirname, fname))
if os.path.isdir(fullname):
for desc, dir_patts, canary_patts in self._subdir_patterns:
if matches_patterns(fname, dir_patts):
quiet = matches_patterns(fname, self._quiet_patterns)
wdir = Purrer.WatchedSubdir(fullname, canary_patterns=canary_patts, quiet=quiet,
mtime=self.timestamp)
self.watchers[fullname] = wdir
dprintf(3, "watching subdirectory %s/{%s}, timestamp %s, quiet %d\n",
fullname, ",".join(canary_patts),
time.strftime("%x %X", time.localtime(wdir.mtime)), quiet)
break
# set state and save config
self.setWatchingState(dirname, watching, save_config=save_config)
|
python
|
def query_segdb(cls, flag, *args, **kwargs):
"""Query the initial LIGO segment database for the given flag
Parameters
----------
flag : `str`
The name of the flag for which to query
*args
Either, two `float`-like numbers indicating the
GPS [start, stop) interval, or a `SegmentList`
defining a number of summary segments
url : `str`, optional
URL of the segment database, defaults to
``$DEFAULT_SEGMENT_SERVER`` environment variable, or
        ``'https://segments.ligo.org'``

    Returns
-------
flag : `DataQualityFlag`
A new `DataQualityFlag`, with the `known` and `active` lists
filled appropriately.
"""
warnings.warn("query_segdb is deprecated and will be removed in a "
"future release", DeprecationWarning)
# parse arguments
qsegs = _parse_query_segments(args, cls.query_segdb)
# process query
try:
flags = DataQualityDict.query_segdb([flag], qsegs, **kwargs)
except TypeError as exc:
if 'DataQualityDict' in str(exc):
raise TypeError(str(exc).replace('DataQualityDict',
cls.__name__))
else:
raise
if len(flags) > 1:
raise RuntimeError("Multiple flags returned for single query, "
"something went wrong:\n %s"
% '\n '.join(flags.keys()))
elif len(flags) == 0:
raise RuntimeError("No flags returned for single query, "
"something went wrong.")
return flags[flag]
|
python
|
def timeout(seconds):
"""
Raises a TimeoutError if a function does not terminate within
specified seconds.
"""
    def _timeout_error(signum, frame):
        # NB: parameter renamed from 'signal' to avoid shadowing the signal module;
        # the single-line message avoids the indentation that a backslash
        # continuation inside the string literal would embed.
        raise TimeoutError(
            "Operation did not finish within {} seconds".format(seconds))
def timeout_decorator(func):
@wraps(func)
def timeout_wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _timeout_error)
signal.alarm(seconds)
try:
return func(*args, **kwargs)
finally:
signal.alarm(0)
return timeout_wrapper
return timeout_decorator
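
# Minimal usage sketch (not part of the original source). The decorator above
# additionally needs `import signal` and `from functools import wraps`; it is
# Unix-only (SIGALRM), main-thread-only, and takes whole seconds. `slow` is a
# hypothetical function used for illustration.
import time

@timeout(2)
def slow():
    time.sleep(5)

try:
    slow()
except TimeoutError as exc:
    print(exc)  # raised after ~2 seconds instead of 5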
|
java
|
public static long MurmurHash3_x64_64_direct(MemoryAccessor mem, long base, int offset, int len) {
return MurmurHash3_x64_64(mem.isBigEndian() ? NARROW_DIRECT_LOADER : WIDE_DIRECT_LOADER,
mem, base + offset, len, DEFAULT_MURMUR_SEED);
}
|
python
|
def get_nfc_quick_check_property(value, is_bytes=False):
"""Get `NFC QUICK CHECK` property."""
obj = unidata.ascii_nfc_quick_check if is_bytes else unidata.unicode_nfc_quick_check
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['nfcquickcheck'].get(negated, negated)
else:
value = unidata.unicode_alias['nfcquickcheck'].get(value, value)
return obj[value]
|
python
|
def as_stream(self):
"""Convert this selector to a DataStream.
This function will only work if this is a singular selector that
matches exactly one DataStream.
"""
if not self.singular:
raise ArgumentError("Attempted to convert a non-singular selector to a data stream, it matches multiple", selector=self)
return DataStream(self.match_type, self.match_id, self.match_spec == DataStreamSelector.MatchSystemOnly)
|
java
|
public List<ExpandedAnnotation> expand(final Annotation targetAnno) {
Objects.requireNonNull(targetAnno);
final List<ExpandedAnnotation> expandedList = new ArrayList<>();
if(isRepeated(targetAnno)) {
        // For a repeatable annotation, extract the contained elements.
try {
final Method method = targetAnno.getClass().getMethod("value");
final Annotation[] annos = (Annotation[]) method.invoke(targetAnno);
int index = 0;
for(Annotation anno : annos) {
final List<ExpandedAnnotation> repeatedAnnos = expand(anno);
for(ExpandedAnnotation repeatedAnno : repeatedAnnos) {
repeatedAnno.setIndex(index);
}
expandedList.addAll(repeatedAnnos);
index++;
}
} catch (Exception e) {
throw new RuntimeException("fail get repeated value attribute.", e);
}
} else if(isComposed(targetAnno)) {
final ExpandedAnnotation composedAnno = new ExpandedAnnotation(targetAnno, true);
        // For a composed annotation, recurse into its meta-annotations as children.
final List<Annotation> childAnnos = Arrays.asList(targetAnno.annotationType().getAnnotations());
for(Annotation anno : childAnnos) {
final List<ExpandedAnnotation> nestedAnnos = expand(anno).stream()
.map(nestedAnno -> overrideAttribute(targetAnno, nestedAnno))
.collect(Collectors.toList());
composedAnno.addChilds(nestedAnnos);
}
Collections.sort(composedAnno.getChilds(), comparator);
expandedList.add(composedAnno);
} else {
        // Plain annotation.
expandedList.add(new ExpandedAnnotation(targetAnno, false));
}
Collections.sort(expandedList, comparator);
return expandedList;
}
|
python
|
def set_data(self, pos=None, color=None, width=None, connect=None,
arrows=None):
"""Set the data used for this visual
Parameters
----------
pos : array
Array of shape (..., 2) or (..., 3) specifying vertex coordinates.
color : Color, tuple, or array
The color to use when drawing the line. If an array is given, it
must be of shape (..., 4) and provide one rgba color per vertex.
Can also be a colormap name, or appropriate `Function`.
    width : float
        The width of the line in px. Line widths > 1px are only
        guaranteed to work when using the 'agg' method.
connect : str or array
Determines which vertices are connected by lines.
* "strip" causes the line to be drawn with each vertex
connected to the next.
* "segments" causes each pair of vertices to draw an
independent line segment
* numpy arrays specify the exact set of segment pairs to
connect.
arrows : array
A Nx4 matrix where each row contains the x and y coordinate of the
first and second vertex of the arrow body. Remember that the second
vertex is used as center point for the arrow head, and the first
vertex is only used for determining the arrow head orientation.
"""
if arrows is not None:
self._arrows = arrows
self._arrows_changed = True
LineVisual.set_data(self, pos, color, width, connect)
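
# Data-layout sketch (not from the original source): build a vertex array and
# the Nx4 `arrows` matrix described in the docstring, one row per arrow body
# taken from consecutive vertex pairs. Pure numpy; the visual class itself is
# assumed to follow the vispy ArrowVisual API.
import numpy as np

pos = np.array([[0, 0], [1, 1], [2, 0], [3, 1]], dtype=np.float32)
arrows = np.hstack([pos[:-1], pos[1:]])  # rows of (x1, y1, x2, y2)
print(arrows.shape)  # (3, 4)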
|
python
|
def add_filter(self, name, filter_values):
"""
Add a filter for a facet.
"""
# normalize the value into a list
if not isinstance(filter_values, (tuple, list)):
if filter_values is None:
return
filter_values = [filter_values, ]
# remember the filter values for use in FacetedResponse
self.filter_values[name] = filter_values
# get the filter from the facet
f = self.facets[name].add_filter(filter_values)
if f is None:
return
self._filters[name] = f
|
python
|
def wait_for_deletion(self, interval=1):
"""
Wait for the online model to be deleted.
    :param interval: polling interval in seconds
"""
deleted = False
while True:
try:
if self.status != OnlineModel.Status.DELETING:
break
except errors.NoSuchObject:
deleted = True
break
time.sleep(interval)
if not deleted:
if self.status == OnlineModel.Status.DELETE_FAILED:
raise OnlineModelError(self.last_fail_msg, self)
else:
raise OnlineModelError('Unexpected status occurs: %s' % self.status.value, self)
|
java
|
public void setSla(boolean newSla) {
boolean oldSla = sla;
sla = newSla;
boolean oldSlaESet = slaESet;
slaESet = true;
if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, BpsimPackage.PARAMETER__SLA, oldSla, sla, !oldSlaESet));
}
|
java
|
public FileEntry put( DirectoryEntry parent, String name, byte[] content )
throws IOException
{
return put( parent, name, new ByteArrayInputStream( content ) );
}
|
python
|
def format_duration(minutes, human=True):
    """Formats a duration in a human-readable way.
    Accepts either minutes or a timedelta."""
if isinstance(minutes, dt.timedelta):
minutes = duration_minutes(minutes)
if not minutes:
if human:
return ""
else:
return "00:00"
    if minutes < 0:
        # negative durations are not supported -- return a warning marker
        return "NEGATIVE"
    hours = minutes // 60
minutes = minutes % 60
formatted_duration = ""
    if human:
        if minutes == 0:
            # duration in round hours
            formatted_duration += "%dh" % hours
        elif hours == 0:
            # duration less than an hour
            formatted_duration += "%dmin" % minutes
        else:
            # x hours, y minutes
            formatted_duration += "%dh %dmin" % (hours, minutes)
    else:
        formatted_duration += "%02d:%02d" % (hours, minutes)
return formatted_duration
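
# Usage sketch (not part of the original source); expected outputs assume the
# integer-division fix above.
print(format_duration(120))               # -> 2h
print(format_duration(45))                # -> 45min
print(format_duration(125))               # -> 2h 5min
print(format_duration(125, human=False))  # -> 02:05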
|
python
|
def full_path(self):
"""Absolute system path to the node"""
if self.parent:
return os.path.join(self.parent.full_path, self.name)
return self.name
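
# Self-contained sketch (hypothetical Node class, not from the original
# source) showing how the recursive property composes the absolute path
# up the parent chain.
import os

class Node:
    def __init__(self, name, parent=None):
        self.name = name
        self.parent = parent

    @property
    def full_path(self):
        if self.parent:
            return os.path.join(self.parent.full_path, self.name)
        return self.name

leaf = Node("file.txt", Node("sub", Node("/data")))
print(leaf.full_path)  # /data/sub/file.txt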
|
python
|
def smooth_frames(self, sigtype='physical'):
"""
Convert expanded signals with different samples/frame into
a uniform numpy array.
Input parameters
- sigtype (default='physical'): Specifies whether to mooth
the e_p_signal field ('physical'), or the e_d_signal
field ('digital').
"""
spf = self.samps_per_frame[:]
for ch in range(len(spf)):
if spf[ch] is None:
spf[ch] = 1
# Total samples per frame
tspf = sum(spf)
if sigtype == 'physical':
n_sig = len(self.e_p_signal)
sig_len = int(len(self.e_p_signal[0])/spf[0])
signal = np.zeros((sig_len, n_sig), dtype='float64')
for ch in range(n_sig):
if spf[ch] == 1:
signal[:, ch] = self.e_p_signal[ch]
else:
for frame in range(spf[ch]):
signal[:, ch] += self.e_p_signal[ch][frame::spf[ch]]
signal[:, ch] = signal[:, ch] / spf[ch]
elif sigtype == 'digital':
n_sig = len(self.e_d_signal)
sig_len = int(len(self.e_d_signal[0])/spf[0])
signal = np.zeros((sig_len, n_sig), dtype='int64')
for ch in range(n_sig):
if spf[ch] == 1:
signal[:, ch] = self.e_d_signal[ch]
else:
for frame in range(spf[ch]):
signal[:, ch] += self.e_d_signal[ch][frame::spf[ch]]
signal[:, ch] = signal[:, ch] / spf[ch]
else:
raise ValueError("sigtype must be 'physical' or 'digital'")
return signal
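
# Standalone sketch (synthetic data, not from the original source) of the
# frame-averaging step above: a channel stored at 2 samples per frame is
# reduced to one value per frame by summing the strided slices and dividing.
import numpy as np

spf = 2
e_sig = np.array([1.0, 3.0, 2.0, 4.0, 3.0, 5.0])  # 3 frames, 2 samples each
avg = np.zeros(len(e_sig) // spf)
for frame in range(spf):
    avg += e_sig[frame::spf]
avg /= spf
print(avg)  # [2. 3. 4.]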
|
python
|
def get_incomings_per_page(self, per_page=1000, page=1, params=None):
"""
Get incomings per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
"""
return self._get_resource_per_page(resource=INCOMINGS, per_page=per_page, page=page, params=params)
|
python
|
def connect_entry_signals():
"""
Connect all the signals on Entry model.
"""
post_save.connect(
ping_directories_handler, sender=Entry,
dispatch_uid=ENTRY_PS_PING_DIRECTORIES)
post_save.connect(
ping_external_urls_handler, sender=Entry,
dispatch_uid=ENTRY_PS_PING_EXTERNAL_URLS)
post_save.connect(
flush_similar_cache_handler, sender=Entry,
dispatch_uid=ENTRY_PS_FLUSH_SIMILAR_CACHE)
post_delete.connect(
flush_similar_cache_handler, sender=Entry,
dispatch_uid=ENTRY_PD_FLUSH_SIMILAR_CACHE)
|
java
|
public void setValue(int numStrings, String[] newValues) {
value.clear();
if (numStrings == newValues.length) {
for (int i = 0; i < newValues.length; i++) {
value.add(newValues[i]);
}
}
else {
Log.e(TAG, "X3D MFString setValue() numStrings not equal total newValues");
}
}
|
java
|
public static double findMin(double min, double max, Function1D f, double eps, int maxSteps)
{
double a = min, b = max;
double fa = f.f(a), fb = f.f(b);
double c = b - goldenRatio * (b - a);
double d = a + goldenRatio * (b - a);
double fc = f.f(c);
double fd = f.f(d);
while(Math.abs(c-d) > eps && maxSteps-- > 0)
{
if (fc < fd)
{
// (b, f(b)) ← (d, f(d))
b = d;
fb = fd;
//(d, f(d)) ← (c, f(c))
d = c;
fd = fc;
// update c = b + φ (a - b) and f(c)
c = b - goldenRatio * (b - a);
fc = f.f(c);
}
else
{
//(a, f(a)) ← (c, f(c))
a = c;
fa = fc;
//(c, f(c)) ← (d, f(d))
c = d;
fc = fd;
// update d = a + φ (b - a) and f(d)
d = a + goldenRatio * (b - a);
fd = f.f(d);
}
}
return (a+b)/2;
}
|
python
|
def getCSD(lfps, sampr, minf=0.05, maxf=300, norm=True, vaknin=False, spacing=1.0):
"""
get current source density approximation using set of local field potentials with equidistant spacing
first performs a lowpass filter
lfps is a list or numpy array of LFPs arranged spatially by column
spacing is in microns
"""
datband = getbandpass(lfps,sampr,minf,maxf)
if datband.shape[0] > datband.shape[1]: # take CSD along smaller dimension
ax = 1
else:
ax = 0
    # could run Vaknin on the bandpassed LFPs by default before calculating CSD; that way
    # the CSD and LFP would have the same number of channels (not critical, and would use more RAM)
if vaknin: datband = Vaknin(datband)
if norm: removemean(datband,ax=ax)
# NB: when drawing CSD make sure that negative values (depolarizing intracellular current) drawn in red,
# and positive values (hyperpolarizing intracellular current) drawn in blue
CSD = -numpy.diff(datband,n=2,axis=ax) / spacing**2 # now each column (or row) is an electrode -- CSD along electrodes
return CSD
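
# Minimal sketch (synthetic data, not from the original source) of the core
# CSD step above -- the negative second spatial difference across equidistant
# electrodes -- skipping the bandpass/Vaknin/mean-removal stages. Channels
# are along axis 0 here.
import numpy as np

spacing = 100.0  # microns
lfp = np.vstack([k * np.sin(np.linspace(0, 2 * np.pi, 50)) for k in (1.0, 2.0, 4.0)])
csd = -np.diff(lfp, n=2, axis=0) / spacing ** 2
print(csd.shape)  # (1, 50): the second difference drops two channels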
|
java
|
public static String getLocalizedDateTimePattern(
FormatStyle dateStyle, FormatStyle timeStyle, Chronology chrono, Locale locale) {
Jdk8Methods.requireNonNull(locale, "locale");
Jdk8Methods.requireNonNull(chrono, "chrono");
if (dateStyle == null && timeStyle == null) {
throw new IllegalArgumentException("Either dateStyle or timeStyle must be non-null");
}
DateFormat dateFormat;
if (dateStyle != null) {
if (timeStyle != null) {
dateFormat = DateFormat.getDateTimeInstance(dateStyle.ordinal(), timeStyle.ordinal(), locale);
} else {
dateFormat = DateFormat.getDateInstance(dateStyle.ordinal(), locale);
}
} else {
dateFormat = DateFormat.getTimeInstance(timeStyle.ordinal(), locale);
}
if (dateFormat instanceof SimpleDateFormat) {
return ((SimpleDateFormat) dateFormat).toPattern();
}
throw new IllegalArgumentException("Unable to determine pattern");
}
|
python
|
def collapse(iterable, base_type=None, levels=None):
"""Flatten an iterable with multiple levels of nesting (e.g., a list of
lists of tuples) into non-iterable types.
>>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
>>> list(collapse(iterable))
[1, 2, 3, 4, 5, 6]
Binary and text strings are not considered iterable and
will not be collapsed.
To avoid collapsing other types, specify *base_type*:
>>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
>>> list(collapse(iterable, base_type=tuple))
['ab', ('cd', 'ef'), 'gh', 'ij']
Specify *levels* to stop flattening after a certain level:
>>> iterable = [('a', ['b']), ('c', ['d'])]
>>> list(collapse(iterable)) # Fully flattened
['a', 'b', 'c', 'd']
>>> list(collapse(iterable, levels=1)) # Only one level flattened
['a', ['b'], 'c', ['d']]
"""
def walk(node, level):
if (
((levels is not None) and (level > levels)) or
isinstance(node, (str, bytes)) or
((base_type is not None) and isinstance(node, base_type))
):
yield node
return
try:
tree = iter(node)
except TypeError:
yield node
return
else:
for child in tree:
for x in walk(child, level + 1):
yield x
yield from walk(iterable, 0)