language (stringclasses, 2 values) | func_code_string (stringlengths, 63 to 466k)
---|---
java
|
@Override
public float getAverageGetTime() {
    // convert the accumulated unsupported-cache get total from nanos to millis
    long unsupportedCacheGetTotalMillis = TimeUnit.NANOSECONDS.toMillis(unsupportCacheGetTotalTime.longValue());
    // combine with the cache's average read time, sanitize the stat, and report it in micros
    return TimeUnit.MILLISECONDS.toMicros(mapToSpecValidStat(
        cache.getStats().getAverageReadTime() + unsupportedCacheGetTotalMillis));
}
|
python
|
def from_file(cls, filename):
"""Construct a molecule object read from the given file.
The file format is inferred from the file extension. Currently supported
formats are: ``*.cml``, ``*.fchk``, ``*.pdb``, ``*.sdf``, ``*.xyz``
If a file contains more than one molecule, only the first one is
read.
Argument:
| ``filename`` -- the name of the file containing the molecule
Example usage::
>>> mol = Molecule.from_file("foo.xyz")
"""
# TODO: many different APIs to load files. brrr...
if filename.endswith(".cml"):
from molmod.io import load_cml
return load_cml(filename)[0]
elif filename.endswith(".fchk"):
from molmod.io import FCHKFile
fchk = FCHKFile(filename, field_labels=[])
return fchk.molecule
elif filename.endswith(".pdb"):
from molmod.io import load_pdb
return load_pdb(filename)
elif filename.endswith(".sdf"):
from molmod.io import SDFReader
return next(SDFReader(filename))
elif filename.endswith(".xyz"):
from molmod.io import XYZReader
xyz_reader = XYZReader(filename)
title, coordinates = next(xyz_reader)
return Molecule(xyz_reader.numbers, coordinates, title, symbols=xyz_reader.symbols)
else:
raise ValueError("Could not determine file format for %s." % filename)
|
java
|
public boolean mkdirs(String src, PermissionStatus permissions
) throws IOException {
INode newNode = mkdirsInternal(src, permissions);
getEditLog().logSync(false);
if (newNode != null && auditLog.isInfoEnabled()) {
logAuditEvent(getCurrentUGI(),
Server.getRemoteIp(),
"mkdirs", src, null, newNode);
}
return newNode != null;
}
|
python
|
def lie_between(self, target_time_range):
    """
    Check whether this time range falls entirely within the target time range.
    :param target_time_range: the target time range
    :return: True or False
    """
    return (self.begin_dt >= target_time_range.begin_dt
            and self.end_dt <= target_time_range.end_dt)
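A minimal usage sketch (not from the source): any objects exposing begin_dt/end_dt attributes work, so SimpleNamespace stands in for the real time-range class here.
from datetime import datetime
from types import SimpleNamespace

inner = SimpleNamespace(begin_dt=datetime(2020, 1, 2), end_dt=datetime(2020, 1, 3))
outer = SimpleNamespace(begin_dt=datetime(2020, 1, 1), end_dt=datetime(2020, 1, 4))
print(lie_between(inner, outer))  # True -- inner falls entirely within outer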
|
python
|
def getfieldidd(bch, fieldname):
"""get the idd dict for this field
Will return {} if the fieldname does not exist"""
# print(bch)
try:
fieldindex = bch.objls.index(fieldname)
except ValueError as e:
return {} # the fieldname does not exist
# so there is no idd
fieldidd = bch.objidd[fieldindex]
return fieldidd
|
java
|
public static filterpolicy_csvserver_binding[] get(nitro_service service, String name) throws Exception{
filterpolicy_csvserver_binding obj = new filterpolicy_csvserver_binding();
obj.set_name(name);
filterpolicy_csvserver_binding[] response = (filterpolicy_csvserver_binding[]) obj.get_resources(service);
return response;
}
|
python
|
def add_workflow_definitions(sbi_config: dict):
"""Add any missing SBI workflow definitions as placeholders.
This is a utility function used in testing and adds mock / test workflow
definitions to the database for workflows defined in the specified
SBI config.
Args:
sbi_config (dict): SBI configuration dictionary.
"""
registered_workflows = []
for processing_block in sbi_config['processing_blocks']:
    workflow_config = processing_block['workflow']
workflow_name = '{}:{}'.format(workflow_config['id'],
workflow_config['version'])
if workflow_name in registered_workflows:
continue
workflow_definition = dict(
id=workflow_config['id'],
version=workflow_config['version'],
stages=[]
)
key = "workflow_definitions:{}:{}".format(workflow_config['id'],
workflow_config['version'])
DB.save_dict(key, workflow_definition, hierarchical=False)
registered_workflows.append(workflow_name)
|
java
|
public <T extends ChatStore> ChatConfig store(StoreFactory<T> builder) {
this.storeFactory = builder.asChatStoreFactory();
return this;
}
|
python
|
def getCentroid(attribute_variants, comparator):
"""
Takes in a list of attribute values for a field,
evaluates the centroid using the comparator,
& returns the centroid (i.e. the 'best' value for the field)
"""
n = len(attribute_variants)
distance_matrix = numpy.zeros([n, n])
# populate distance matrix by looping through elements of matrix triangle
for i in range(0, n):
for j in range(0, i):
distance = comparator(attribute_variants[i], attribute_variants[j])
distance_matrix[i, j] = distance_matrix[j, i] = distance
average_distance = distance_matrix.mean(0)
# there can be ties for minimum, average distance string
min_dist_indices = numpy.where(
average_distance == average_distance.min())[0]
if len(min_dist_indices) > 1:
centroid = breakCentroidTie(attribute_variants, min_dist_indices)
else:
centroid_index = min_dist_indices[0]
centroid = attribute_variants[centroid_index]
return centroid
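An illustrative call (assumes numpy is imported, as the function requires); the comparator below is a stand-in absolute-difference metric, not one from the source.
def abs_diff(a, b):
    return abs(a - b)

# 11 has the smallest mean distance to the other variants, so it is the centroid
print(getCentroid([1, 10, 11, 12, 100], abs_diff))  # 11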
|
python
|
def load_config_from_setup(app):
"""
Replace values in app.config from package metadata
"""
# for now, assume project root is one level up
root = os.path.join(app.confdir, '..')
setup_script = os.path.join(root, 'setup.py')
fields = ['--name', '--version', '--url', '--author']
dist_info_cmd = [sys.executable, setup_script] + fields
output = subprocess.check_output(
dist_info_cmd,
cwd=root,
universal_newlines=True,
)
outputs = output.strip().split('\n')
project, version, url, author = outputs
app.config.project = project
app.config.version = app.config.release = version
app.config.package_url = url
app.config.author = app.config.copyright = author
|
java
|
@Override
public void addPermissions(Collection<Permission> aPermissions)
{
    if (aPermissions == null)
        throw new IllegalArgumentException(
            "aPermissions required in SecurityAccessControl");
    for (Permission permission : aPermissions)
    {
        addPermission((SecurityPermission) permission);
    }
}
|
python
|
def load(self, _override=True, _allow_undeclared=False, **kwargs):
"""load configuration values from kwargs, see load_from_dict()."""
self.load_from_dict(
kwargs, _override=_override, _allow_undeclared=_allow_undeclared)
|
java
|
@Override
public boolean eIsSet(int featureID) {
switch (featureID) {
case AfplibPackage.EXTERNAL_ALGORITHM_RG__DIRCTN:
return DIRCTN_EDEFAULT == null ? dirctn != null : !DIRCTN_EDEFAULT.equals(dirctn);
case AfplibPackage.EXTERNAL_ALGORITHM_RG__PADBDRY:
return PADBDRY_EDEFAULT == null ? padbdry != null : !PADBDRY_EDEFAULT.equals(padbdry);
case AfplibPackage.EXTERNAL_ALGORITHM_RG__PADALMT:
return PADALMT_EDEFAULT == null ? padalmt != null : !PADALMT_EDEFAULT.equals(padalmt);
}
return super.eIsSet(featureID);
}
|
java
|
private WDataTable createTable() {
WDataTable table = new WDataTable();
table.addColumn(new WTableColumn("First name", WText.class));
table.addColumn(new WTableColumn("Last name", WText.class));
table.addColumn(new WTableColumn("DOB", WText.class));
table.setPaginationMode(PaginationMode.DYNAMIC);
table.setRowsPerPage(1);
table.setDataModel(createTableModel());
return table;
}
|
python
|
def query(self, query, *parameters, **kwargs):
    """Returns a row list for the given query and parameters."""
    cursor = self._cursor()
    try:
        self._execute(cursor, query, parameters or None, kwargs)
        if cursor.description:
            column_names = [column.name for column in cursor.description]
            return [Row(zip(column_names, row)) for row in cursor.fetchall()]
        return None
    finally:
        # close the cursor on success, on empty results, and on error alike
        cursor.close()
|
java
|
public void scheduleExpirationTask() {
if (nodeEngine.getLocalMember().isLiteMember() || scheduled.get()
|| !scheduled.compareAndSet(false, true)) {
return;
}
scheduledExpirationTask =
globalTaskScheduler.scheduleWithRepetition(task, taskPeriodSeconds,
taskPeriodSeconds, SECONDS);
scheduledOneTime.set(true);
}
|
java
|
private boolean tryIncrement(AtomicInteger counter, int max) {
// Repeatedly attempt to increment the given AtomicInteger until we
// explicitly succeed or explicitly fail
while (true) {
// Get current value
int count = counter.get();
// Bail out if the maximum has already been reached
if (count >= max && max != 0)
return false;
// Attempt to increment
if (counter.compareAndSet(count, count+1))
return true;
// Try again if unsuccessful
}
}
|
python
|
def getOrderVectors(self):
"""
Returns a list of lists, one for each preference, of candidates ordered from most preferred
to least. Note that ties are not indicated in the returned lists. Also returns a list of
the number of times each preference is given.
"""
orderVectors = []
for preference in self.preferences:
orderVectors.append(preference.getOrderVector())
return orderVectors
|
java
|
public static void setProperties( Properties prp ) {
Config.prp = new Properties( prp );
try {
Config.prp.putAll( System.getProperties() );
} catch( SecurityException se ) {
if( log.level > 1 )
log.println( "SecurityException: jcifs.smb1 will ignore System properties" );
}
}
|
java
|
public static boolean isSensitive(String key) {
Preconditions.checkNotNull(key, "key is null");
final String keyInLower = key.toLowerCase();
for (String hideKey : SENSITIVE_KEYS) {
if (keyInLower.length() >= hideKey.length()
&& keyInLower.contains(hideKey)) {
return true;
}
}
return false;
}
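A rough Python sketch of the same case-insensitive substring check; the key list below is a hypothetical stand-in, not the SENSITIVE_KEYS from the source.
SENSITIVE_KEYS = ['password', 'secret']  # hypothetical stand-in list

def is_sensitive(key):
    assert key is not None, 'key is null'
    key_lower = key.lower()
    return any(hide_key in key_lower for hide_key in SENSITIVE_KEYS)

print(is_sensitive('db.PASSWORD'))  # True
print(is_sensitive('timeout'))      # False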
|
java
|
public Object injectAndPostConstruct(Class<?> Klass) throws InjectionProviderException
{
Object instance = null;
ManagedObject mo = (ManagedObject) ((WASInjectionProvider) getInjectionProvider()).inject(Klass, true, _externalContext);
instance = mo.getObject();
if (instance instanceof FacesWrapper)
{
Object innerInstance = ((FacesWrapper)instance).getWrapped();
if (innerInstance != null)
{
injectAndPostConstruct(innerInstance);
}
}
return instance;
}
|
python
|
def set_dynamic_time_fn(self_,time_fn,sublistattr=None):
"""
Set time_fn for all Dynamic Parameters of this class or
instance object that are currently being dynamically
generated.
Additionally, sets _Dynamic_time_fn=time_fn on this class or
instance object, so that any future changes to Dynamic
Parameters can inherit time_fn (e.g. if a Number is changed
from a float to a number generator, the number generator will
inherit time_fn).
If specified, sublistattr is the name of an attribute of this
class or instance that contains an iterable collection of
subobjects on which set_dynamic_time_fn should be called. If
the attribute sublistattr is present on any of the subobjects,
set_dynamic_time_fn() will be called for those, too.
"""
self_or_cls = self_.self_or_cls
self_or_cls._Dynamic_time_fn = time_fn
if isinstance(self_or_cls,type):
a = (None,self_or_cls)
else:
a = (self_or_cls,)
for n,p in self_or_cls.param.objects('existing').items():
if hasattr(p, '_value_is_dynamic'):
if p._value_is_dynamic(*a):
g = self_or_cls.param.get_value_generator(n)
g._Dynamic_time_fn = time_fn
if sublistattr:
try:
sublist = getattr(self_or_cls,sublistattr)
except AttributeError:
sublist = []
for obj in sublist:
obj.param.set_dynamic_time_fn(time_fn,sublistattr)
|
python
|
def isOpen(self):
"""Returns whether all analyses from this Analysis Request are open
(their status is either "assigned" or "unassigned")
"""
for analysis in self.getAnalyses():
if not api.get_object(analysis).isOpen():
return False
return True
|
python
|
def user_record(uid, type=0):
"""获取用户的播放列表,必须登录
:param uid: 用户的ID,可通过登录或者其他接口获取
:param type: (optional) 数据类型,0:获取所有记录,1:获取 weekData
"""
if uid is None:
raise ParamsError()
r = NCloudBot()
r.method = 'USER_RECORD'
r.data = {'type': type, 'uid': uid, "csrf_token": ""}
r.send()
return r.response
|
python
|
def fill_auth_list_from_groups(self, auth_provider, user_groups, auth_list):
'''
Returns a list of authorisation matchers that a user is eligible for.
This list is a combination of the provided personal matchers plus the
matchers of any group the user is in.
'''
group_names = [item for item in auth_provider if item.endswith('%')]
if group_names:
for group_name in group_names:
if group_name.rstrip("%") in user_groups:
for matcher in auth_provider[group_name]:
auth_list.append(matcher)
return auth_list
|
python
|
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a Windows Restore Point (rp.log) log file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_size = file_object.get_size()
file_header_map = self._GetDataTypeMap('rp_log_file_header')
try:
file_header, _ = self._ReadStructureFromFileObject(
file_object, 0, file_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile(
'Unable to parse file header with error: {0!s}'.format(
exception))
file_footer_map = self._GetDataTypeMap('rp_log_file_footer')
file_footer_offset = file_size - file_footer_map.GetByteSize()
try:
file_footer, _ = self._ReadStructureFromFileObject(
file_object, file_footer_offset, file_footer_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse file footer with error: {0!s}'.format(exception))
return
# The description in the file header includes the end-of-string character
# that we need to strip off.
description = file_header.description.rstrip('\0')
if file_footer.creation_time == 0:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
else:
date_time = dfdatetime_filetime.Filetime(
timestamp=file_footer.creation_time)
event_data = RestorePointEventData()
event_data.description = description
event_data.restore_point_event_type = file_header.event_type
event_data.restore_point_type = file_header.restore_point_type
event_data.sequence_number = file_header.sequence_number
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
java
|
public void println(String text) {
try {
if(autoIndent) printIndent();
out.write(text);
println();
} catch(IOException ioe) {
throw new GroovyRuntimeException(ioe);
}
}
|
java
|
public static long getLongParameter(final ServletRequest pReq, final String pName, final long pDefault) {
String str = pReq.getParameter(pName);
try {
return str != null ? Long.parseLong(str) : pDefault;
}
catch (NumberFormatException nfe) {
return pDefault;
}
}
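The same defaulted-parse pattern sketched in Python (hypothetical helper, with a plain dict standing in for the ServletRequest):
def get_long_parameter(params, name, default):
    value = params.get(name)
    try:
        return int(value) if value is not None else default
    except ValueError:
        return default

print(get_long_parameter({'limit': '42'}, 'limit', 10))   # 42
print(get_long_parameter({'limit': 'abc'}, 'limit', 10))  # 10 -- falls back on parse failure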
|
python
|
def get_torrent(self, torrent_id):
"""Gets the `.torrent` data for the given `torrent_id`.
:param torrent_id: the ID of the torrent to download
:raises TorrentNotFoundError: if the torrent does not exist
:returns: :class:`Torrent` of the associated torrent
"""
params = {
'page': 'download',
'tid': torrent_id,
}
r = requests.get(self.base_url, params=params)
if r.headers.get('content-type') != 'application/x-bittorrent':
raise TorrentNotFoundError(TORRENT_NOT_FOUND_TEXT)
torrent_data = r.content
return Torrent(torrent_id, torrent_data)
|
python
|
def add_user(name, password=None, runas=None):
'''
Add a rabbitMQ user via rabbitmqctl add_user <user> <password>
CLI Example:
.. code-block:: bash
salt '*' rabbitmq.add_user rabbit_user password
'''
clear_pw = False
if password is None:
# Generate a random, temporary password. RabbitMQ requires one.
clear_pw = True
password = ''.join(random.SystemRandom().choice(
string.ascii_uppercase + string.digits) for x in range(15))
if runas is None and not salt.utils.platform.is_windows():
runas = salt.utils.user.get_user()
if salt.utils.platform.is_windows():
# On Windows, if the password contains a special character
# such as '|', normal execution will fail. For example:
# cmd: rabbitmq.add_user abc "asdf|def"
# stderr: 'def' is not recognized as an internal or external
# command,\r\noperable program or batch file.
# Work around this by using a shell and a quoted command.
python_shell = True
cmd = '"{0}" add_user "{1}" "{2}"'.format(
RABBITMQCTL, name, password
)
else:
python_shell = False
cmd = [RABBITMQCTL, 'add_user', name, password]
res = __salt__['cmd.run_all'](
cmd,
reset_system_locale=False,
output_loglevel='quiet',
runas=runas,
python_shell=python_shell)
if clear_pw:
# Now, Clear the random password from the account, if necessary
try:
clear_password(name, runas)
except Exception:
# Clearing the password failed. Clean up the partially
# created user and re-raise the error.
delete_user(name, runas)
raise
msg = 'Added'
return _format_response(res, msg)
|
java
|
@RequestMapping(path="/{orderId}/status", method = RequestMethod.PUT)
public ResponseEntity<Order> updateStatus(@PathVariable("orderId") Long orderId, @RequestParam("status") OrderStatus status){
Order order = orderService.updateStatus(orderId, status);
if(order != null){
return new ResponseEntity<Order>(order, HttpStatus.OK);
}
return new ResponseEntity<Order>(HttpStatus.NOT_FOUND);
}
|
java
|
private void mergeReleasedEntries(Segment segment, OffsetPredicate predicate, Segment compactSegment) {
for (long i = segment.firstIndex(); i <= segment.lastIndex(); i++) {
long offset = segment.offset(i);
if (offset != -1 && !predicate.test(offset)) {
compactSegment.release(i);
}
}
}
|
java
|
private boolean forceSettleCapturedViewAt(int finalLeft, int finalTop, int xvel, int yvel) {
final int startLeft = mCapturedView.getLeft();
final int startTop = mCapturedView.getTop();
final int dx = finalLeft - startLeft;
final int dy = finalTop - startTop;
if (dx == 0 && dy == 0) {
// Nothing to do. Send callbacks, be done.
mScroller.abortAnimation();
setDragState(STATE_IDLE);
return false;
}
final int duration = computeSettleDuration(mCapturedView, dx, dy, xvel, yvel);
mScroller.startScroll(startLeft, startTop, dx, dy, duration);
setDragState(STATE_SETTLING);
return true;
}
|
python
|
def timezone_from_str(tz_str):
"""
Convert a timezone string to a timezone object.
:param tz_str: string with format 'Asia/Shanghai' or 'UTC±[hh]:[mm]'
:return: a timezone object (tzinfo)
"""
m = re.match(r'UTC([+-]\d{1,2}):(\d{2})', tz_str)
if m:
# in format 'UTC±[hh]:[mm]'
delta_h = int(m.group(1))
delta_m = int(m.group(2)) if delta_h >= 0 else -int(m.group(2))
return timezone(timedelta(hours=delta_h, minutes=delta_m))
# in format 'Asia/Shanghai'
try:
return pytz.timezone(tz_str)
except pytz.exceptions.UnknownTimeZoneError:
return None
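Usage sketch, assuming the imports the function relies on (re, pytz, and datetime's timezone/timedelta):
print(timezone_from_str('UTC+08:00'))      # UTC+08:00 (fixed-offset timezone)
print(timezone_from_str('Asia/Shanghai'))  # pytz timezone object
print(timezone_from_str('not/a/zone'))     # None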
|
java
|
public JSONObject batchSynonyms(List<JSONObject> objects, boolean forwardToReplicas, boolean replaceExistingSynonyms) throws AlgoliaException {
return this.batchSynonyms(objects, forwardToReplicas, replaceExistingSynonyms, RequestOptions.empty);
}
|
java
|
private boolean isClientChannelClosed(Throwable cause) {
if (cause instanceof ClosedChannelException ||
cause instanceof Errors.NativeIoException) {
LOG.error("ZuulFilterChainHandler::isClientChannelClosed - IO Exception");
return true;
}
return false;
}
|
java
|
final void clearTransportVersion() {
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.entry(this, tc, "clearTransportVersion");
getHdr2().setChoiceField(JsHdr2Access.TRANSPORTVERSION, JsHdr2Access.IS_TRANSPORTVERSION_EMPTY);
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.exit(this, tc, "clearTransportVersion");
}
|
python
|
def _remove_layer_clicked(self):
"""Remove layer clicked."""
layer = self.list_layers_in_map_report.selectedItems()[0]
origin = layer.data(LAYER_ORIGIN_ROLE)
if origin == FROM_ANALYSIS['key']:
key = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE)
parent = layer.data(LAYER_PARENT_ANALYSIS_ROLE)
parent_item = self.tree.findItems(
parent, Qt.MatchContains | Qt.MatchRecursive, 0)[0]
item = QTreeWidgetItem(parent_item, [definition(key)['name']])
item.setData(0, LAYER_PARENT_ANALYSIS_ROLE, parent)
else:
parent_item = self.tree.findItems(
FROM_CANVAS['name'],
Qt.MatchContains | Qt.MatchRecursive, 0)[0]
item = QTreeWidgetItem(parent_item, [layer.text()])
layer_id = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE)
item.setData(0, LAYER_PURPOSE_KEY_OR_ID_ROLE, layer_id)
item.setData(0, LAYER_ORIGIN_ROLE, origin)
index = self.list_layers_in_map_report.indexFromItem(layer)
self.list_layers_in_map_report.takeItem(index.row())
self.list_layers_in_map_report.clearSelection()
|
python
|
def _getConfiguration(self):
"""
Load application configuration files.
:return: <dict>
"""
configDirectoryPath = os.path.join("application", "config")
config = Config(configDirectoryPath)
configData = config.getData()
# setting application parameters
reactor.suggestThreadPoolSize(
int(configData["performance"]["threadPoolSize"])
)
return configData
|
java
|
public static PlotCanvas plot(double[] x, double[] y, double[][] z) {
// Math here resolves to an array-aware min/max utility (e.g. smile.math.Math), not java.lang.Math
double[] lowerBound = {Math.min(x), Math.min(y)};
double[] upperBound = {Math.max(x), Math.max(y)};
PlotCanvas canvas = new PlotCanvas(lowerBound, upperBound, false);
canvas.add(new Heatmap(x, y, z));
canvas.getAxis(0).setGridVisible(false);
canvas.getAxis(1).setGridVisible(false);
return canvas;
}
|
python
|
def read_lines(self, max_lines=None):
"""Reads the content of this object as text, and return a list of lines up to some max.
Args:
max_lines: max number of lines to return. If None, return all lines.
Returns:
The text content of the object as a list of lines.
Raises:
Exception if there was an error requesting the object's content.
"""
if max_lines is None:
return self.read_stream().split('\n')
max_to_read = self.metadata.size
bytes_to_read = min(100 * max_lines, self.metadata.size)
while True:
content = self.read_stream(byte_count=bytes_to_read)
lines = content.split('\n')
if len(lines) > max_lines or bytes_to_read >= max_to_read:
break
# try 10 times more bytes or max
bytes_to_read = min(bytes_to_read * 10, max_to_read)
# remove the partial line at last
del lines[-1]
return lines[0:max_lines]
|
java
|
private static List<BeanMappingObject> parseMappingObject(InputStream in) {
Document doc = XmlHelper.createDocument(
in,
Thread.currentThread().getContextClassLoader().getResourceAsStream(
MAPPING_SCHEMA));
Element root = doc.getDocumentElement();
NodeList globalNodeList = root.getElementsByTagName("global-configurations");
if (globalNodeList.getLength() > 1) {
throw new BeanMappingException("more than one global-configurations node found!");
}
BeanMappingBehavior globalBehavior = BeanMappingConfigHelper.getInstance().getGlobalBehavior();
if (globalNodeList.getLength() == 1) {
globalBehavior = BeanMappingBehaviorParse.parse(globalNodeList.item(0), globalBehavior);
BeanMappingConfigHelper.getInstance().setGlobalBehavior(globalBehavior);
}
NodeList classAliasNodeList = root.getElementsByTagName("class-alias-configurations");
for (int i = 0; i < classAliasNodeList.getLength(); i++) {
ClassAliasParse.parseAndRegister(classAliasNodeList.item(i));
}
NodeList convertorNodeList = root.getElementsByTagName("convertors-configurations");
for (int i = 0; i < convertorNodeList.getLength(); i++) {
ConvertorParse.parseAndRegister(convertorNodeList.item(i));
}
NodeList functionClassNodeList = root.getElementsByTagName("function-class-configurations");
for (int i = 0; i < functionClassNodeList.getLength(); i++) {
FunctionClassParse.parseAndRegister(functionClassNodeList.item(i));
}
NodeList nodeList = root.getElementsByTagName("bean-mapping");
List<BeanMappingObject> mappings = new ArrayList<BeanMappingObject>();
// Parse the BeanMappingObject attributes
for (int i = 0; i < nodeList.getLength(); i++) {
BeanMappingObject config = BeanMappingParse.parse(nodeList.item(i), globalBehavior);
// Add to the returned results
mappings.add(config);
}
return mappings;
}
|
java
|
private void init(final Calendar definingCalendar) {
patterns = new ArrayList<>();
final StrategyParser fm = new StrategyParser(definingCalendar);
for (;;) {
final StrategyAndWidth field = fm.getNextStrategy();
if (field == null) {
break;
}
patterns.add(field);
}
}
|
python
|
def build(outname, wcsname, refimage, undistort=False,
applycoeffs=False, coeffsfile=None, **wcspars):
""" Core functionality to create a WCS instance from a reference image WCS,
user supplied parameters or user adjusted reference WCS.
The distortion information can either be read in as part of the reference
image WCS or given in 'coeffsfile'.
Parameters
----------
outname : string
filename of output WCS
wcsname : string
WCSNAME ID for generated WCS
refimage : string
filename of image with source WCS used as basis for output WCS
undistort : bool
Create an undistorted WCS?
applycoeffs : bool
Apply coefficients from refimage to generate undistorted WCS?
coeffsfile : string
If specified, read distortion coeffs from separate file
"""
# Ensure that the User WCS parameters have values for all the parameters,
# even if that value is 'None'
user_wcs_pars = convert_user_pars(wcspars)
userwcs = wcspars['userwcs']
"""
Use cases to document the logic required to interpret the parameters
WCS generation based on refimage/userwcs parameters
-------------------------------------------------------------
refimage == None, userwcs == False:
*NO WCS specified*
=> print a WARNING message and return without doing anything
refimage == None, userwcs == True:
=> Create WCS without a distortion model entirely from user parameters*
refimage != None, userwcs == False:
=> No user WCS parameters specified
=> Simply use refimage WCS as specified
refimage != None, userwcs == True:
=> Update refimage WCS with user specified values*
Apply distortion and generate final headerlet using processed WCS
-----------------------------------------------------------------
refimage == None, userwcs == True:
*Output WCS generated entirely from user supplied parameters*
Case 1: applycoeffs == False, undistort == True/False (ignored)
=> no distortion model to interpret
=> generate undistorted headerlet with no distortion model
Case 2: applycoeffs == True/False, undistort == True
=> ignore any user specified distortion model
=> generate undistorted headerlet with no distortion model
Case 3: applycoeffs == True, undistort == False
=> WCS from scratch combined with distortion model from another image
=> generate headerlet with distortion model
refimage != None, userwcs == True/False:
*Output WCS generated from reference image possibly modified by user parameters*
Case 4: applycoeffs == False, undistort == True
=> If refimage has distortion, remove it
=> generate undistorted headerlet with no distortion model
Case 5: applycoeffs == False, undistort == False
=> Leave refimage distortion model (if any) unmodified
=> generate a headerlet using same distortion model (if any) as refimage
Case 6: applycoeffs == True, undistort == False
=> Update refimage with distortion model with user-specified model
=> generate a headerlet with a distortion model
Case 7: applycoeffs == True, undistort == True
=> ignore user specified distortion model and undistort WCS
=> generate a headerlet without a distortion model
"""
### Build WCS from refimage and/or user pars
if util.is_blank(refimage) and not userwcs:
print('WARNING: No WCS specified... No WCS created!')
return
customwcs = None
if util.is_blank(refimage) and userwcs:
# create HSTWCS object from user parameters
complete_wcs = True
for key in user_wcs_pars:
if util.is_blank(user_wcs_pars[key]):
complete_wcs = False
break
if complete_wcs:
customwcs = wcs_functions.build_hstwcs(user_wcs_pars['crval1'],user_wcs_pars['crval2'],
user_wcs_pars['crpix1'],user_wcs_pars['crpix2'],
user_wcs_pars['naxis1'],user_wcs_pars['naxis2'],
user_wcs_pars['pscale'],user_wcs_pars['orientat'])
else:
print('WARNING: Not enough WCS information provided by user!')
raise ValueError
if not util.is_blank(refimage):
refwcs = stwcs.wcsutil.HSTWCS(refimage)
else:
refwcs = customwcs
### Apply distortion model (if any) to update WCS
if applycoeffs and not util.is_blank(coeffsfile):
if not util.is_blank(refimage):
replace_model(refwcs, coeffsfile)
else:
if not undistort:
add_model(refwcs,coeffsfile)
# Only working with custom WCS from user, no distortion
# so apply model to WCS, including modifying the CD matrix
apply_model(refwcs)
### Create undistorted WCS, if requested
if undistort:
outwcs = undistortWCS(refwcs)
else:
outwcs = refwcs
if userwcs:
# replace (some/all?) WCS values from refimage with user WCS values
# by running 'updatewcs' functions on input WCS
outwcs = mergewcs(outwcs,customwcs,user_wcs_pars)
### Create the final headerlet and write it out, if specified
if not util.is_blank(refimage):
template = refimage
elif not util.is_blank(coeffsfile):
template = coeffsfile
else:
template = None
# create default WCSNAME if None was given
wcsname = create_WCSname(wcsname)
print('Creating final headerlet with name ',wcsname,' using template ',template)
outhdr = generate_headerlet(outwcs,template,wcsname,outname=outname)
# synchronize this new WCS with the rest of the chips in the image
for ext in outhdr:
if 'extname' in ext.header and ext.header['extname'] == 'SIPWCS':
ext_wcs = wcsutil.HSTWCS(ext)
stwcs.updatewcs.makewcs.MakeWCS.updateWCS(ext_wcs,outwcs)
return outwcs
|
java
|
@Override
public Object getValue(final Object _currentObject)
throws EFapsException
{
final Type tempType;
if (this.type.getMainTable().getSqlColType() == null) {
tempType = this.type;
} else if (_currentObject != null) {
// check is necessary because Oracle JDBC returns for getObject always a BigDecimal
if (_currentObject instanceof BigDecimal) {
tempType = Type.get(((BigDecimal) _currentObject).longValue());
} else {
tempType = Type.get((Long) _currentObject);
}
} else {
tempType = null;
}
return analyzeChildValue(this, tempType);
}
|
java
|
@Override
public void validate(ValidationHelper helper, Context context, String key, Info t) {
if (t != null) {
ValidatorUtils.validateRequiredField(t.getVersion(), context, "version").ifPresent(helper::addValidationEvent);
ValidatorUtils.validateRequiredField(t.getTitle(), context, "title").ifPresent(helper::addValidationEvent);
if (t.getTermsOfService() != null) {
if (!ValidatorUtils.isValidURI(t.getTermsOfService())) {
final String message = Tr.formatMessage(tc, "infoTermsOfServiceInvalidURL", t.getTermsOfService());
helper.addValidationEvent(new ValidationEvent(ValidationEvent.Severity.ERROR, context.getLocation("termsOfService"), message));
}
}
}
}
|
python
|
def link(self):
"""link the program, making it the active shader.
.. note:: Shader.bind() is preferred here, because link() Requires the Shader to be compiled already.
"""
gl.glLinkProgram(self.id)
# Check if linking was successful. If not, print the log.
link_status = c_int(0)
gl.glGetProgramiv(self.id, gl.GL_LINK_STATUS, byref(link_status))
if not link_status:
gl.glGetProgramiv(self.id, gl.GL_INFO_LOG_LENGTH, byref(link_status)) # retrieve the log length
buffer = create_string_buffer(link_status.value) # create a buffer for the log
gl.glGetProgramInfoLog(self.id, link_status, None, buffer) # retrieve the log text
print(buffer.value) # print the log to the console
self.is_linked = True
|
java
|
public ImportRestApiRequest withParameters(java.util.Map<String, String> parameters) {
setParameters(parameters);
return this;
}
|
java
|
public static String getCurrentRemoteConnectionLink(){
if(!APILookupUtility.isLocal()) try{
return URLEncoder.encode(APILookupUtility.getCurrentRemoteInstance().getHost(), "UTF-8") +
COLON_URL_ENCODED +
APILookupUtility.getCurrentRemoteInstance().getPort();
}catch (IllegalStateException | UnsupportedEncodingException ignored){/* null will be returned */}
return null;
}
|
python
|
def tx_tmpdir(data=None, base_dir=None, remove=True):
"""Context manager to create and remove a transactional temporary directory.
Handles creating a transactional directory for running commands in. Will
use either the current directory or /tmp/bcbiotx.
Creates an intermediary location and time specific directory for global
temporary directories to prevent collisions.
data can be the full world information object being processed or a
configuration dictionary.
"""
base_dir = base_dir or os.getcwd()
tmpdir_base = utils.get_abspath(_get_base_tmpdir(data, base_dir))
utils.safe_makedir(tmpdir_base)
tmp_dir = tempfile.mkdtemp(dir=tmpdir_base)
#logger.debug("Created tmp dir %s " % tmp_dir)
try:
yield tmp_dir
finally:
if remove:
utils.remove_safe(tmp_dir)
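Usage sketch: upstream this generator is presumably wrapped with @contextlib.contextmanager, so it is used as a with-block.
with tx_tmpdir(base_dir='/tmp') as tmp_dir:
    print('staging outputs in', tmp_dir)
# with remove=True (the default), the directory is deleted on exit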
|
java
|
@Override
public HystrixDynamicProperty<Integer> getInteger(final String name, final Integer fallback) {
return new HystrixDynamicProperty<Integer>() {
@Override
public String getName() {
return name;
}
@Override
public Integer get() {
return Integer.getInteger(name, fallback);
}
@Override
public void addCallback(Runnable callback) {
}
};
}
|
python
|
def document_members(self, all_members=False):
# type: (bool) -> None
"""Generate reST for member documentation.
If *all_members* is True, do all members, else those given by
*self.options.members*.
"""
sourcename = self.get_sourcename()
want_all = all_members or self.options.members is ALL
if not want_all and not self.options.members:
return
expressions = [
SolidityObject.file == self.object.file,
SolidityObject.contract_name == self.object.name
]
if not want_all:
members_inset = set()
should_include_fallback = False
should_include_constructor = False
for member in self.options.members:
if member == '<fallback>':
should_include_fallback = True
elif member == 'constructor':
should_include_constructor = True
elif member:
members_inset.add(member)
expr = SolidityObject.name.in_(members_inset)
if should_include_fallback:
expr |= (SolidityObject.objtype == 'function') & (SolidityObject.name.is_null(True))
if should_include_constructor:
expr |= (SolidityObject.objtype == 'constructor') & (SolidityObject.name.is_null(True))
expressions.append(expr)
if self.options.exclude_members:
should_exclude_fallback = False
should_exclude_constructor = False
if '<fallback>' in self.options.exclude_members:
self.options.exclude_members.remove('<fallback>')
should_exclude_fallback = True
if 'constructor' in self.options.exclude_members:
self.options.exclude_members.remove('constructor')
should_exclude_constructor = True
expr = SolidityObject.name.not_in(self.options.exclude_members)
subexpr = SolidityObject.name.is_null(True)
if should_exclude_fallback:
subexpr &= (SolidityObject.objtype != 'function')
if should_exclude_constructor:
subexpr &= (SolidityObject.objtype != 'constructor')
expr |= subexpr
expressions.append(expr)
for member in SolidityObject.select().where(*expressions):
self.add_line('', sourcename)
full_mname = '{file}:{contract}{name}{paramtypes}'.format(
file=member.file,
contract='' if member.contract_name is None
else member.contract_name + '.',
name=member.name or '',
paramtypes='' if member.paramtypes is None
else '(' + member.paramtypes + ')',
)
documenter = all_solidity_documenters[member.objtype](
self.directive, full_mname, self.indent)
documenter.generate(all_members=True)
|
java
|
public static void setSreSpecificData(SRESpecificDataContainer container, Object data) {
assert container != null;
container.$setSreSpecificData(data);
}
|
java
|
private void checkForThreadErrors() throws IOException {
if (error && childException != null) {
error = false;
IOException temp = childException;
childException = null;
throw temp;
}
}
|
python
|
def make_parser(parser_creator=None, **kwargs):
"""Returns a base argument parser for the ray.tune tool.
Args:
parser_creator: A constructor for the parser class.
kwargs: Non-positional args to be passed into the
parser class constructor.
"""
if parser_creator:
parser = parser_creator(**kwargs)
else:
parser = argparse.ArgumentParser(**kwargs)
# Note: keep this in sync with rllib/train.py
parser.add_argument(
"--run",
default=None,
type=str,
help="The algorithm or model to train. This may refer to the name "
"of a built-on algorithm (e.g. RLLib's DQN or PPO), or a "
"user-defined trainable function or class registered in the "
"tune registry.")
parser.add_argument(
"--stop",
default="{}",
type=json.loads,
help="The stopping criteria, specified in JSON. The keys may be any "
"field returned by 'train()' e.g. "
"'{\"time_total_s\": 600, \"training_iteration\": 100000}' to stop "
"after 600 seconds or 100k iterations, whichever is reached first.")
parser.add_argument(
"--config",
default="{}",
type=json.loads,
help="Algorithm-specific configuration (e.g. env, hyperparams), "
"specified in JSON.")
parser.add_argument(
"--resources-per-trial",
default=None,
type=json_to_resources,
help="Override the machine resources to allocate per trial, e.g. "
"'{\"cpu\": 64, \"gpu\": 8}'. Note that GPUs will not be assigned "
"unless you specify them here. For RLlib, you probably want to "
"leave this alone and use RLlib configs to control parallelism.")
parser.add_argument(
"--num-samples",
default=1,
type=int,
help="Number of times to repeat each trial.")
parser.add_argument(
"--local-dir",
default=DEFAULT_RESULTS_DIR,
type=str,
help="Local dir to save training results to. Defaults to '{}'.".format(
DEFAULT_RESULTS_DIR))
parser.add_argument(
"--upload-dir",
default="",
type=str,
help="Optional URI to sync training results to (e.g. s3://bucket).")
parser.add_argument(
"--trial-name-creator",
default=None,
help="Optional creator function for the trial string, used in "
"generating a trial directory.")
parser.add_argument(
"--sync-function",
default=None,
help="Function for syncing the local_dir to upload_dir. If string, "
"then it must be a string template for syncer to run and needs to "
"include replacement fields '{local_dir}' and '{remote_dir}'.")
parser.add_argument(
"--loggers",
default=None,
help="List of logger creators to be used with each Trial. "
"Defaults to ray.tune.logger.DEFAULT_LOGGERS.")
parser.add_argument(
"--checkpoint-freq",
default=0,
type=int,
help="How many training iterations between checkpoints. "
"A value of 0 (default) disables checkpointing.")
parser.add_argument(
"--checkpoint-at-end",
action="store_true",
help="Whether to checkpoint at the end of the experiment. "
"Default is False.")
parser.add_argument(
"--keep-checkpoints-num",
default=None,
type=int,
help="Number of last checkpoints to keep. Others get "
"deleted. Default (None) keeps all checkpoints.")
parser.add_argument(
"--checkpoint-score-attr",
default="training_iteration",
type=str,
help="Specifies by which attribute to rank the best checkpoint. "
"Default is increasing order. If attribute starts with min- it "
"will rank attribute in decreasing order. Example: "
"min-validation_loss")
parser.add_argument(
"--export-formats",
default=None,
help="List of formats that exported at the end of the experiment. "
"Default is None. For RLlib, 'checkpoint' and 'model' are "
"supported for TensorFlow policy graphs.")
parser.add_argument(
"--max-failures",
default=3,
type=int,
help="Try to recover a trial from its last checkpoint at least this "
"many times. Only applies if checkpointing is enabled.")
parser.add_argument(
"--scheduler",
default="FIFO",
type=str,
help="FIFO (default), MedianStopping, AsyncHyperBand, "
"HyperBand, or HyperOpt.")
parser.add_argument(
"--scheduler-config",
default="{}",
type=json.loads,
help="Config options to pass to the scheduler.")
# Note: this currently only makes sense when running a single trial
parser.add_argument(
"--restore",
default=None,
type=str,
help="If specified, restore from this checkpoint.")
return parser
|
python
|
def instantiate_components(self, callback=None):
"""
Instantiates the Components.
Usage::
>>> manager = Manager((tests_manager,))
>>> manager.register_components()
True
>>> manager.instantiate_components()
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17a5bb0>
:param callback: Callback object.
:type callback: object
"""
uninstantiated_components = [component
for component in self.list_components()
if not self.instantiate_component(component, callback)]
if not uninstantiated_components:
return True
else:
raise manager.exceptions.ComponentInstantiationError(
"{0} | '{1}' Components failed to instantiate!".format(self.__class__.__name__,
", ".join(uninstantiated_components)))
|
python
|
def cache_for(**timedelta_kw):
"""
Set Cache-Control headers and Expires-header.
Expects a timedelta instance.
"""
max_age_timedelta = timedelta(**timedelta_kw)
def decorate_func(func):
@wraps(func)
def decorate_func_call(*a, **kw):
callback = SetCacheControlHeadersFromTimedeltaCallback(max_age_timedelta)
registry_provider = AfterThisRequestCallbackRegistryProvider()
registry = registry_provider.provide()
registry.add(callback)
return func(*a, **kw)
return decorate_func_call
return decorate_func
|
python
|
def _next_shape_id(self):
"""Return unique shape id suitable for use with a new shape element.
The returned id is the next available positive integer drawing object
id in shape tree, starting from 1 and making use of any gaps in
numbering. In practice, the minimum id is 2 because the spTree
element itself is always assigned id="1".
"""
id_str_lst = self.xpath('//@id')
used_ids = [int(id_str) for id_str in id_str_lst if id_str.isdigit()]
for n in range(1, len(used_ids)+2):
if n not in used_ids:
return n
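The gap-filling logic above, distilled into a standalone sketch; range(1, len(used_ids) + 2) suffices because with k ids in use the answer is at most k + 1, and range's end is exclusive.
def next_free_id(used_ids):
    for n in range(1, len(used_ids) + 2):
        if n not in used_ids:
            return n

print(next_free_id([1, 2, 4]))  # 3 -- reuses the gap
print(next_free_id([1, 2, 3]))  # 4 -- next id past the end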
|
java
|
protected void safeExecuteChangeListener(CmsEntity entity, I_CmsEntityChangeListener listener) {
try {
listener.onEntityChange(entity);
} catch (Exception e) {
String stack = CmsClientStringUtil.getStackTrace(e, "<br />");
CmsDebugLog.getInstance().printLine("<br />" + e.getMessage() + "<br />" + stack);
}
}
|
java
|
public <A> BindStep<ConfigFactory, A> bind(final Class<A> type) {
checkNotNull(type);
return new BindStep<ConfigFactory, A>() {
@Override
public ConfigFactory toInstance(A instance) {
return withBindings(
bindings.set(type, checkNotNull(instance))
);
}
@Override
public ConfigFactory toNothing() {
return withBindings(
bindings.remove(type)
);
}
};
}
|
python
|
def relookup(self, pattern):
""" Dictionary lookup with a regular expression. Return pairs whose key
matches pattern.
"""
key = re.compile(pattern)
return filter(lambda x : key.match(x[0]), self.data.items())
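Usage sketch (a SimpleNamespace stands in for self); note that re.match anchors at the start of each key, and on Python 3 the result is a lazy filter object, hence the list() call.
import re
from types import SimpleNamespace

store = SimpleNamespace(data={'alpha': 1, 'beta': 2, 'alps': 3})
print(list(relookup(store, r'al')))  # [('alpha', 1), ('alps', 3)]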
|
python
|
def qteToBeKilled(self):
"""
Remove all selections and install the original lexer.
"""
self.qteWidget.SCISetStylingEx(0, 0, self.styleOrig)
self.qteWidget.qteSetLexer(self.originalLexer)
|
python
|
def to_prettytable(df):
"""Convert DataFrame into ``PrettyTable``.
"""
pt = PrettyTable()
pt.field_names = df.columns
for tp in zip(*(l for col, l in df.iteritems())):
pt.add_row(tp)
return pt
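Usage sketch, assuming pandas and prettytable are installed; note that DataFrame.iteritems() was removed in pandas 2.0, where items() is the equivalent.
import pandas as pd

df = pd.DataFrame({'name': ['a', 'b'], 'count': [1, 2]})
print(to_prettytable(df))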
|
python
|
def ask(self, choices, **options):
"""
Sends a prompt to the user and optionally waits for a response.
Arguments: "choices" is a Choices object
See https://www.tropo.com/docs/webapi/ask
"""
# # **Sun May 15 21:21:29 2011** -- egilchri
# Settng the voice in this method call has priority.
# Otherwise, we can pick up the voice from the Tropo object,
# if it is set there.
if hasattr(self, 'voice'):
    if 'voice' not in options:
        options['voice'] = self.voice
# # **Sun May 15 21:21:29 2011** -- egilchri
self._steps.append(Ask(choices, **options).obj)
|
python
|
def state_fidelity(state0: State, state1: State) -> bk.BKTensor:
"""Return the quantum fidelity between pure states."""
assert state0.qubits == state1.qubits # FIXME
tensor = bk.absolute(bk.inner(state0.tensor, state1.tensor))**bk.fcast(2)
return tensor
|
python
|
def add_mongo_config(app, simple_connection_string,
mongo_uri, collection_name):
"""
Configure the application to use MongoDB.
:param app: Flask application
:param simple_connection_string:
Expects host:port:database_name or database_name
Mutually exclusive with mongo_uri
:param mongo_uri: Expects mongodb://... as defined
in https://docs.mongodb.com/manual/reference/connection-string/
Mutually exclusive with simple_connection_string (must be None)
:param collection_name: The collection containing Sacred's runs
:return:
"""
if mongo_uri != (None, None):
add_mongo_config_with_uri(app, mongo_uri[0], mongo_uri[1],
collection_name)
if simple_connection_string is not None:
print("Ignoring the -m option. Overridden by "
"a more specific option (-mu).", file=sys.stderr)
else:
# Use the default value 'sacred' when not specified
if simple_connection_string is None:
simple_connection_string = "sacred"
add_mongo_config_simple(app, simple_connection_string, collection_name)
|
python
|
def iterate(self, params, repetition, iteration):
"""
Called once for each training iteration (== epoch here).
"""
print("\nStarting iteration",iteration)
print("Learning rate:", self.learningRate if self.lr_scheduler is None
else self.lr_scheduler.get_lr())
t1 = time.time()
ret = {}
# Update dataset epoch when using pre-processed speech dataset
if self.use_preprocessed_dataset:
t2 = time.time()
self.train_loader.dataset.next_epoch()
self.validation_loader.dataset.next_epoch()
self.test_loader.dataset.next_epoch()
self.bg_noise_loader.dataset.next_epoch()
print("Dataset Load time = {0:.3f} secs, ".format(time.time() - t2))
# Update learning rate using learning rate scheduler if configured
if self.lr_scheduler is not None:
# ReduceLROnPlateau lr_scheduler step should be called after validation,
# all other lr_schedulers should be called before training
if params["lr_scheduler"] != "ReduceLROnPlateau":
self.lr_scheduler.step()
self.train(params, epoch=iteration, repetition=repetition)
# Run validation test
if self.validation_loader is not None:
validation = self.test(params, self.validation_loader)
# ReduceLROnPlateau step should be called after validation
if params["lr_scheduler"] == "ReduceLROnPlateau":
self.lr_scheduler.step(validation["test_loss"])
ret["validation"] = validation
print("Validation: error=", validation["testerror"],
"entropy=", validation["entropy"],
"loss=", validation["test_loss"])
ret.update({"validationerror": validation["testerror"]})
# Run test set
if self.test_loader is not None:
testResults = self.test(params, self.test_loader)
ret["testResults"] = testResults
print("Test: error=", testResults["testerror"],
"entropy=", testResults["entropy"],
"loss=", testResults["test_loss"])
ret.update({"testerror": testResults["testerror"]})
# Run bg noise set
if self.bg_noise_loader is not None:
bgResults = self.test(params, self.bg_noise_loader)
ret["bgResults"] = bgResults
print("BG noise error=", bgResults["testerror"])
ret.update({"bgerror": bgResults["testerror"]})
ret.update({"elapsedTime": time.time() - self.startTime})
ret.update({"learningRate": self.learningRate if self.lr_scheduler is None
else self.lr_scheduler.get_lr()})
# Run noise set
if params.get("run_noise_tests", False):
ret.update(self.runNoiseTests(params))
print("Noise test results: totalCorrect=", ret["totalCorrect"],
"Test error=", ret["testerror"], ", entropy=", ret["entropy"])
ret.update({"elapsedTime": time.time() - self.startTime})
ret.update({"learningRate": self.learningRate if self.lr_scheduler is None
else self.lr_scheduler.get_lr()})
print("Iteration time= {0:.3f} secs, "
"total elapsed time= {1:.3f} mins".format(
time.time() - t1,ret["elapsedTime"]/60.0))
return ret
|
python
|
def generic_document_type_formatter(view, context, model, name):
"""Return AdminLog.document field wrapped in URL to its list view."""
_document_model = model.get('document').document_type
url = _document_model.get_admin_list_url()
return Markup('<a href="%s">%s</a>' % (url, _document_model.__name__))
|
python
|
def _export(self, path, variables_saver):
"""Internal.
Args:
path: string where to export the module to.
variables_saver: an unary-function that writes the module variables
checkpoint on the given path.
"""
self._saved_model_handler.export(path, variables_saver=variables_saver)
module_def_proto = module_def_pb2.ModuleDef()
module_def_proto.format = module_def_pb2.ModuleDef.FORMAT_V3
module_def_filename = get_module_proto_path(path)
tf_utils.atomic_write_string_to_file(
module_def_filename,
module_def_proto.SerializeToString(),
overwrite=False)
logging.info("Exported TF-Hub module to: %s", path)
|
python
|
def set_led(self, state):
"""Set the LED state to state (True or False)"""
if self.connected:
reports = self.device.find_output_reports()
for report in reports:
if self.led_usage in report:
report[self.led_usage] = state
report.send()
|
python
|
def _get_peer_connection(self, blacklist=None):
"""Find a peer and connect to it.
Returns a ``(peer, connection)`` tuple.
Raises ``NoAvailablePeerError`` if no healthy peers are found.
:param blacklist:
If given, a set of hostports for peers that we must not try.
"""
blacklist = blacklist or set()
peer = None
connection = None
while connection is None:
peer = self._choose(blacklist)
if not peer:
raise NoAvailablePeerError(
"Can't find an available peer for '%s'" % self.service
)
try:
connection = yield peer.connect()
except NetworkError as e:
log.info(
'Failed to connect to %s. Trying a different host.',
peer.hostport,
exc_info=e,
)
connection = None
blacklist.add(peer.hostport)
raise gen.Return((peer, connection))
|
java
|
@BetaApi
public final Operation deleteAddress(ProjectRegionAddressName address) {
DeleteAddressHttpRequest request =
DeleteAddressHttpRequest.newBuilder()
.setAddress(address == null ? null : address.toString())
.build();
return deleteAddress(request);
}
|
python
|
def set_parameter(self, name, value):
"""
Set a parameter value by name
Args:
name: The name of the parameter
value (float): The new value for the parameter
"""
i = self.get_parameter_names(include_frozen=True).index(name)
v = self.get_parameter_vector(include_frozen=True)
v[i] = value
self.set_parameter_vector(v, include_frozen=True)
|
python
|
def set_volume(self, volume):
"""Set volume."""
self._player.set_property(PROP_VOLUME, volume)
self._manager[ATTR_VOLUME] = volume
_LOGGER.info('volume set to %.2f', volume)
|
python
|
def find_field(ctx, search, by_type, obj):
"""Find fields in registered data models."""
# TODO: Fix this to work recursively on all possible subschemes
if search is not None:
search = search
else:
search = _ask("Enter search term")
database = ctx.obj['db']
def find(search_schema, search_field, find_result=None, key=""):
"""Examine a schema to find fields by type or name"""
if find_result is None:
find_result = []
fields = search_schema['properties']
if not by_type:
if search_field in fields:
find_result.append(key)
# log("Found queried fieldname in ", model)
else:
for field in fields:
try:
if "type" in fields[field]:
# log(fields[field], field)
if fields[field]["type"] == search_field:
find_result.append((key, field))
# log("Found field", field, "in", model)
except KeyError as e:
log("Field access error:", e, type(e), exc=True,
lvl=debug)
if 'properties' in fields:
# log('Sub properties checking:', fields['properties'])
find_result.append(find(fields['properties'], search_field,
find_result, key=fields['name']))
for field in fields:
if 'items' in fields[field]:
if 'properties' in fields[field]['items']:
# log('Sub items checking:', fields[field])
find_result.append(find(fields[field]['items'], search_field,
find_result, key=field))
else:
pass
# log('Items without proper definition!')
return find_result
if obj is not None:
schema = database.objectmodels[obj]._schema
result = find(schema, search, [], key="top")
if result:
# log(args.object, result)
print(obj)
pprint(result)
else:
for model, thing in database.objectmodels.items():
schema = thing._schema
result = find(schema, search, [], key="top")
if result:
print(model)
# log(model, result)
print(result)
|
python
|
def update(self):
"""Updates the currently running animation.
This method should be called in every frame where you want an animation to run.
Its job is to figure out if it is time to move onto the next image in the animation.
"""
returnValue = False # typical return value
if self.state != PygAnimation.PLAYING:
return returnValue
# The job here is to figure out the index of the image to show
# and the matching elapsed time threshold for the current image
self.elapsed = (time.time() - self.playingStartTime)
if self.elapsed > self.elapsedStopTime: # anim finished
if self.loop: # restart the animation
self.playingStartTime = time.time()
self.nextElapsedThreshold = self.endTimesList[0]
else: # not looping
self.nIterationsLeft = self.nIterationsLeft - 1
if self.nIterationsLeft == 0: # done
self.state = PygAnimation.STOPPED
if self.callBack is not None: # if there is a callBack
self.callBack(self.nickname) # do it
returnValue = True # animation has ended
else: # another iteration - start over again
self.playingStartTime = time.time()
self.nextElapsedThreshold = self.endTimesList[0]
self.index = 0
elif self.elapsed > self.nextElapsedThreshold:
# Time to move on to next picture
self.index = self.index + 1
self.nextElapsedThreshold = self.endTimesList[self.index]
return returnValue
|
python
|
def active():
'''
Return current active profile
CLI Example:
.. code-block:: bash
salt '*' tuned.active
'''
# ask tuned-adm for the currently active profile
result = __salt__['cmd.run']('tuned-adm active')
pattern = re.compile(r'''(?P<stmt>Current active profile:) (?P<profile>\w+.*)''')
match = re.match(pattern, result)
return '{0}'.format(match.group('profile'))
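The regex in action on typical tuned-adm output (a sample string, not captured from a live run):
import re

pattern = re.compile(r'(?P<stmt>Current active profile:) (?P<profile>\w+.*)')
match = re.match(pattern, 'Current active profile: virtual-guest')
print(match.group('profile'))  # virtual-guest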
|
python
|
def replace_namespaced_service(self, name, namespace, body, **kwargs):
"""
replace the specified Service
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_service(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Service (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Service body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Service
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_service_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_service_with_http_info(name, namespace, body, **kwargs)
return data
|
java
|
public final void evaluate() throws IOException {
final SequenceLabelerEvaluator evaluator = new SequenceLabelerEvaluator(this.corpusFormat,
this.sequenceLabeler);
evaluator.evaluate(this.testSamples);
System.out.println(evaluator.getFMeasure());
}
|
java
|
public void asyncPut(final byte[] data, final String key, final String token, StringMap params,
String mime, boolean checkCrc, UpCompletionHandler handler) throws IOException {
checkArgs(key, data, null, token);
if (mime == null) {
mime = Client.DefaultMime;
}
params = filterParam(params);
new FormUploader(client, token, key, data, params, mime, checkCrc, configuration).asyncUpload(handler);
}
|
python
|
def process_sums(self):
"""
Process cardinality sums participating in a new core.
Whenever necessary, some of the sum assumptions are
removed or split (depending on the value of
``self.minw``). Deleted sums are marked as garbage and are
dealt with in :func:`filter_assumps`.
In some cases, the process involves updating the
right-hand sides of the existing cardinality sums (see the
call to :func:`update_sum`). The overall procedure is
detailed in [1]_.
"""
for l in self.core_sums:
if self.wght[l] == self.minw:
# marking variable as being a part of the core
# so that next time it is not used as an assump
self.garbage.add(l)
else:
# do not remove this variable from assumps
# since it has a remaining non-zero weight
self.wght[l] -= self.minw
# increase bound for the sum
t, b = self.update_sum(l)
# updating bounds and weights
if b < len(t.rhs):
lnew = -t.rhs[b]
if lnew in self.garbage:
self.garbage.remove(lnew)
self.wght[lnew] = 0
if lnew not in self.wght:
self.set_bound(t, b)
else:
self.wght[lnew] += self.minw
# put this assumption to relaxation vars
self.rels.append(-l)
|
python
|
def readlines(self, encoding=None):
    """Reads from the file and returns result as a list of lines."""
    try:
        encoding = encoding or ENCODING
        with codecs.open(self.path, encoding=encoding) as fi:
            return fi.readlines()
    except Exception:
        return []
|
java
|
public String asBase64(BaseEncoding.Dialect dialect, BaseEncoding.Padding padding) throws IOException {
String standardBase64 = DatatypeConverter.printBase64Binary(asByteArray(false));
if (dialect == BaseEncoding.Dialect.STANDARD && padding == BaseEncoding.Padding.STANDARD) {
return standardBase64;
}
StringBuilder safeBase64 = new StringBuilder(standardBase64.length());
for(int i=0; i<standardBase64.length(); i++) {
char c = standardBase64.charAt(i);
if (dialect == BaseEncoding.Dialect.SAFE) {
if (c == '+') c = '-';
else if (c == '/') c = '_';
}
if (c == '=') {
if (padding == BaseEncoding.Padding.STANDARD) {
safeBase64.append('=');
}
else if (padding == BaseEncoding.Padding.SAFE) {
safeBase64.append('.');
}
} else {
safeBase64.append(c);
}
}
return safeBase64.toString();
}
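The same dialect/padding translation sketched in Python with the stdlib; the '.'-for-'=' substitution mirrors the SAFE padding branch above.
import base64

raw = b'\xfb\xef'
standard = base64.b64encode(raw).decode()  # '++8='
safe = standard.replace('+', '-').replace('/', '_').replace('=', '.')
print(standard, '->', safe)  # ++8= -> --8.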
|
python
|
def from_rgb(r, g=None, b=None):
    """
    Return the nearest xterm 256 color code from rgb input.
    """
    c = r if isinstance(r, list) else [r, g, b]
    best = {}
    for index, item in enumerate(colors):
        d = __distance(item, c)
        if not best or d <= best['distance']:
            best = {'distance': d, 'index': index}
    if 'index' in best:
        return best['index']
    return 1
|
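A self-contained sketch of the nearest-palette-match idea used above; `palette` and the squared-Euclidean `distance` below are illustrative stand-ins for the module-level `colors` table and `__distance` helper the snippet relies on.

palette = [(0, 0, 0), (205, 0, 0), (0, 205, 0), (255, 255, 255)]

def distance(a, b):
    # squared Euclidean distance in RGB space
    return sum((p - q) ** 2 for p, q in zip(a, b))

def nearest(rgb):
    return min(range(len(palette)), key=lambda i: distance(palette[i], rgb))

print(nearest((200, 10, 10)))  # -> 1, the reddish palette entry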
java
|
private void stop(Attributes attributes) throws SVGParseException
{
    debug("<stop>");
    if (currentElement == null)
        throw new SVGParseException("Invalid document. Root element must be <svg>");
    if (!(currentElement instanceof SVG.GradientElement))
        throw new SVGParseException("Invalid document. <stop> elements are only valid inside <linearGradient> or <radialGradient> elements.");
    SVG.Stop obj = new SVG.Stop();
    obj.document = svgDocument;
    obj.parent = currentElement;
    parseAttributesCore(obj, attributes);
    parseAttributesStyle(obj, attributes);
    parseAttributesStop(obj, attributes);
    currentElement.addChild(obj);
    currentElement = obj;
}
|
java
|
public static void runExample(
    AdManagerServices adManagerServices, AdManagerSession session, long userId)
    throws RemoteException {
  // Get the UserService.
  UserServiceInterface userService = adManagerServices.get(session, UserServiceInterface.class);

  // Create a statement to only select a single user by ID.
  StatementBuilder statementBuilder =
      new StatementBuilder()
          .where("id = :id")
          .orderBy("id ASC")
          .limit(1)
          .withBindVariableValue("id", userId);

  // Get the user.
  UserPage page = userService.getUsersByStatement(statementBuilder.toStatement());
  User user = Iterables.getOnlyElement(Arrays.asList(page.getResults()));

  // Set the role of the user to a salesperson.
  // To determine what other roles exist, run GetAllRoles.java.
  user.setRoleId(-5L);

  // Update the user on the server.
  User[] users = userService.updateUsers(new User[] {user});

  for (User updatedUser : users) {
    System.out.printf(
        "User with ID %d and name '%s' was updated.%n",
        updatedUser.getId(), updatedUser.getName());
  }
}
|
python
|
def save_figure(self, event=None, transparent=False, dpi=600):
    """save figure image to file"""
    file_choices = "PNG (*.png)|*.png|SVG (*.svg)|*.svg|PDF (*.pdf)|*.pdf"
    try:
        ofile = self.conf.title.strip()
    except Exception:
        ofile = 'Image'
    if len(ofile) > 64:
        ofile = ofile[:63].strip()
    if len(ofile) < 1:
        ofile = 'plot'
    # replace characters that are unsafe in file names
    for c in ' :";|/\\':
        ofile = ofile.replace(c, '_')
    ofile = ofile + '.png'
    orig_dir = os.path.abspath(os.curdir)
    dlg = wx.FileDialog(self, message='Save Plot Figure as...',
                        defaultDir=os.getcwd(),
                        defaultFile=ofile,
                        wildcard=file_choices,
                        style=wx.FD_SAVE | wx.FD_CHANGE_DIR)
    if dlg.ShowModal() == wx.ID_OK:
        path = dlg.GetPath()
        if hasattr(self, 'fig'):
            self.fig.savefig(path, transparent=transparent, dpi=dpi)
        else:
            self.canvas.print_figure(path, transparent=transparent, dpi=dpi)
        if path.find(self.launch_dir) == 0:
            path = path[len(self.launch_dir) + 1:]
        self.write_message('Saved plot to %s' % path)
    os.chdir(orig_dir)
|
java
|
public CellConstraints rchw(int row, int col, int rowSpan, int colSpan,
                            String encodedAlignments) {
    CellConstraints result = rchw(row, col, rowSpan, colSpan);
    result.setAlignments(encodedAlignments, false);
    return result;
}
|
python
|
def reduce_filename(f):
    r'''
    Expects something like /tmp/tmpAjry4Gdsbench/test.weights.e5.XXX.YYY.pb
    Where XXX is a variation on the model size for example
    And where YYY is a const related to the training dataset
    '''
    f = os.path.basename(f).split('.')
    return keep_only_digits(f[-3])
|
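A usage sketch for the helper above; `keep_only_digits` is defined elsewhere in the original source, so an assumed minimal version is inlined here to make the example runnable, and the file name is illustrative.

import os

def keep_only_digits(s):
    # assumed behavior: strip everything but the digits
    return "".join(ch for ch in s if ch.isdigit())

def reduce_filename(f):
    f = os.path.basename(f).split('.')
    return keep_only_digits(f[-3])

# the third-from-last dotted component carries the model-size variation
print(reduce_filename("/tmp/tmpAjry4Gdsbench/test.weights.e5.lstm512.ldc93s1.pb"))  # '512'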
python
|
def _check_umi_type(bam_file):
    """Determine the type of UMI from BAM tags: standard or paired.
    """
    with pysam.Samfile(bam_file, "rb") as in_bam:
        for read in in_bam:
            cur_umi = None
            for tag in ["RX", "XC"]:
                try:
                    cur_umi = read.get_tag(tag)
                    break
                except KeyError:
                    pass
            if cur_umi:
                if "-" in cur_umi and len(cur_umi.split("-")) == 2:
                    return "paired", tag
                else:
                    return "adjacency", tag
|
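The classification rule above can be exercised without pysam; this pure-string sketch applies the same test: a UMI with exactly one dash splits into two parts and is treated as paired, anything else as adjacency.

def classify_umi(umi):
    if "-" in umi and len(umi.split("-")) == 2:
        return "paired"
    return "adjacency"

assert classify_umi("ACGT-TTAG") == "paired"
assert classify_umi("ACGTTTAG") == "adjacency"
assert classify_umi("A-C-G") == "adjacency"   # two dashes -> three parts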
python
|
async def _handle_gzip_packed(self, message):
    """
    Unpacks the data from a gzipped object and processes it:
    gzip_packed#3072cfa1 packed_data:bytes = Object;
    """
    self._log.debug('Handling gzipped data')
    with BinaryReader(message.obj.data) as reader:
        message.obj = reader.tgread_object()
        await self._process_message(message)
|
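The unwrap-then-reprocess pattern above can be sketched with plain gzip; the Telegram-specific BinaryReader/tgread_object machinery is not modeled here, and JSON stands in for the object parser.

import gzip
import json

def handle_packet(payload: bytes):
    if payload[:2] == b"\x1f\x8b":        # gzip magic bytes: unwrap first
        return handle_packet(gzip.decompress(payload))
    return json.loads(payload)            # stand-in for object parsing

packed = gzip.compress(json.dumps({"msg": "hi"}).encode())
print(handle_packet(packed))              # {'msg': 'hi'}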
python
|
def setReadOnly(self, state):
    """
    Sets the read-only flag for this widget to the given state.
    Behavior differs per type; not all editor types support read-only.

    :param state | <bool>
    """
    if self._editor and hasattr(self._editor, 'setReadOnly'):
        self._editor.setReadOnly(state)
        return True
    return False
|
python
|
def hasNext(self):
    """
    Returns True if the cursor has a next position, False if not

    :return:
    """
    cursor_pos = self.cursorpos + 1
    try:
        self.cursordat[cursor_pos]
        return True
    except IndexError:
        return False
|
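A minimal list-backed cursor illustrating the hasNext contract above; the class shape and attribute names mirror the snippet but are assumptions, since the enclosing class is not shown.

class Cursor:
    def __init__(self, rows):
        self.cursordat = rows
        self.cursorpos = -1        # positioned before the first row

    def hasNext(self):
        try:
            self.cursordat[self.cursorpos + 1]
            return True
        except IndexError:
            return False

    def next(self):
        self.cursorpos += 1
        return self.cursordat[self.cursorpos]

c = Cursor([10, 20])
while c.hasNext():
    print(c.next())                # prints 10, then 20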
python
|
def create(input_dataset, target, feature=None, validation_set='auto',
           warm_start='auto', batch_size=256,
           max_iterations=100, verbose=True):
    """
    Create a :class:`DrawingClassifier` model.

    Parameters
    ----------
    input_dataset : SFrame
        Input data. The columns named by the ``feature`` and ``target``
        parameters will be extracted for training the drawing classifier.
    target : string
        Name of the column containing the target variable. The values in this
        column must be of string or integer type.
    feature : string, optional
        Name of the column containing the input drawings. None (the default)
        indicates the column in `input_dataset` named "drawing" should be
        used as the feature.
        The feature column can contain both bitmap-based drawings as well as
        stroke-based drawings. Bitmap-based drawing input can be a grayscale
        tc.Image of any size.
        Stroke-based drawing input must be in the following format:
        Every drawing must be represented by a list of strokes, where each
        stroke must be a list of points in the order in which they were drawn
        on the canvas.
        Each point must be a dictionary with two keys, "x" and "y", and their
        respective values must be numerical, i.e. either integer or float.
    validation_set : SFrame, optional
        A dataset for monitoring the model's generalization performance.
        The format of this SFrame must be the same as the training set.
        By default this argument is set to 'auto' and a validation set is
        automatically sampled and used for progress printing. If
        validation_set is set to None, then no additional metrics
        are computed. The default value is 'auto'.
    warm_start : string, optional
        A string to denote which pretrained model to use. Set to "auto"
        by default which uses a model trained on 245 of the 345 classes in
        the Quick, Draw! dataset. To disable warm start, pass in None to
        this argument. Here is a list of all the pretrained models that
        can be passed in as this argument:
        "auto": Uses quickdraw_245_v0
        "quickdraw_245_v0": Uses a model trained on 245 of the 345 classes
        in the Quick, Draw! dataset.
        None: No warm start
    batch_size : int, optional
        The number of drawings per training step. If not set, a default
        value of 256 will be used. If you are getting memory errors,
        try decreasing this value. If you have a powerful computer, increasing
        this value may improve performance.
    max_iterations : int, optional
        The maximum number of allowed passes through the data. More passes
        over the data can result in a more accurately trained model.
    verbose : bool, optional
        If True, print progress updates and model details.

    Returns
    -------
    out : DrawingClassifier
        A trained :class:`DrawingClassifier` model.

    See Also
    --------
    DrawingClassifier

    Examples
    --------
    .. sourcecode:: python

        # Train a drawing classifier model
        >>> model = turicreate.drawing_classifier.create(data)

        # Make predictions on the training set and add them as a column
        # to the SFrame
        >>> data['predictions'] = model.predict(data)
    """
    import mxnet as _mx
    from mxnet import autograd as _autograd
    from ._model_architecture import Model as _Model
    from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter
    from .._mxnet import _mxnet_utils

    start_time = _time.time()
    accepted_values_for_warm_start = ["auto", "quickdraw_245_v0", None]

    # @TODO: Should be able to automatically choose number of iterations
    # based on data size: Tracked in Github Issue #1576

    # automatically infer feature column
    if feature is None:
        feature = _tkutl._find_only_drawing_column(input_dataset)

    _raise_error_if_not_drawing_classifier_input_sframe(
        input_dataset, feature, target)

    if batch_size is not None and not isinstance(batch_size, int):
        raise TypeError("'batch_size' must be an integer >= 1")
    if batch_size is not None and batch_size < 1:
        raise ValueError("'batch_size' must be >= 1")
    if max_iterations is not None and not isinstance(max_iterations, int):
        raise TypeError("'max_iterations' must be an integer >= 1")
    if max_iterations is not None and max_iterations < 1:
        raise ValueError("'max_iterations' must be >= 1")

    is_stroke_input = (input_dataset[feature].dtype != _tc.Image)
    dataset = _extensions._drawing_classifier_prepare_data(
        input_dataset, feature) if is_stroke_input else input_dataset

    iteration = 0

    classes = dataset[target].unique()
    classes = sorted(classes)
    class_to_index = {name: index for index, name in enumerate(classes)}

    validation_set_corrective_string = ("'validation_set' parameter must be "
        + "an SFrame, or None, or must be set to 'auto' for the toolkit to "
        + "automatically create a validation set.")

    if isinstance(validation_set, _tc.SFrame):
        _raise_error_if_not_drawing_classifier_input_sframe(
            validation_set, feature, target)
        is_validation_stroke_input = (validation_set[feature].dtype != _tc.Image)
        validation_dataset = _extensions._drawing_classifier_prepare_data(
            validation_set, feature) if is_validation_stroke_input else validation_set
    elif isinstance(validation_set, str):
        if validation_set == 'auto':
            if dataset.num_rows() >= 100:
                if verbose:
                    print("PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.\n"
                          "          You can set ``validation_set=None`` to disable validation tracking.\n")
                dataset, validation_dataset = dataset.random_split(TRAIN_VALIDATION_SPLIT, exact=True)
            else:
                validation_set = None
                validation_dataset = _tc.SFrame()
        else:
            raise _ToolkitError("Unrecognized value for 'validation_set'. "
                + validation_set_corrective_string)
    elif validation_set is None:
        validation_dataset = _tc.SFrame()
    else:
        raise TypeError("Unrecognized type for 'validation_set'. "
            + validation_set_corrective_string)

    train_loader = _SFrameClassifierIter(dataset, batch_size,
                                         feature_column=feature,
                                         target_column=target,
                                         class_to_index=class_to_index,
                                         load_labels=True,
                                         shuffle=True,
                                         iterations=max_iterations)
    train_loader_to_compute_accuracy = _SFrameClassifierIter(dataset, batch_size,
                                                             feature_column=feature,
                                                             target_column=target,
                                                             class_to_index=class_to_index,
                                                             load_labels=True,
                                                             shuffle=True,
                                                             iterations=1)
    validation_loader = _SFrameClassifierIter(validation_dataset, batch_size,
                                              feature_column=feature,
                                              target_column=target,
                                              class_to_index=class_to_index,
                                              load_labels=True,
                                              shuffle=True,
                                              iterations=1)

    if verbose and iteration == 0:
        column_names = ['iteration', 'train_loss', 'train_accuracy', 'time']
        column_titles = ['Iteration', 'Training Loss', 'Training Accuracy', 'Elapsed Time (seconds)']
        if validation_set is not None:
            column_names.insert(3, 'validation_accuracy')
            column_titles.insert(3, 'Validation Accuracy')
        table_printer = _tc.util._ProgressTablePrinter(
            column_names, column_titles)

    ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)
    model = _Model(num_classes=len(classes), prefix="drawing_")
    model_params = model.collect_params()
    model_params.initialize(_mx.init.Xavier(), ctx=ctx)

    if warm_start is not None:
        if type(warm_start) is not str:
            raise TypeError("'warm_start' must be a string or None. "
                + "'warm_start' can take in the following values: "
                + str(accepted_values_for_warm_start))
        if warm_start not in accepted_values_for_warm_start:
            raise _ToolkitError("Unrecognized value for 'warm_start': "
                + warm_start + ". 'warm_start' can take in the following "
                + "values: " + str(accepted_values_for_warm_start))
        pretrained_model = _pre_trained_models.DrawingClassifierPreTrainedModel(
            warm_start)
        pretrained_model_params_path = pretrained_model.get_model_path()
        model.load_params(pretrained_model_params_path,
                          ctx=ctx,
                          allow_missing=True)

    softmax_cross_entropy = _mx.gluon.loss.SoftmaxCrossEntropyLoss()
    model.hybridize()
    trainer = _mx.gluon.Trainer(model.collect_params(), 'adam')

    train_accuracy = _mx.metric.Accuracy()
    validation_accuracy = _mx.metric.Accuracy()

    def get_data_and_label_from_batch(batch):
        if batch.pad is not None:
            size = batch_size - batch.pad
            sliced_data = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size)
            sliced_label = _mx.nd.slice_axis(batch.label[0], axis=0, begin=0, end=size)
            num_devices = min(sliced_data.shape[0], len(ctx))
            batch_data = _mx.gluon.utils.split_and_load(sliced_data, ctx_list=ctx[:num_devices], even_split=False)
            batch_label = _mx.gluon.utils.split_and_load(sliced_label, ctx_list=ctx[:num_devices], even_split=False)
        else:
            batch_data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
            batch_label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        return batch_data, batch_label

    def compute_accuracy(accuracy_metric, batch_loader):
        batch_loader.reset()
        accuracy_metric.reset()
        for batch in batch_loader:
            batch_data, batch_label = get_data_and_label_from_batch(batch)
            outputs = []
            for x, y in zip(batch_data, batch_label):
                if x is None or y is None:
                    continue
                z = model(x)
                outputs.append(z)
            accuracy_metric.update(batch_label, outputs)

    for train_batch in train_loader:
        train_batch_data, train_batch_label = get_data_and_label_from_batch(train_batch)
        with _autograd.record():
            # Inside training scope
            for x, y in zip(train_batch_data, train_batch_label):
                z = model(x)
                # Computes softmax cross entropy loss.
                loss = softmax_cross_entropy(z, y)
                # Backpropagate the error for one iteration.
                loss.backward()

        # Make one step of parameter update. Trainer needs to know the
        # batch size of data to normalize the gradient by 1/batch_size.
        trainer.step(train_batch.data[0].shape[0])

        # calculate training metrics
        train_loss = loss.mean().asscalar()
        train_time = _time.time() - start_time

        if train_batch.iteration > iteration:
            # Compute training accuracy
            compute_accuracy(train_accuracy, train_loader_to_compute_accuracy)
            # Compute validation accuracy
            if validation_set is not None:
                compute_accuracy(validation_accuracy, validation_loader)
            iteration = train_batch.iteration
            if verbose:
                kwargs = {"iteration": iteration,
                          "train_loss": float(train_loss),
                          "train_accuracy": train_accuracy.get()[1],
                          "time": train_time}
                if validation_set is not None:
                    kwargs["validation_accuracy"] = validation_accuracy.get()[1]
                table_printer.print_row(**kwargs)

    state = {
        '_model': model,
        '_class_to_index': class_to_index,
        'num_classes': len(classes),
        'classes': classes,
        'input_image_shape': (1, BITMAP_WIDTH, BITMAP_HEIGHT),
        'batch_size': batch_size,
        'training_loss': train_loss,
        'training_accuracy': train_accuracy.get()[1],
        'training_time': train_time,
        'validation_accuracy': validation_accuracy.get()[1],
        # nan if validation_set=None
        'max_iterations': max_iterations,
        'target': target,
        'feature': feature,
        'num_examples': len(input_dataset)
    }
    return DrawingClassifier(state)
|
java
|
@JsonSetter("genre_ids")
@Override
public void setGenreIds(List<Integer> ids) {
    this.genres = new ArrayList<>();
    for (Integer id : ids) {
        Genre g = new Genre();
        g.setId(id);
        genres.add(g);
    }
}
|
python
|
def get_user(prompt=None):
    """
    Prompts the user for their login name, defaulting to the USER
    environment variable. Returns a string containing the username.
    May throw an exception if EOF is given by the user.

    :type  prompt: str|None
    :param prompt: The user prompt, or None to use the default.
    :rtype:  string
    :return: A username.
    """
    # Read the username, falling back to the environment.
    try:
        env_user = getpass.getuser()
    except KeyError:
        env_user = ''
    if prompt is None:
        prompt = "Please enter your user name"
    if env_user is None or env_user == '':
        user = input('%s: ' % prompt)
    else:
        user = input('%s [%s]: ' % (prompt, env_user))
    if user == '':
        user = env_user
    return user
|
java
|
public void deleteClassPipeProperties(String className,
        String pipeName, List<String> propertyNames) throws DevFailed {
    databaseDAO.deleteClassPipeProperties(this, className, pipeName, propertyNames);
}
|
python
|
def where(cond, x, y):
    """Return elements from `x` or `y` depending on `cond`.

    Performs xarray-like broadcasting across input arguments.

    Parameters
    ----------
    cond : scalar, array, Variable, DataArray or Dataset with boolean dtype
        When True, return values from `x`, otherwise returns values from `y`.
    x, y : scalar, array, Variable, DataArray or Dataset
        Values from which to choose. All dimension coordinates on these objects
        must be aligned with each other and with `cond`.

    Returns
    -------
    In priority order: Dataset, DataArray, Variable or array, whichever
    type appears as an input argument.

    Examples
    --------
    >>> cond = xr.DataArray([True, False], dims=['x'])
    >>> x = xr.DataArray([1, 2], dims=['y'])
    >>> xr.where(cond, x, 0)
    <xarray.DataArray (x: 2, y: 2)>
    array([[1, 2],
           [0, 0]])
    Dimensions without coordinates: x, y

    See also
    --------
    numpy.where : corresponding numpy function
    Dataset.where, DataArray.where : equivalent methods
    """
    # alignment for three arguments is complicated, so don't support it yet
    return apply_ufunc(duck_array_ops.where,
                       cond, x, y,
                       join='exact',
                       dataset_join='exact',
                       dask='allowed')
|
python
|
def _create_key(lang, instance):
    """Build the unique cache key for this instance and language."""
    model_name = instance.__class__.__name__
    return "{0}__{1}_{2}".format(lang, model_name, instance.id)
|
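A quick check of the key shape produced above, using a hypothetical stand-in model class; any object exposing a class name and an `id` attribute yields the same `lang__Model_id` pattern.

class Article:           # hypothetical stand-in for a model instance
    id = 42

print(_create_key("en", Article()))   # -> 'en__Article_42'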
java
|
public void match(ByteBuffer data, int size, IMtrieHandler func, XPub pub)
{
    assert (data != null);
    assert (func != null);
    assert (pub != null);

    Mtrie current = this;
    int idx = 0;
    while (true) {
        // Signal the pipes attached to this node.
        if (current.pipes != null) {
            for (Pipe it : current.pipes) {
                func.invoke(it, null, 0, pub);
            }
        }
        // If we are at the end of the message, there's nothing more to match.
        if (size == 0) {
            break;
        }
        // If there are no subnodes in the trie, return.
        if (current.count == 0) {
            break;
        }
        byte c = data.get(idx);
        // If there's one subnode (optimisation).
        if (current.count == 1) {
            if (c != current.min) {
                break;
            }
            current = current.next[0];
            idx++;
            size--;
            continue;
        }
        // If there are multiple subnodes.
        if (c < current.min || c >= current.min + current.count) {
            break;
        }
        if (current.next[c - current.min] == null) {
            break;
        }
        current = current.next[c - current.min];
        idx++;
        size--;
    }
}
|
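A compact Python sketch of the prefix-walk dispatch the Java method performs: at every trie node reached along the key, fire the subscriptions stored there, and stop as soon as the next byte has no child. The dict-based nodes below are an illustrative stand-in for the array-of-children Mtrie.

def trie_match(root, key, func):
    node = root
    for ch in key:
        for sub in node.get("subs", []):   # subscribers at this prefix
            func(sub)
        node = node.get(ch)
        if node is None:                   # no child for this byte: done
            return
    for sub in node.get("subs", []):       # subscribers at the full key
        func(sub)

root = {"a": {"subs": ["sub:a"], "b": {"subs": ["sub:ab"]}}}
trie_match(root, "abc", print)             # prints sub:a, then sub:ab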