language | func_code_string
---|---|
python | def create(cls, obj):
"""
Create a new prototype object with the argument as the source
prototype.
.. note::
This does not `initialize` the newly created object any
more than setting its prototype.
Calling the __init__ method is usually unnecessary as all
initialization data should be in the original prototype
object already.
If required, call __init__ explicitly:
>>> proto_obj = MyProtoObj(1, 2, 3)
>>> obj = MyProtoObj.create(proto_obj)
>>> obj.__init__(1, 2, 3)
"""
self = cls.__new__(cls)
self.__proto__ = obj
return self |
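A minimal self-contained sketch of how such a prototype hook can be exercised. The `__getattr__` delegation and the `MyProtoObj` fields are illustrative assumptions, not part of the original snippet:

```python
class ProtoBase:
    __proto__ = None

    @classmethod
    def create(cls, obj):
        # Bypass __init__: all state lives on the source prototype.
        self = cls.__new__(cls)
        self.__proto__ = obj
        return self

    def __getattr__(self, name):
        # Fall back to the prototype for attributes not set locally.
        if self.__proto__ is not None:
            return getattr(self.__proto__, name)
        raise AttributeError(name)


class MyProtoObj(ProtoBase):
    def __init__(self, a, b, c):
        self.a, self.b, self.c = a, b, c


proto = MyProtoObj(1, 2, 3)
obj = MyProtoObj.create(proto)
assert obj.a == 1  # resolved through the prototype; no __init__ call needed
```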
java | @Override
public Object eGet(int featureID, boolean resolve, boolean coreType) {
switch (featureID) {
case BpsimPackage.TRUNCATED_NORMAL_DISTRIBUTION_TYPE__MAX:
return getMax();
case BpsimPackage.TRUNCATED_NORMAL_DISTRIBUTION_TYPE__MEAN:
return getMean();
case BpsimPackage.TRUNCATED_NORMAL_DISTRIBUTION_TYPE__MIN:
return getMin();
case BpsimPackage.TRUNCATED_NORMAL_DISTRIBUTION_TYPE__STANDARD_DEVIATION:
return getStandardDeviation();
}
return super.eGet(featureID, resolve, coreType);
} |
java | @Override
public synchronized void put(Integer key, T value) {
int intKey = key;
while (this.data.size() <= intKey) {
this.data.add(null);
}
if (this.data.get(intKey) != null) {
throw new DatabaseException("Database already has a value for key [" + key + "]");
}
this.data.set(intKey, value);
} |
java | public SummarizedAttackVector withVectorCounters(SummarizedCounter... vectorCounters) {
if (this.vectorCounters == null) {
setVectorCounters(new java.util.ArrayList<SummarizedCounter>(vectorCounters.length));
}
for (SummarizedCounter ele : vectorCounters) {
this.vectorCounters.add(ele);
}
return this;
} |
java | @Override
public QueryRequest<ArrayNode> query(String fql) {
return this.query(fql, mapper.getTypeFactory().constructType(ArrayNode.class));
} |
java | private static double getDTBondF(double[] resultsH) {
double result = 0.0;
double SE = resultsH[0];
double PE = resultsH[1];
double PSC = resultsH[2];
double PIC = resultsH[3];
double ETP = resultsH[4];
double COUNTR = resultsH[6];
// System.out.println("SE : "+SE+", PE : "+PE+", PSC : "+PSC+", PIC : "+PIC+", ETP : "+ETP+", SPC : "+SPC+", COUNTR : "+COUNTR);
result = 0.1691 * SE + 1.1536 * PE + -6.3049 * PSC + -15.2638 * PIC + -0.2456 * ETP + -0.0139 * COUNTR + 2.114;
return result;
} |
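The method above is a hard-coded linear model over descriptor values; a hedged NumPy transcription of the same dot-product form (note that `resultsH[5]`, the SPC descriptor mentioned in the commented-out debug line, is unused by the model):

```python
import numpy as np

# Coefficients for SE, PE, PSC, PIC, ETP, COUNTR (resultsH indices
# 0, 1, 2, 3, 4, 6); index 5 (SPC) is not used.
COEFFS = np.array([0.1691, 1.1536, -6.3049, -15.2638, -0.2456, -0.0139])
INTERCEPT = 2.114

def dt_bond_f(results_h):
    features = np.array([results_h[i] for i in (0, 1, 2, 3, 4, 6)])
    return float(COEFFS @ features + INTERCEPT)
```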
python | def match_nth_tag_type(self, el, child):
"""Match tag type for `nth` matches."""
return(
(self.get_tag(child) == self.get_tag(el)) and
(self.get_tag_ns(child) == self.get_tag_ns(el))
) |
python | def combine_tensors_and_multiply(combination: str,
tensors: List[torch.Tensor],
weights: torch.nn.Parameter) -> torch.Tensor:
"""
Like :func:`combine_tensors`, but does a weighted (linear) multiplication while combining.
This is a separate function from ``combine_tensors`` because we try to avoid instantiating
large intermediate tensors during the combination, which is possible because we know that we're
going to be multiplying by a weight vector in the end.
Parameters
----------
combination : ``str``
Same as in :func:`combine_tensors`
tensors : ``List[torch.Tensor]``
A list of tensors to combine, where the integers in the ``combination`` are (1-indexed)
positions in this list of tensors. These tensors are all expected to have either three or
four dimensions, with the final dimension being an embedding. If there are four
dimensions, one of them must have length 1.
weights : ``torch.nn.Parameter``
A vector of weights to use for the combinations. This should have shape (combined_dim,),
as calculated by :func:`get_combined_dim`.
"""
if len(tensors) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace('x', '1').replace('y', '2')
pieces = combination.split(',')
tensor_dims = [tensor.size(-1) for tensor in tensors]
combination_dims = [_get_combination_dim(piece, tensor_dims) for piece in pieces]
dims_so_far = 0
to_sum = []
for piece, combination_dim in zip(pieces, combination_dims):
weight = weights[dims_so_far:(dims_so_far + combination_dim)]
dims_so_far += combination_dim
to_sum.append(_get_combination_and_multiply(piece, tensors, weight))
result = to_sum[0]
for result_piece in to_sum[1:]:
result = result + result_piece
return result |
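A simplified stand-alone illustration of the weight-slicing idea for the fixed combination `x,y,x*y`. This is a sketch of the technique, not the library function, and assumes PyTorch is available:

```python
import torch

# For combination "x,y,x*y" with embedding size d, the weight vector has
# length 3*d and is sliced per piece, avoiding the concatenated
# [x; y; x*y] intermediate tensor.
d = 4
x = torch.randn(2, 5, d)   # (batch, seq, dim)
y = torch.randn(2, 5, d)
weights = torch.randn(3 * d)

w_x, w_y, w_xy = weights[:d], weights[d:2 * d], weights[2 * d:]
result = x @ w_x + y @ w_y + (x * y) @ w_xy   # shape (batch, seq)

# Equivalent, but memory-hungrier, reference computation:
reference = torch.cat([x, y, x * y], dim=-1) @ weights
assert torch.allclose(result, reference, atol=1e-5)
```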
java | private List<Coordinate> sortX(List<Coordinate> coordinates) {
List<Coordinate> sorted = new ArrayList<Coordinate>(coordinates);
Collections.sort(sorted, new XComparator());
return sorted;
} |
java | void shutdown() {
m_isShutdown = true;
try {
int waitFor = 1 - Math.min(m_inFlight.availablePermits(), -4);
for (int i = 0; i < waitFor; ++i) {
try {
if (m_inFlight.tryAcquire(1, TimeUnit.SECONDS)) {
m_inFlight.release();
break;
}
} catch (InterruptedException e) {
break;
}
}
m_ecryptgw.die();
EncryptFrame frame = null;
while ((frame = m_encryptedFrames.poll()) != null) {
frame.frame.release();
}
for (EncryptFrame ef: m_partialMessages) {
ef.frame.release();
}
m_partialMessages.clear();
if (m_encryptedMessages.refCnt() > 0) m_encryptedMessages.release();
} finally {
m_inFlight.drainPermits();
m_inFlight.release();
}
} |
java | public void buildFieldDetails(XMLNode node,
Content memberDetailsTree) throws Exception {
configuration.getBuilderFactory().
getFieldBuilder(writer).buildChildren(node, memberDetailsTree);
} |
java | public static <R, C, V> ImmutableTable<R, C, V> copyOf(Iterable<Table.Cell<R, C, V>> cells) {
final ImmutableTable.Builder<R, C, V> ret = ImmutableTable.builder();
for (final Table.Cell<R, C, V> cell : cells) {
ret.put(cell.getRowKey(), cell.getColumnKey(), cell.getValue());
}
return ret.build();
} |
python | def _ack_coord_handle(
coord, coord_handle, queue_mapper, msg_tracker, timing_state,
tile_proc_logger, stats_handler):
"""share code for acknowledging a coordinate"""
# returns tuple of (handle, error), either of which can be None
track_result = msg_tracker.done(coord_handle)
queue_handle = track_result.queue_handle
if not queue_handle:
return None, None
tile_queue = queue_mapper.get_queue(queue_handle.queue_id)
assert tile_queue, \
'Missing tile_queue: %s' % queue_handle.queue_id
parent_tile = None
if track_result.all_done:
parent_tile = track_result.parent_tile
try:
tile_queue.job_done(queue_handle.handle)
except Exception as e:
stacktrace = format_stacktrace_one_line()
tile_proc_logger.error_job_done(
'tile_queue.job_done', e, stacktrace,
coord, parent_tile,
)
return queue_handle, e
if parent_tile is not None:
# we completed a tile pyramid and should log appropriately
start_time = timing_state['start']
stop_time = convert_seconds_to_millis(time.time())
tile_proc_logger.log_processed_pyramid(
parent_tile, start_time, stop_time)
stats_handler.processed_pyramid(
parent_tile, start_time, stop_time)
else:
try:
tile_queue.job_progress(queue_handle.handle)
except Exception as e:
stacktrace = format_stacktrace_one_line()
err_details = {"queue_handle": queue_handle.handle}
if isinstance(e, JobProgressException):
err_details = e.err_details
tile_proc_logger.error_job_progress(
'tile_queue.job_progress', e, stacktrace,
coord, parent_tile, err_details,
)
return queue_handle, e
return queue_handle, None |
java | public static String toRGB(Color color) {
if(color != null) {
return "rgb(" + Integer.toString(color.getRed()) + "," + Integer.toString(color.getGreen()) + "," + Integer.toString(color.getBlue()) + ")";
}
return null;
} |
java | @Override
public DeleteBatchPredictionResult deleteBatchPrediction(DeleteBatchPredictionRequest request) {
request = beforeClientExecution(request);
return executeDeleteBatchPrediction(request);
} |
java | public void merge(GridHubConfiguration other) {
if (other == null) {
return;
}
super.merge(other);
if (isMergeAble(CapabilityMatcher.class, other.capabilityMatcher, capabilityMatcher)) {
capabilityMatcher = other.capabilityMatcher;
}
if (isMergeAble(Integer.class, other.newSessionWaitTimeout, newSessionWaitTimeout)) {
newSessionWaitTimeout = other.newSessionWaitTimeout;
}
if (isMergeAble(Prioritizer.class, other.prioritizer, prioritizer)) {
prioritizer = other.prioritizer;
}
if (isMergeAble(Boolean.class, other.throwOnCapabilityNotPresent, throwOnCapabilityNotPresent)) {
throwOnCapabilityNotPresent = other.throwOnCapabilityNotPresent;
}
if (isMergeAble(String.class, other.registry, registry)) {
registry = other.registry;
}
} |
python | def confirm_phone_number(self, sms_code):
"""Confirm phone number with the recieved SMS code
:param sms_code: sms code
:type sms_code: :class:`str`
:return: success (returns ``False`` on request fail/timeout)
:rtype: :class:`bool`
"""
sess = self._get_web_session()
try:
resp = sess.post('https://steamcommunity.com/steamguard/phoneajax',
data={
'op': 'check_sms_code',
'arg': sms_code,
'checkfortos': 1,
'skipvoip': 1,
'sessionid': sess.cookies.get('sessionid', domain='steamcommunity.com'),
},
timeout=15).json()
except Exception:
return False
return (resp or {}).get('success', False) |
python | def get_model(self, sm_id):
"""
Extract a CompositeSourceModel instance containing the single
model of index `sm_id`.
"""
sm = self.source_models[sm_id]
if self.source_model_lt.num_samples:
self.source_model_lt.num_samples = sm.samples
new = self.__class__(self.gsim_lt, self.source_model_lt, [sm],
self.optimize_same_id)
new.sm_id = sm_id
return new |
python | def create(self, dataset_id):
""" Create a dataset in Google BigQuery
Parameters
----------
dataset_id : str
Name of dataset to be written
"""
from google.cloud.bigquery import Dataset
if self.exists(dataset_id):
raise DatasetCreationError(
"Dataset {0} already " "exists".format(dataset_id)
)
dataset = Dataset(self.client.dataset(dataset_id))
if self.location is not None:
dataset.location = self.location
try:
self.client.create_dataset(dataset)
except self.http_error as ex:
self.process_http_error(ex) |
java | protected boolean isStartViewAvailableOnRoot() {
if (!m_startview.isEnabled()) {
return false;
}
return !m_startview.getValue().equals(CmsPageEditorConfiguration.APP_ID)
&& !m_startview.getValue().equals(CmsSitemapEditorConfiguration.APP_ID);
} |
python | def RGBA(self, val):
"""Set the color using an Nx4 array of RGBA uint8 values"""
# need to convert to normalized float
val = np.atleast_1d(val).astype(np.float32) / 255
self.rgba = val |
python | def set_bucket_props(self, bucket, props):
"""
Set the properties on the bucket object given
"""
bucket_type = self._get_bucket_type(bucket.bucket_type)
url = self.bucket_properties_path(bucket.name,
bucket_type=bucket_type)
headers = {'Content-Type': 'application/json'}
content = json.dumps({'props': props})
# Run the request...
status, _, body = self._request('PUT', url, headers, content)
if status == 401:
raise SecurityError('Not authorized to set bucket properties.')
elif status != 204:
raise RiakError('Error setting bucket properties.')
return True |
python | def annual_cooling_design_day_004(self):
"""A design day object representing the annual 0.4% cooling design day."""
if self._summer_des_day_dict:
tau = None
month_num = int(self._summer_des_day_dict['Month'])
if self._monthly_tau_beam != [] and self._monthly_tau_diffuse != [] \
and self._monthly_tau_beam[month_num - 1] is not None and \
self._monthly_tau_diffuse[month_num - 1] is not None:
tau = (self._monthly_tau_beam[month_num - 1],
self._monthly_tau_diffuse[month_num - 1])
return DesignDay.from_ashrae_dict_cooling(
self._summer_des_day_dict, self.location, False,
self._stand_press_at_elev, tau)
else:
return None |
java | private CmsModule getModuleForFileName(String fileName) {
String moduleName = fileName;
if (fileName.endsWith(SUFFIX)) {
moduleName = fileName.substring(0, fileName.length() - SUFFIX.length());
}
CmsModule result = OpenCms.getModuleManager().getModule(moduleName);
return result;
} |
java | private PropertyAdapter createLocalDMAdapter(InetSocketAddress local,
InetSocketAddress host, Map options) throws KNXException
{
return new KnIPDeviceMgmtAdapter(local, host, options.containsKey("nat"), null,
false);
} |
python | def writefile(filename, data, binary=False):
""" Write the provided data to the file.
`filename`
Filename to write.
`data`
Data buffer to write.
`binary`
Set to ``True`` to indicate a binary file.
Returns boolean.
"""
try:
flags = 'w' if not binary else 'wb'
with open(filename, flags) as _file:
_file.write(data)
_file.flush()
return True
except (OSError, IOError):
return False |
java | private boolean checkUniqueMasterCitizenNumber(final String ptaxNumber) {
final int checkSum = ptaxNumber.charAt(12) - '0';
final int sum = ((ptaxNumber.charAt(0) - '0' + ptaxNumber.charAt(6) - '0') * 7 //
+ (ptaxNumber.charAt(1) - '0' + ptaxNumber.charAt(7) - '0') * 6 //
+ (ptaxNumber.charAt(2) - '0' + ptaxNumber.charAt(8) - '0') * 5 //
+ (ptaxNumber.charAt(3) - '0' + ptaxNumber.charAt(9) - '0') * 4 //
+ (ptaxNumber.charAt(4) - '0' + ptaxNumber.charAt(10) - '0') * 3 //
+ (ptaxNumber.charAt(5) - '0' + ptaxNumber.charAt(11) - '0') * 2) //
% MODULO_11;
int calculatedCheckSum = MODULO_11 - sum;
if (calculatedCheckSum == 10) {
calculatedCheckSum = 0;
}
return checkSum == calculatedCheckSum;
} |
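A hedged Python transcription of the same mod-11 check-digit scheme: weights 7 down to 2 over paired digits, mirroring the Java method above (including its behavior when the computed value comes out as 11):

```python
MODULO_11 = 11

def check_master_citizen_number(number: str) -> bool:
    digits = [int(c) for c in number]
    # (d0+d6)*7 + (d1+d7)*6 + ... + (d5+d11)*2, taken mod 11
    s = sum((digits[i] + digits[i + 6]) * (7 - i) for i in range(6)) % MODULO_11
    calculated = MODULO_11 - s
    if calculated == 10:
        calculated = 0
    return digits[12] == calculated
```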
python | def atlas_peer_update_health( peer_hostport, received_response, peer_table=None ):
"""
Mark the given peer as alive at this time.
Update times at which we contacted it,
and update its health score.
Use the global health table by default,
or use the given health info if set.
"""
with AtlasPeerTableLocked(peer_table) as ptbl:
if peer_hostport not in ptbl.keys():
return False
# record that we contacted this peer, and whether or not we got useful info from it
now = time_now()
# update timestamps; remove old data
new_times = []
for (t, r) in ptbl[peer_hostport]['time']:
if t + atlas_peer_lifetime_interval() < now:
continue
new_times.append((t, r))
new_times.append((now, received_response))
ptbl[peer_hostport]['time'] = new_times
return True |
java | public static String joinStrings(List<String> strings, boolean fixCase, char withChar) {
if (strings == null || strings.size() == 0) {
return "";
}
StringBuilder result = null;
for (String s : strings) {
if (fixCase) {
s = fixCase(s);
}
if (result == null) {
result = new StringBuilder(s);
} else {
result.append(withChar);
result.append(s);
}
}
return result.toString();
} |
python | def OnEnterSelectionMode(self, event):
"""Event handler for entering selection mode, disables cell edits"""
self.grid.sel_mode_cursor = list(self.grid.actions.cursor)
self.grid.EnableDragGridSize(False)
self.grid.EnableEditing(False) |
java | public static boolean isDefined(@NonNull Class<? extends EnumValue> enumClass, @NonNull String name) {
return GLOBAL_REPOSITORY.containsKey(toKey(enumClass, name));
} |
java | private OnClickListener createRemovePreferenceHeaderClickListener() {
return new OnClickListener() {
@Override
public void onClick(final DialogInterface dialog, final int which) {
int position = spinner.getSelectedItemPosition();
listener.onRemovePreferenceHeader(position);
}
};
} |
python | def do_repo_report(repos, report='full', output=sys.stdout, *args, **kwargs):
"""
Do a repository report: call the report function for each Repository
Args:
repos (iterable): iterable of Repository instances
report (string): report name
output (writeable): output stream to print to
Yields:
Repository subclass
"""
for i, repo in enumerate(repos):
log.debug(str((i, next(repo.origin_report()))))
try:
if repo is not None:
reportfunc = REPORT_TYPES.get(report)
if reportfunc is None:
raise Exception("Unrecognized report type: %r (%s)" %
(report, ', '.join(REPORT_TYPES.keys())))
for l in reportfunc(repo, *args, **kwargs):
print(l, file=output)
except Exception as e:
log.error(repo)
log.error(report)
log.error(e)
raise
yield repo |
python | def format_seq(self, outstream=None, linewidth=70):
"""
Print a sequence in a readable format.
:param outstream: if `None`, formatted sequence is returned as a
string; otherwise, it is treated as a file-like
object and the formatted sequence is printed to the
outstream
:param linewidth: width for wrapping sequences over multiple lines; set
to 0 for no wrapping
"""
if linewidth == 0 or len(self.seq) <= linewidth:
if outstream is None:
return self.seq
else:
print(self.seq, file=outstream)
return
i = 0
seq = ''
while i < len(self.seq):
if outstream is None:
seq += self.seq[i:i+linewidth] + '\n'
else:
print(self.seq[i:i+linewidth], file=outstream)
i += linewidth
if outstream is None:
return seq |
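The wrapping loop reduces to slicing the sequence in `linewidth`-sized steps; an isolated sketch of the same logic:

```python
def wrap_seq(seq, linewidth=70):
    # Mirror the snippet's behavior: no wrapping when linewidth is 0
    # or the sequence already fits on one line.
    if linewidth == 0 or len(seq) <= linewidth:
        return seq
    return '\n'.join(seq[i:i + linewidth] for i in range(0, len(seq), linewidth))

assert wrap_seq('ACGTACGT', linewidth=3) == 'ACG\nTAC\nGT'
```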
java | public Quaternionf normalize(Quaternionf dest) {
float invNorm = (float) (1.0 / Math.sqrt(x * x + y * y + z * z + w * w));
dest.x = x * invNorm;
dest.y = y * invNorm;
dest.z = z * invNorm;
dest.w = w * invNorm;
return dest;
} |
python | def _on_write(self, sender, *args, **kwargs):
"""
Internal handler for writing to the device.
"""
self.on_write(data=kwargs.get('data', None)) |
java | public Vector3f div(Vector3fc v, Vector3f dest) {
dest.x = x / v.x();
dest.y = y / v.y();
dest.z = z / v.z();
return dest;
} |
python | def increment(version):
"""Return an incremented version string."""
release_version = os.environ.get("RELEASE_VERSION", None)
if release_version is not None:
return release_version
if isinstance(version, LegacyVersion):
msg = """{0} is considered a legacy version and does not
support automatic incrementing. Please bring your version
numbering into PEP440 standards and then it can be
automatically incremented.
"""
raise Exception(msg.format(version))
release_type = os.environ.get("RELEASE_TYPE", "micro")
v = version._version
# epoch
epoch_name, epoch = VersionUtils.get_version_number(v, 0, None, "!")
pre_name, pre = VersionUtils.get_version_number(v, 3, None, "pre")
post_name, post = VersionUtils.get_version_number(v, 4, None, "post")
dev_name, dev = VersionUtils.get_version_number(v, 2, None, "dev")
_, major = VersionUtils.get_version_number(v[1], 0, 0)
_, minor = VersionUtils.get_version_number(v[1], 1, None)
_, micro = VersionUtils.get_version_number(v[1], 2, None)
# Handle dev/pre/post
if release_type == "pre":
micro, post, pre = VersionUtils.process_pre(micro, post, pre)
if release_type == "post":
dev, post = VersionUtils.process_post(dev, post)
if release_type == "dev":
dev = VersionUtils.process_dev(dev)
if release_type == "micro":
dev, micro, minor, post, pre = VersionUtils.process_micro(
dev, micro, minor, post, pre
)
if release_type == "minor":
dev, micro, minor, post, pre = VersionUtils.process_minor(
dev, micro, minor, post, pre
)
if release_type == "major":
dev, major, micro, minor, post, pre = VersionUtils.process_major(
dev, major, micro, minor, post, pre
)
# Handle Epoch
if release_type == "epoch":
dev, epoch, major, micro, minor, post, pre = VersionUtils.process_epoch(
dev, epoch, major, micro, minor, post, pre
)
local = "".join(v[5] or []) or None
version_list = [major, minor, micro]
if release_type not in ["epoch", "major", "minor", "micro", "pre"]:
version_list += list(v[1][3:])
version_string = ".".join([str(x) for x in version_list if x or x == 0])
if epoch:
version_string = str(epoch) + epoch_name + version_string
if pre is not None:
version_string = VersionUtils.calc_pre_version_string(
pre, pre_name, version_string
)
if post is not None:
version_string += "." + post_name + str(post)
if dev is not None:
version_string += "." + dev_name + str(dev)
if local is not None:
version_string += "." + str(local)
return version_string |
java | @Override
protected void adapt(LogLevel logLevel, String message, Class clazz)
{
Logger logger = Logger.getLogger(clazz);
Level level = convertToLog4jLevel(logLevel);
logger.log(DebugLogTee.class.getName(), level, message, null);
} |
python | def model(model_names):
"""
Creates the example directory structure necessary for a model service.
"""
# for each model name we need to create
for model_name in model_names:
# the template context
context = {
'name': model_name,
}
# render the model template
render_template(template='common', context=context)
render_template(template='model', context=context) |
java | @Override
public String getReverseRouteFor(Class<? extends Controller> clazz, String method, Map<String,
Object> params) {
return getReverseRouteFor(clazz.getName(), method, params);
} |
python | def get_document_field(instance):
"""
Returns which field the search index has marked as its
`document=True` field.
"""
for name, field in instance.searchindex.fields.items():
if field.document is True:
return name |
java | private Ref scope(String idStr) {
if (!limited && idStr.equals("var")) {
String name = identifier(false);
if (name != null) {
cfml.removeSpace();
return new Variable(new lucee.runtime.interpreter.ref.var.Scope(ScopeSupport.SCOPE_VAR), name, limited);
}
}
int scope = limited ? Scope.SCOPE_UNDEFINED : VariableInterpreter.scopeString2Int(pc != null && pc.ignoreScopes(), idStr);
if (scope == Scope.SCOPE_UNDEFINED) {
return new Variable(new lucee.runtime.interpreter.ref.var.Scope(Scope.SCOPE_UNDEFINED), idStr, limited);
}
return new lucee.runtime.interpreter.ref.var.Scope(scope);
} |
java | public static CompilationFailedException create(final IMessage[] errors) {
final StringBuilder sb = new StringBuilder();
sb.append("AJC compiler errors:").append(LINE_SEPARATOR);
for (final IMessage error : errors) {
sb.append(error.toString()).append(LINE_SEPARATOR);
}
return new CompilationFailedException(sb.toString());
} |
python | def _get_coordinator_for_group(self, consumer_group):
"""Returns the coordinator (broker) for a consumer group
Returns the broker for a given consumer group or
Raises ConsumerCoordinatorNotAvailableError
"""
if self.consumer_group_to_brokers.get(consumer_group) is None:
yield self.load_consumer_metadata_for_group(consumer_group)
returnValue(self.consumer_group_to_brokers.get(consumer_group)) |
java | public Postcard withInt(@Nullable String key, int value) {
mBundle.putInt(key, value);
return this;
} |
python | def search( self, base=False, trim=False, objects=False, **kwargs ):
""" Returns matching entries for search in ldap
structured as [(dn, {attributes})]
UNLESS searching by dn, in which case the first match
is returned
"""
scope = pyldap.SCOPE_SUBTREE
if not base:
base = self.users
filterstr = ''
for key, value in kwargs.iteritems():
filterstr += '({0}={1})'.format(key,value)
if key == 'dn':
filterstr = '(objectClass=*)'
base = value
scope = pyldap.SCOPE_BASE
break
if len(kwargs) > 1:
filterstr = '(&'+filterstr+')'
result = self.ldap.search_s(base, scope, filterstr, ['*','+'])
if base == self.users:
for member in result:
groups = self.getGroups(member[0])
member[1]['groups'] = groups
if 'eboard' in member[1]['groups']:
member[1]['committee'] = self.search(base=self.committees, \
head=member[0])[0][1]['cn'][0]
if objects:
return self.memberObjects(result)
finalResult = self.trimResult(result) if trim else result
return finalResult |
java | private void addHeaderCode(Node script) {
script.addChildToFront(createConditionalObjectDecl(JS_INSTRUMENTATION_OBJECT_NAME, script));
// Make subsequent usages of "window" and "window.top" work in a Web Worker context.
script.addChildToFront(
compiler.parseSyntheticCode(
"if (!self.window) { self.window = self; self.window.top = self; }")
.removeFirstChild()
.useSourceInfoIfMissingFromForTree(script));
} |
java | public Matrix4d frustumAabb(Vector3d min, Vector3d max) {
double minX = Double.POSITIVE_INFINITY;
double minY = Double.POSITIVE_INFINITY;
double minZ = Double.POSITIVE_INFINITY;
double maxX = Double.NEGATIVE_INFINITY;
double maxY = Double.NEGATIVE_INFINITY;
double maxZ = Double.NEGATIVE_INFINITY;
for (int t = 0; t < 8; t++) {
double x = ((t & 1) << 1) - 1.0;
double y = (((t >>> 1) & 1) << 1) - 1.0;
double z = (((t >>> 2) & 1) << 1) - 1.0;
double invW = 1.0 / (m03 * x + m13 * y + m23 * z + m33);
double nx = (m00 * x + m10 * y + m20 * z + m30) * invW;
double ny = (m01 * x + m11 * y + m21 * z + m31) * invW;
double nz = (m02 * x + m12 * y + m22 * z + m32) * invW;
minX = minX < nx ? minX : nx;
minY = minY < ny ? minY : ny;
minZ = minZ < nz ? minZ : nz;
maxX = maxX > nx ? maxX : nx;
maxY = maxY > ny ? maxY : ny;
maxZ = maxZ > nz ? maxZ : nz;
}
min.x = minX;
min.y = minY;
min.z = minZ;
max.x = maxX;
max.y = maxY;
max.z = maxZ;
return this;
} |
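A NumPy sketch of the same corner walk: transform the eight NDC-cube corners by the matrix (laid out to match the `mNN` fields above), perspective-divide, and take the componentwise min/max:

```python
import numpy as np

def frustum_aabb(m):
    # m is a 4x4 array laid out so that m[i, j] corresponds to the Java
    # field mIJ; a corner row-vector times m reproduces the
    # m00*x + m10*y + m20*z + m30 pattern above.
    corners = np.array([[(t & 1) * 2 - 1,
                         ((t >> 1) & 1) * 2 - 1,
                         ((t >> 2) & 1) * 2 - 1,
                         1.0] for t in range(8)])
    p = corners @ m
    p = p[:, :3] / p[:, 3:4]          # perspective divide by w
    return p.min(axis=0), p.max(axis=0)
```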
python | def rollback(self, number=0):
"""
Will rollback the configuration to a previous state.
Can also be called with ``number=0`` to discard uncommitted changes.
:param number: How many steps to look back in the configuration history.
:raise pyPluribus.exceptions.RollbackError: In case the configuration cannot be rolled back.
"""
if number < 0:
raise pyPluribus.exceptions.RollbackError("Please provide a positive number to rollback to!")
available_configs = len(self._config_history)
max_rollbacks = available_configs - 2
if max_rollbacks < 0:
raise pyPluribus.exceptions.RollbackError("Cannot rollback: \
not enough configuration history available!")
if max_rollbacks > 0 and number > max_rollbacks:
raise pyPluribus.exceptions.RollbackError("Cannot rollback more than {cfgs} configurations!\
".format(cfgs=max_rollbacks))
config_location = 1 # will load the initial config worst case (user never committed, but wants to discard)
if max_rollbacks > 0: # in case of previous commit(s) will be able to load a specific configuration
config_location = available_configs - number - 1 # stored in location len() - rollback_nb - 1
# covers also the case of discard uncommitted changes (rollback 0)
desired_config = self._config_history[config_location]
try:
self._upload_config_content(desired_config, rollbacked=True)
except pyPluribus.exceptions.ConfigLoadError as loaderr:
raise pyPluribus.exceptions.RollbackError("Cannot rollback: {err}".format(err=loaderr))
del self._config_history[(config_location+1):] # delete all newer configurations than the config rolled back
self._last_working_config = desired_config
self._committed = True
self._config_changed = False
return True |
java | private Process launchBundleAudit(File folder) throws AnalysisException {
if (!folder.isDirectory()) {
throw new AnalysisException(String.format("%s should have been a directory.", folder.getAbsolutePath()));
}
final List<String> args = new ArrayList<>();
final String bundleAuditPath = getSettings().getString(Settings.KEYS.ANALYZER_BUNDLE_AUDIT_PATH);
File bundleAudit = null;
if (bundleAuditPath != null) {
bundleAudit = new File(bundleAuditPath);
if (!bundleAudit.isFile()) {
LOGGER.warn("Supplied `bundleAudit` path is incorrect: {}", bundleAuditPath);
bundleAudit = null;
}
}
args.add(bundleAudit != null && bundleAudit.isFile() ? bundleAudit.getAbsolutePath() : "bundle-audit");
args.add("check");
args.add("--verbose");
final ProcessBuilder builder = new ProcessBuilder(args);
builder.directory(folder);
try {
LOGGER.info("Launching: {} from {}", args, folder);
return builder.start();
} catch (IOException ioe) {
throw new AnalysisException("bundle-audit initialization failure; this error can be ignored if you are not analyzing Ruby. "
+ "Otherwise ensure that bundle-audit is installed and the path to bundle audit is correctly specified", ioe);
}
} |
java | public void serialize(XMLExtendedStreamWriter writer, GlobalConfiguration globalConfiguration, Map<String, Configuration> configurations) throws XMLStreamException {
writer.writeStartDocument();
writer.writeStartElement("infinispan");
Serializer serializer = new Serializer();
serializer.serialize(writer, new ConfigurationHolder(globalConfiguration, configurations));
writer.writeEndElement();
writer.writeEndDocument();
} |
python | def com_google_fonts_check_name_typographicsubfamilyname(ttFont, style_with_spaces):
""" Check name table: TYPOGRAPHIC_SUBFAMILY_NAME entries. """
from fontbakery.utils import name_entry_id
failed = False
if style_with_spaces in ['Regular',
'Italic',
'Bold',
'Bold Italic']:
for name in ttFont['name'].names:
if name.nameID == NameID.TYPOGRAPHIC_SUBFAMILY_NAME:
failed = True
yield FAIL, Message("ribbi",
("Font style is '{}' and, for that reason,"
" it is not expected to have a "
"{} entry!").format(style_with_spaces,
name_entry_id(name)))
else:
expected_value = style_with_spaces
has_entry = False
for name in ttFont['name'].names:
if name.nameID == NameID.TYPOGRAPHIC_SUBFAMILY_NAME:
string = name.string.decode(name.getEncoding()).strip()
if string == expected_value:
has_entry = True
else:
failed = True
yield FAIL, Message("non-ribbi-bad-value",
("Entry {} on the 'name' table: "
"Expected '{}' "
"but got '{}'.").format(name_entry_id(name),
expected_value,
string))
if not failed and not has_entry:
failed = True
yield FAIL, Message("non-ribbi-lacks-entry",
("non-RIBBI fonts must have a"
" TYPOGRAPHIC_SUBFAMILY_NAME entry"
" on the name table."))
if not failed:
yield PASS, "TYPOGRAPHIC_SUBFAMILY_NAME entries are all good." |
python | def compiled_init_func(self):
"""Returns compiled init function"""
def get_column_assignment(column_name):
return ALCHEMY_TEMPLATES.col_assignment.safe_substitute(col_name=column_name)
def get_compiled_args(arg_name):
return ALCHEMY_TEMPLATES.func_arg.safe_substitute(arg_name=arg_name)
join_string = "\n" + self.tab + self.tab
column_assignments = join_string.join([get_column_assignment(n) for n in self.columns])
init_args = ", ".join(get_compiled_args(n) for n in self.columns)
return ALCHEMY_TEMPLATES.init_function.safe_substitute(col_assignments=column_assignments,
init_args=init_args) |
python | def close(self, force=False):
"""
close opened file
:param force: force closing of externally opened file or buffer
"""
if self.__write:
self.write = self.__write_adhoc
self.__write = False
if not self._is_buffer or force:
self._file.close() |
python | def _read_utf(cls, data, pos, kind=None):
"""
:param kind: Optional; a human-friendly identifier for the kind of UTF-8 data we're loading (e.g. is it a keystore alias? an algorithm identifier? something else?).
Used to construct more informative exception messages when a decoding error occurs.
"""
size = b2.unpack_from(data, pos)[0]
pos += 2
try:
return data[pos:pos+size].decode('utf-8'), pos+size
except (UnicodeEncodeError, UnicodeDecodeError) as e:
raise BadKeystoreFormatException(("Failed to read %s, contains bad UTF-8 data: %s" % (kind, str(e))) if kind else \
("Encountered bad UTF-8 data: %s" % str(e))) |
python | def set_default_args(self, default_args):
"""Set default args for commands in collection.
Default args are used when the corresponding args aren't passed
on the command line or in a direct call.
"""
for name, args in default_args.items():
command = self[name]
command.default_args = default_args.get(command.name) or {} |
java | public static <T> void writeWorkBook(File file, int excelType, List<T> beans) throws WriteExcelException {
WriteExcelUtils.writeWorkBook(file, excelType, beans, null);
} |
python | def infer_argument(self, funcnode, name, context):
"""infer a function argument value according to the call context
Arguments:
funcnode: The function being called.
name: The name of the argument whose value is being inferred.
context: Inference context object
"""
if name in self.duplicated_keywords:
raise exceptions.InferenceError(
"The arguments passed to {func!r} " " have duplicate keywords.",
call_site=self,
func=funcnode,
arg=name,
context=context,
)
# Look into the keywords first, maybe it's already there.
try:
return self.keyword_arguments[name].infer(context)
except KeyError:
pass
# Too many arguments given and no variable arguments.
if len(self.positional_arguments) > len(funcnode.args.args):
if not funcnode.args.vararg:
raise exceptions.InferenceError(
"Too many positional arguments "
"passed to {func!r} that does "
"not have *args.",
call_site=self,
func=funcnode,
arg=name,
context=context,
)
positional = self.positional_arguments[: len(funcnode.args.args)]
vararg = self.positional_arguments[len(funcnode.args.args) :]
argindex = funcnode.args.find_argname(name)[0]
kwonlyargs = {arg.name for arg in funcnode.args.kwonlyargs}
kwargs = {
key: value
for key, value in self.keyword_arguments.items()
if key not in kwonlyargs
}
# If there are too few positionals compared to
# what the function expects to receive, check to see
# if the missing positional arguments were passed
# as keyword arguments and if so, place them into the
# positional args list.
if len(positional) < len(funcnode.args.args):
for func_arg in funcnode.args.args:
if func_arg.name in kwargs:
arg = kwargs.pop(func_arg.name)
positional.append(arg)
if argindex is not None:
# 2. first argument of instance/class method
if argindex == 0 and funcnode.type in ("method", "classmethod"):
if context.boundnode is not None:
boundnode = context.boundnode
else:
# XXX can do better ?
boundnode = funcnode.parent.frame()
if isinstance(boundnode, nodes.ClassDef):
# Verify that we're accessing a method
# of the metaclass through a class, as in
# `cls.metaclass_method`. In this case, the
# first argument is always the class.
method_scope = funcnode.parent.scope()
if method_scope is boundnode.metaclass():
return iter((boundnode,))
if funcnode.type == "method":
if not isinstance(boundnode, bases.Instance):
boundnode = bases.Instance(boundnode)
return iter((boundnode,))
if funcnode.type == "classmethod":
return iter((boundnode,))
# if we have a method, extract one position
# from the index, so we'll take in account
# the extra parameter represented by `self` or `cls`
if funcnode.type in ("method", "classmethod"):
argindex -= 1
# 2. search arg index
try:
return self.positional_arguments[argindex].infer(context)
except IndexError:
pass
if funcnode.args.kwarg == name:
# It wants all the keywords that were passed into
# the call site.
if self.has_invalid_keywords():
raise exceptions.InferenceError(
"Inference failed to find values for all keyword arguments "
"to {func!r}: {unpacked_kwargs!r} doesn't correspond to "
"{keyword_arguments!r}.",
keyword_arguments=self.keyword_arguments,
unpacked_kwargs=self._unpacked_kwargs,
call_site=self,
func=funcnode,
arg=name,
context=context,
)
kwarg = nodes.Dict(
lineno=funcnode.args.lineno,
col_offset=funcnode.args.col_offset,
parent=funcnode.args,
)
kwarg.postinit(
[(nodes.const_factory(key), value) for key, value in kwargs.items()]
)
return iter((kwarg,))
if funcnode.args.vararg == name:
# It wants all the args that were passed into
# the call site.
if self.has_invalid_arguments():
raise exceptions.InferenceError(
"Inference failed to find values for all positional "
"arguments to {func!r}: {unpacked_args!r} doesn't "
"correspond to {positional_arguments!r}.",
positional_arguments=self.positional_arguments,
unpacked_args=self._unpacked_args,
call_site=self,
func=funcnode,
arg=name,
context=context,
)
args = nodes.Tuple(
lineno=funcnode.args.lineno,
col_offset=funcnode.args.col_offset,
parent=funcnode.args,
)
args.postinit(vararg)
return iter((args,))
# Check if it's a default parameter.
try:
return funcnode.args.default_value(name).infer(context)
except exceptions.NoDefault:
pass
raise exceptions.InferenceError(
"No value found for argument {name} to " "{func!r}",
call_site=self,
func=funcnode,
arg=name,
context=context,
) |
java | public static Set<Method> getMethods(Class<?> clazz, Filter<Method>filter) {
Set<Method> methods = new HashSet<Method>();
Class<?> cursor = clazz;
while(cursor != null && cursor != Object.class) {
// get all methods and apply filters
methods.addAll(Filter.apply(filter, cursor.getDeclaredMethods()));
// up one step on the hierarchy
cursor = cursor.getSuperclass();
}
return methods;
} |
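A rough Python analogue of the same hierarchy walk: collect each class's own functions up the MRO (stopping before `object`, as the Java loop stops before `Object.class`) and apply a filter predicate:

```python
import inspect

def get_methods(cls, predicate=lambda m: True):
    methods = set()
    for cursor in cls.__mro__:
        if cursor is object:
            continue  # mirror the Java loop stopping before Object
        # vars(cursor) is the Python counterpart of getDeclaredMethods:
        # only members defined directly on this class.
        for member in vars(cursor).values():
            if inspect.isfunction(member) and predicate(member):
                methods.add(member)
    return methods
```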
python | def multi_lpop(self, queue, number, transaction=False):
''' Pops multiple elements from a list '''
try:
self._multi_lpop_pipeline(self, queue, number)
except:
raise |
python | def copy(self, memo=None, which=None):
"""
Returns a (deep) copy of the current parameter handle.
All connections to parents of the copy will be cut.
:param dict memo: memo for deepcopy
:param Parameterized which: parameterized object which started the copy process [default: self]
"""
#raise NotImplementedError, "Copy is not yet implemented, TODO: Observable hierarchy"
if memo is None:
memo = {}
import copy
# the next part makes sure that we do not include parents in any form:
parents = []
if which is None:
which = self
which.traverse_parents(parents.append) # collect parents
for p in parents:
if id(p) not in memo: memo[id(p)] = None # set all parents to be None, so they will not be copied
if id(self.gradient) not in memo: memo[id(self.gradient)] = None # reset the gradient
if id(self._fixes_) not in memo: memo[id(self._fixes_)] = None # fixes have to be reset, as this is now highest parent
copy = copy.deepcopy(self, memo) # and start the copy
copy._parent_index_ = None
copy._trigger_params_changed()
return copy |
python | def run_validators(self, value):
"""
Test the given value against all the validators on the field,
and either raise a `ValidationError` or simply return.
"""
errors = []
for validator in self.validators:
if hasattr(validator, 'set_context'):
validator.set_context(self)
try:
validator(value)
except ValidationError as exc:
# If the validation error contains a mapping of fields to
# errors then simply raise it immediately rather than
# attempting to accumulate a list of errors.
if isinstance(exc.detail, dict):
raise
errors.extend(exc.detail)
except DjangoValidationError as exc:
errors.extend(exc.messages)
if errors:
raise ValidationError(errors) |
python | def Minus(self, other):
"""
Returns a new point which is the pointwise subtraction of other from
self.
"""
return Point(self.x - other.x,
self.y - other.y,
self.z - other.z) |
java | @Nonnull
public static <T1, T2, T3> LToIntTriFunction<T1, T2, T3> toIntTriFunctionFrom(Consumer<LToIntTriFunctionBuilder<T1, T2, T3>> buildingFunction) {
LToIntTriFunctionBuilder builder = new LToIntTriFunctionBuilder();
buildingFunction.accept(builder);
return builder.build();
} |
python | def human_size(size_bytes, precision=0):
"""
Format a size in bytes into a 'human' file size, e.g. bytes, KB, MB, GB, TB, PB
Note that bytes/KB will be reported in whole numbers but MB and above will have greater precision
e.g. 1 byte, 43 bytes, 443 KB, 4.3 MB, 4.43 GB, etc
"""
if size_bytes == 1:
# because I really hate unnecessary plurals
return "1 byte"
suffixes_table = [('bytes',0),('KB',0),('MB',1),('GB',2),('TB',2), ('PB',2)]
num = float(size_bytes)
for suffix, precision in suffixes_table:
if num < 1024.0:
break
num /= 1024.0
if precision == 0:
formatted_size = "%d" % num
else:
formatted_size = str(round(num, ndigits=precision))
return "%s %s" % (formatted_size, suffix) |
python | def t_measures(dirname, time_func, measure_func):
"""Calculate a measure over time for a single output directory,
and its uncertainty.
Parameters
----------
dirname: str
Path to a model output directory.
time_func: function
Function which takes a :class:`Model` instance as a single argument,
and returns its time.
measure_func: function
Function which takes a :class:`Model` instance as a single argument,
and returns the measure of interest, and its uncertainty.
Returns
-------
ts: np.ndarray
Times.
measures: np.ndarray
Measures.
measure_errs: np.ndarray
Measure uncertainties.
"""
ts, measures, measure_errs = [], [], []
for fname in get_filenames(dirname):
m = filename_to_model(fname)
ts.append(time_func(m))
meas, meas_err = measure_func(m)
measures.append(meas)
measure_errs.append(meas_err)
return np.array(ts), np.array(measures), np.array(measure_errs) |
java | public SIBusMessage peek()
throws SISessionUnavailableException, SIResourceException, SIIncorrectCallException
{
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.entry(tc, "peek");
checkValidState("peek");
_localConsumerPoint.checkNotClosed();
SIBusMessage nextMessage = null;
if(!_seenSingleMessage)
{
// Because this message is not in the message store there is no need to copy it
// before giving it to a caller unless this is pubsub, in which case there may be
// other subscriptions referencing the same message, in which case we must copy the
// message (unless the caller indicates that they won't be altering it)
if(_isPubsub &&
((ConnectionImpl)(_localConsumerPoint.getConsumerSession().getConnection())).getMessageCopiedWhenReceived())
{
try
{
nextMessage = (_singleMessage.getMessage()).getReceived();
}
catch (MessageCopyFailedException e)
{
FFDCFilter.processException(
e,
"com.ibm.ws.sib.processor.impl.SingleLockedMessageEnumerationImpl.peek",
"1:456:1.44",
this);
SibTr.exception(tc, e);
SibTr.error(tc, "INTERNAL_MESSAGING_ERROR_CWSIP0002",
new Object[] {
"com.ibm.ws.sib.processor.impl.SingleLockedMessageEnumerationImpl",
"1:463:1.44",
e });
_seenSingleMessage = false;
_messageAvailable = false;
if (TraceComponent.isAnyTracingEnabled() && CoreSPILockedMessageEnumeration.tc.isEntryEnabled())
SibTr.exit(CoreSPILockedMessageEnumeration.tc, "peek", e);
throw new SIResourceException(
nls.getFormattedMessage(
"INTERNAL_MESSAGING_ERROR_CWSIP0002",
new Object[] {
"com.ibm.ws.sib.processor.impl.SingleLockedMessageEnumerationImpl",
"1:477:1.44",
e },
null),
e);
}
}
else
nextMessage = _singleMessage.getMessage();
}
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.exit(tc, "peek", nextMessage);
return nextMessage;
} |
java | @Override
public void report() {
for (Map.Entry<String, Map<String, BugInstance>> thisEntry : possibleBugs.entrySet()) {
Map<String, BugInstance> equalsClassesMap = thisEntry.getValue();
for (Map.Entry<String, BugInstance> equalsEntry : equalsClassesMap.entrySet()) {
String equalsCls = equalsEntry.getKey();
Map<String, BugInstance> reverseEqualsClassMap = possibleBugs.get(equalsCls);
if (reverseEqualsClassMap == null) {
bugReporter.reportBug(equalsClassesMap.values().iterator().next());
break;
}
if (!reverseEqualsClassMap.containsKey(thisEntry.getKey())) {
bugReporter.reportBug(equalsClassesMap.values().iterator().next());
break;
}
}
}
possibleBugs.clear();
} |
python | def update_project(self, project_key, **kwargs):
"""Update an existing project
:param project_key: Username and unique identifier of the creator of a
project in the form of owner/id.
:type project_key: str
:param title: Project title
:type title: str
:param objective: Short project objective.
:type objective: str, optional
:param summary: Long-form project summary.
:type summary: str, optional
:param tags: Project tags. Letters numbers and spaces
:type tags: list, optional
:param license: Project license
:type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY',
'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'}
:param visibility: Project visibility
:type visibility: {'OPEN', 'PRIVATE'}
:param files: File name as dict, with source URL, description and labels
as properties
:type files: dict, optional
*Description and labels are optional*
:param linked_datasets: Initial set of linked datasets.
:type linked_datasets: list of object, optional
:returns: message object
:rtype: object
:raises RestApiException: If a server error occurs
Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> api_client.update_project(
... 'username/test-project',
... tags=['demo', 'datadotworld']) # doctest: +SKIP
"""
request = self.__build_project_obj(
lambda: _swagger.ProjectPatchRequest(),
lambda name, url, description, labels:
_swagger.FileCreateOrUpdateRequest(
name=name,
source=_swagger.FileSourceCreateOrUpdateRequest(url=url),
description=description,
labels=labels),
kwargs)
owner_id, project_id = parse_dataset_key(project_key)
try:
return self._projects_api.patch_project(owner_id,
project_id,
body=request)
except _swagger.rest.ApiException as e:
raise RestApiError(cause=e) |
java | private static List<ProxyAction> createProxyActions(final Slice<Action> actionBeans) {
final List<ProxyAction> proxyActions = new ArrayList<>();
for (final Action action : actionBeans) {
final ProxyAction proxyAction = new ProxyAction();
final String dsNameVersion = action.getDistributionSet().getName() + ":"
+ action.getDistributionSet().getVersion();
proxyAction.setActive(action.isActive());
proxyAction.setIsActiveDecoration(buildIsActiveDecoration(action));
proxyAction.setDsNameVersion(dsNameVersion);
proxyAction.setAction(action);
proxyAction.setId(action.getId());
proxyAction.setLastModifiedAt(action.getLastModifiedAt());
proxyAction.setRolloutName(action.getRollout() != null ? action.getRollout().getName() : "");
proxyAction.setStatus(action.getStatus());
proxyAction.setMaintenanceWindow(
action.hasMaintenanceSchedule() ? buildMaintenanceWindowDisplayText(action) : "");
proxyActions.add(proxyAction);
}
return proxyActions;
} |
python | def triggers(self):
"""Get a camera's triggers."""
capabilities = self.capabilities
if not capabilities:
return None
for capability in capabilities:
if not isinstance(capability, dict):
continue
triggers = capability.get("Triggers")
if triggers:
return triggers
return None |
java | @Override
public boolean canReuseUpdatedViewHolder(@NonNull ViewHolder viewHolder, @NonNull List<Object> payloads) {
return !payloads.isEmpty() || super.canReuseUpdatedViewHolder(viewHolder, payloads);
} |
java | public OvhUser identity_user_user_GET(String user) throws IOException {
String qPath = "/me/identity/user/{user}";
StringBuilder sb = path(qPath, user);
String resp = exec(qPath, "GET", sb.toString(), null);
return convertTo(resp, OvhUser.class);
} |
python | def project_inspect_template_path(cls, project, inspect_template):
"""Return a fully-qualified project_inspect_template string."""
return google.api_core.path_template.expand(
"projects/{project}/inspectTemplates/{inspect_template}",
project=project,
inspect_template=inspect_template,
) |
java | @Override
public Object getMember(String name) {
switch (name) {
case "BOOLEAN":
return F_BOOLEAN;
// case "BYTE":
// return F_BYTE;
// case "SHORT":
// return F_SHORT;
case "INT":
return F_INT;
// case "LONG":
// return F_LONG;
case "FLOAT":
// return F_FLOAT;
case "DOUBLE":
return F_DOUBLE;
case "STRING":
return F_STRING;
// case "DECIMAL":
// return F_DECIMAL;
case "DATE":
return F_DATE;
case "TIMESTAMP":
return F_TIMESTAMP;
// case "BINARY":
// return F_BINARY;
case "tuple2":
return F_tuple2;
case "tuple3":
return F_tuple3;
case "tuple4":
return F_tuple4;
case "tuple5":
return F_tuple5;
}
return super.getMember(name);
} |
python | def set_input_shape_ngpu(self, new_input_shape):
"""
Create and initialize layer parameters on the device previously set
in self.device_name.
:param new_input_shape: a list or tuple for the shape of the input.
"""
assert self.device_name, "Device name has not been set."
device_name = self.device_name
if self.input_shape is None:
# First time setting the input shape
self.input_shape = [None] + [int(d) for d in list(new_input_shape)]
if device_name in self.params_device:
# There is a copy of weights on this device
self.__dict__.update(self.params_device[device_name])
return
# Stop recursion
self.params_device[device_name] = {}
# Initialize weights on this device
with tf.device(device_name):
self.set_input_shape(self.input_shape)
keys_after = self.__dict__.keys()
if self.params_names is None:
# Prevent overriding training
self.params_names = [k for k in keys_after if isinstance(
self.__dict__[k], tf.Variable)]
params = {k: self.__dict__[k] for k in self.params_names}
self.params_device[device_name] = params |
python | def available(name):
'''
Return True if the named service is available.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd
'''
cmd = '{0} get {1}'.format(_cmd(), name)
if __salt__['cmd.retcode'](cmd) == 2:
return False
return True |
java | protected AjaxButton newSaveButton(final String id, final Form<?> form)
{
final AjaxButton saveButton = new AjaxButton(id, form)
{
/**
* The serialVersionUID.
*/
private static final long serialVersionUID = 1L;
/**
* {@inheritDoc}
*/
@Override
protected void onSubmit(final AjaxRequestTarget target, final Form<?> form)
{
onSave(target, form);
}
};
return saveButton;
} |
python | def markdown_search_user(request):
"""
Json usernames of the users registered & actived.
url(method=get):
/martor/search-user/?username={username}
Response:
error:
- `status` is status code (204)
- `error` is error message.
success:
- `status` is status code (200)
- `data` is list dict of usernames.
{ 'status': 200,
'data': [
{'username': 'john'},
{'username': 'albert'}]
}
"""
data = {}
username = request.GET.get('username')
if username is not None \
and username != '' \
and ' ' not in username:
users = User.objects.filter(
Q(username__icontains=username)
).filter(is_active=True)
if users.exists():
data.update({
'status': 200,
'data': [{'username': u.username} for u in users]
})
return HttpResponse(
json.dumps(data, cls=LazyEncoder),
content_type='application/json')
data.update({
'status': 204,
'error': _('No users registered as `%(username)s` '
'or user is inactive.') % {'username': username}
})
else:
data.update({
'status': 204,
'error': _('Validation Failed for field `username`')
})
return HttpResponse(
json.dumps(data, cls=LazyEncoder),
content_type='application/json') |
java | private Context parse(Object localContext, List<String> remoteContexts,
boolean parsingARemoteContext) throws JsonLdError {
if (remoteContexts == null) {
remoteContexts = new ArrayList<String>();
}
// 1. Initialize result to the result of cloning active context.
Context result = this.clone(); // TODO: clone?
// 2)
if (!(localContext instanceof List)) {
final Object temp = localContext;
localContext = new ArrayList<Object>();
((List<Object>) localContext).add(temp);
}
// 3)
for (final Object context : ((List<Object>) localContext)) {
// 3.1)
if (context == null) {
result = new Context(this.options);
continue;
} else if (context instanceof Context) {
result = ((Context) context).clone();
}
// 3.2)
else if (context instanceof String) {
String uri = (String) result.get(JsonLdConsts.BASE);
uri = JsonLdUrl.resolve(uri, (String) context);
// 3.2.2
if (remoteContexts.contains(uri)) {
throw new JsonLdError(Error.RECURSIVE_CONTEXT_INCLUSION, uri);
}
remoteContexts.add(uri);
// 3.2.3: Dereference context
final RemoteDocument rd = this.options.getDocumentLoader().loadDocument(uri);
final Object remoteContext = rd.getDocument();
if (!(remoteContext instanceof Map) || !((Map<String, Object>) remoteContext)
.containsKey(JsonLdConsts.CONTEXT)) {
// If the dereferenced document has no top-level JSON object
// with an @context member
throw new JsonLdError(Error.INVALID_REMOTE_CONTEXT, context);
}
final Object tempContext = ((Map<String, Object>) remoteContext)
.get(JsonLdConsts.CONTEXT);
// 3.2.4
result = result.parse(tempContext, remoteContexts, true);
// 3.2.5
continue;
} else if (!(context instanceof Map)) {
// 3.3
throw new JsonLdError(Error.INVALID_LOCAL_CONTEXT, context);
}
checkEmptyKey((Map<String, Object>) context);
// 3.4
if (!parsingARemoteContext
&& ((Map<String, Object>) context).containsKey(JsonLdConsts.BASE)) {
// 3.4.1
final Object value = ((Map<String, Object>) context).get(JsonLdConsts.BASE);
// 3.4.2
if (value == null) {
result.remove(JsonLdConsts.BASE);
} else if (value instanceof String) {
// 3.4.3
if (JsonLdUtils.isAbsoluteIri((String) value)) {
result.put(JsonLdConsts.BASE, value);
} else {
// 3.4.4
final String baseUri = (String) result.get(JsonLdConsts.BASE);
if (!JsonLdUtils.isAbsoluteIri(baseUri)) {
throw new JsonLdError(Error.INVALID_BASE_IRI, baseUri);
}
result.put(JsonLdConsts.BASE, JsonLdUrl.resolve(baseUri, (String) value));
}
} else {
// 3.4.5
throw new JsonLdError(JsonLdError.Error.INVALID_BASE_IRI,
"@base must be a string");
}
}
// 3.5
if (((Map<String, Object>) context).containsKey(JsonLdConsts.VOCAB)) {
final Object value = ((Map<String, Object>) context).get(JsonLdConsts.VOCAB);
if (value == null) {
result.remove(JsonLdConsts.VOCAB);
} else if (value instanceof String) {
if (JsonLdUtils.isAbsoluteIri((String) value)) {
result.put(JsonLdConsts.VOCAB, value);
} else {
throw new JsonLdError(Error.INVALID_VOCAB_MAPPING,
"@value must be an absolute IRI");
}
} else {
throw new JsonLdError(Error.INVALID_VOCAB_MAPPING,
"@vocab must be a string or null");
}
}
// 3.6
if (((Map<String, Object>) context).containsKey(JsonLdConsts.LANGUAGE)) {
final Object value = ((Map<String, Object>) context).get(JsonLdConsts.LANGUAGE);
if (value == null) {
result.remove(JsonLdConsts.LANGUAGE);
} else if (value instanceof String) {
result.put(JsonLdConsts.LANGUAGE, ((String) value).toLowerCase());
} else {
throw new JsonLdError(Error.INVALID_DEFAULT_LANGUAGE, value);
}
}
// 3.7
final Map<String, Boolean> defined = new LinkedHashMap<String, Boolean>();
for (final String key : ((Map<String, Object>) context).keySet()) {
if (JsonLdConsts.BASE.equals(key) || JsonLdConsts.VOCAB.equals(key)
|| JsonLdConsts.LANGUAGE.equals(key)) {
continue;
}
result.createTermDefinition((Map<String, Object>) context, key, defined);
}
}
return result;
} |
python | def coerce(cls, key, value):
"""Convert plain dictionaries to MutableDict."""
if not isinstance(value, MutableDict):
if isinstance(value, dict):
return MutableDict(value)
elif isinstance(value, six.string_types):
# Assume JSON string
if value:
return MutableDict(simplejson.loads(value, use_decimal=True))
else:
return MutableDict() # Empty value is an empty dict
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value |
java | protected static boolean writeListType(Output out, Object listType) {
log.trace("writeListType");
if (listType instanceof List<?>) {
writeList(out, (List<?>) listType);
} else {
return false;
}
return true;
} |
python | def prepare(args):
"""
Read all seq.fa files and create a matrix and unique fasta files.
:param args: options parsed from command line
:param con: logging messages going to console
:param log: logging messages going to console and file
:returns: files - matrix and fasta files that should be used with
and aligner (as bowtie) and run `seqcluster cluster`
"""
try:
f = open(args.config, 'r')
seq_out = open(op.join(args.out, "seqs.fastq"), 'w')
ma_out = open(op.join(args.out, "seqs.ma"), 'w')
except IOError as e:
traceback.print_exc()
raise IOError("Can not create output files: %s, %s or read %s" % (op.join(args.out, "seqs.ma"), op.join(args.out, "seqs.fastq"), args.config))
logger.info("Reading sequeces")
seq_l, sample_l = _read_fastq_files(f, args)
logger.info("Creating matrix with unique sequences")
logger.info("Filtering: min counts %s, min size %s, max size %s, min shared %s" % (args.minc, args.minl, args.maxl, args.min_shared))
_create_matrix_uniq_seq(sample_l, seq_l, ma_out, seq_out, args.min_shared)
logger.info("Finish preprocessing. Get a sorted BAM file of seqs.fa and run seqcluster cluster.") |
python | def partial_path_match(path1, path2, kwarg_re=r'\{.*\}'):
"""Validates if path1 and path2 matches, ignoring any kwargs in the string.
We need this to ensure we can match Swagger patterns like:
/foo/{id}
against the observed pyramid path
/foo/1
:param path1: path of a url
:type path1: string
:param path2: path of a url
:type path2: string
:param kwarg_re: regex pattern to identify kwargs
:type kwarg_re: regex string
:returns: boolean
"""
split_p1 = path1.split('/')
split_p2 = path2.split('/')
pat = re.compile(kwarg_re)
if len(split_p1) != len(split_p2):
return False
for partial_p1, partial_p2 in zip(split_p1, split_p2):
if pat.match(partial_p1) or pat.match(partial_p2):
continue
if not partial_p1 == partial_p2:
return False
return True |
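Usage consistent with the docstring:

```python
assert partial_path_match('/foo/{id}', '/foo/1')
assert not partial_path_match('/foo/{id}', '/bar/1')
assert not partial_path_match('/foo/{id}', '/foo/1/extra')  # length mismatch
```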
python | def unique(transactions):
""" Remove any duplicate entries. """
seen = set()
# TODO: Handle comments
return [x for x in transactions if not (x in seen or seen.add(x))] |
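Usage, showing order-preserving de-duplication (elements must be hashable):

```python
assert unique([3, 1, 3, 2, 1]) == [3, 1, 2]
```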
python | def null_write_block(fo, block_bytes):
"""Write block in "null" codec."""
write_long(fo, len(block_bytes))
fo.write(block_bytes) |
python | async def close_async(self):
"""Close the client asynchronously. This includes closing the Session
and CBS authentication layer as well as the Connection.
If the client was opened using an external Connection,
this will be left intact.
"""
if self.message_handler:
await self.message_handler.destroy_async()
self.message_handler = None
self._shutdown = True
if self._keep_alive_thread:
await self._keep_alive_thread
self._keep_alive_thread = None
if not self._session:
return # already closed.
if not self._connection.cbs:
_logger.info("Closing non-CBS session.")
await asyncio.shield(self._session.destroy_async())
else:
_logger.info("CBS session pending %r.", self._connection.container_id)
self._session = None
if not self._ext_connection:
_logger.info("Closing exclusive connection %r.", self._connection.container_id)
await asyncio.shield(self._connection.destroy_async())
else:
_logger.info("Shared connection remaining open.")
self._connection = None |
java | @Override
public void removeByG_P_A(long groupId, boolean primary, boolean active) {
for (CommerceCurrency commerceCurrency : findByG_P_A(groupId, primary,
active, QueryUtil.ALL_POS, QueryUtil.ALL_POS, null)) {
remove(commerceCurrency);
}
} |
java | public static Object setObjectIndex(Object obj, double dblIndex,
Object value, Context cx,
Scriptable scope)
{
Scriptable sobj = toObjectOrNull(cx, obj, scope);
if (sobj == null) {
throw undefWriteError(obj, String.valueOf(dblIndex), value);
}
int index = (int)dblIndex;
if (index == dblIndex) {
return setObjectIndex(sobj, index, value, cx);
}
String s = toString(dblIndex);
return setObjectProp(sobj, s, value, cx);
} |
python | def qos_queue_scheduler_strict_priority_dwrr_traffic_class1(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
queue = ET.SubElement(qos, "queue")
scheduler = ET.SubElement(queue, "scheduler")
strict_priority = ET.SubElement(scheduler, "strict-priority")
dwrr_traffic_class1 = ET.SubElement(strict_priority, "dwrr-traffic-class1")
dwrr_traffic_class1.text = kwargs.pop('dwrr_traffic_class1')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
java | protected final void setMeasuredDimension(View view, int width, int height) {
final ViewProxy proxy = (ViewProxy) view;
proxy.invokeSetMeasuredDimension(width, height);
} |
python | def check_args(args):
"""
Parse arguments and check if the arguments are valid
"""
if not os.path.exists(args.fd):
print("Not a valid path", args.fd, file=ERROR_LOG)
return [], [], False
if args.fl is not None:
# we already ensure the file can be opened and opened the file
file_line = args.fl.readline()
amr_ids = file_line.strip().split()
elif args.f is None:
print("No AMR ID was given", file=ERROR_LOG)
return [], [], False
else:
amr_ids = args.f
names = []
check_name = True
if args.p is None:
names = get_names(args.fd, amr_ids)
# no need to check names
check_name = False
if len(names) == 0:
print("Cannot find any user who tagged these AMR", file=ERROR_LOG)
return [], [], False
else:
names = args.p
if len(names) == 0:
print("No user was given", file=ERROR_LOG)
return [], [], False
if len(names) == 1:
print("Only one user is given. Smatch calculation requires at least two users.", file=ERROR_LOG)
return [], [], False
if "consensus" in names:
con_index = names.index("consensus")
names.pop(con_index)
names.append("consensus")
# check if all the AMR_id and user combinations are valid
if check_name:
pop_name = []
for i, name in enumerate(names):
for amr in amr_ids:
amr_path = args.fd + name + "/" + amr + ".txt"
if not os.path.exists(amr_path):
print("User", name, "fails to tag AMR", amr, file=ERROR_LOG)
pop_name.append(i)
break
if len(pop_name) != 0:
pop_num = 0
for p in pop_name:
print("Deleting user", names[p - pop_num], "from the name list", file=ERROR_LOG)
names.pop(p - pop_num)
pop_num += 1
if len(names) < 2:
print("Not enough users to evaluate. Smatch requires >2 users who tag all the AMRs", file=ERROR_LOG)
return "", "", False
return amr_ids, names, True |
python | def download(self,
name: str,
force: bool = False
) -> bool:
"""
Attempts to download a given Docker image. If `force=True`, then any
previously installed version of the image (described by the
instructions) will be replaced by the image on DockerHub.
Parameters:
name: the name of the Docker image.
Returns:
`True` if successfully downloaded, otherwise `False`.
"""
try:
self.__docker.images.pull(name)
return True
except docker.errors.NotFound:
print("Failed to locate image on DockerHub: {}".format(name))
return False |
python | def _get_flaky_attributes(cls, test_item):
"""
Get all the flaky related attributes from the test.
:param test_item:
The test callable from which to get the flaky related attributes.
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`dict` of `unicode` to varies
"""
return {
attr: cls._get_flaky_attribute(
test_item,
attr,
) for attr in FlakyNames()
} |
python | def format_date(self, value, format_):
"""
Format the date using Babel
"""
date_ = make_date(value)
return dates.format_date(date_, format_, locale=self.lang) |
python | def feature_names(self, feature_names):
"""Set feature names (column labels).
Parameters
----------
feature_names : list or None
Labels for features. None will reset existing feature names
"""
if feature_names is not None:
# validate feature name
if not isinstance(feature_names, list):
feature_names = list(feature_names)
if len(feature_names) != len(set(feature_names)):
raise ValueError('feature_names must be unique')
if len(feature_names) != self.num_col():
msg = 'feature_names must have the same length as data'
raise ValueError(msg)
# prohibit to use symbols may affect to parse. e.g. ``[]=.``
if not all(isinstance(f, STRING_TYPES) and f.isalnum()
for f in feature_names):
raise ValueError('all feature_names must be alphanumerics')
else:
# reset feature_types also
self.feature_types = None
self._feature_names = feature_names |
python | def update(self, friendly_name=values.unset, unique_name=values.unset):
"""
Update the FieldTypeInstance
:param unicode friendly_name: A string to describe the resource
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:returns: Updated FieldTypeInstance
:rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeInstance
"""
return self._proxy.update(friendly_name=friendly_name, unique_name=unique_name, ) |
python | def parse(self, p_todo):
"""
Returns fully parsed string from 'format_string' attribute with all
placeholders properly substituted by content obtained from p_todo.
It uses preprocessed form of 'format_string' (result of
ListFormatParser._preprocess_format) stored in 'format_list'
attribute.
"""
parsed_list = []
repl_trunc = None
for substr, placeholder, getter in self.format_list:
repl = getter(p_todo) if getter else ''
pattern = MAIN_PATTERN.format(ph=placeholder)
if placeholder == 'S':
repl_trunc = repl
try:
if repl == '':
substr = re.sub(pattern, '', substr)
else:
substr = re.sub(pattern, _strip_placeholder_braces, substr)
substr = re.sub(r'(?<!\\)%({ph}|\[{ph}\])'.format(ph=placeholder), repl, substr)
except re.error:
raise ListFormatError
parsed_list.append(substr)
parsed_str = _unescape_percent_sign(''.join(parsed_list))
parsed_str = _remove_redundant_spaces(parsed_str)
if self.one_line and len(escape_ansi(parsed_str)) >= _columns():
parsed_str = _truncate(parsed_str, repl_trunc)
if re.search('.*\t', parsed_str):
parsed_str = _right_align(parsed_str)
return parsed_str.rstrip() |
python | def nLLevalAllY(ldelta, UY, UX, S):
"""
nLLevalAllY(double ldelta, MatrixXd const & UY, MatrixXd const & UX, VectorXd const & S)
Parameters
----------
ldelta: double
UY: MatrixXd const &
UX: MatrixXd const &
S: VectorXd const &
"""
return _core.nLLevalAllY(ldelta, UY, UX, S) |