language
stringclasses 2
values | func_code_string
stringlengths 63
466k
|
---|---|
def get_datatable(self, **kwargs):
    """ Gathers and returns the final :py:class:`Datatable` instance for processing. """
    # Memoized: the datatable is built at most once per view instance.
    if hasattr(self, '_datatable'):
        return self._datatable
    datatable_class = self.get_datatable_class()
    if datatable_class is None:
        # No declared datatable class: synthesize minimal Meta options from the
        # view's model (or its queryset's model) and fall back to the base Datatable.
        class AutoMeta:
            model = self.model or self.get_queryset().model
        opts = AutoMeta()
        datatable_class = Datatable
    else:
        opts = datatable_class.options_class(datatable_class._meta)
    kwargs = self.get_datatable_kwargs(**kwargs)
    # Any constructor kwarg that names a Meta option overrides the class-level value.
    for meta_opt in opts.__dict__:
        if meta_opt in kwargs:
            setattr(opts, meta_opt, kwargs.pop(meta_opt))
    # Subclass on the fly so the merged Meta applies without mutating the original class.
    datatable_class = type('%s_Synthesized' % (datatable_class.__name__,), (datatable_class,), {
        '__module__': datatable_class.__module__,
        'Meta': opts,
    })
    self._datatable = datatable_class(**kwargs)
    return self._datatable
def __sort_stats(self, sortedby=None):
    """Return the stats (dict) sorted by (sortedby).

    Sort direction follows the global ``glances_processes.sort_reverse`` flag.
    """
    return sort_stats(self.stats, sortedby,
                      reverse=glances_processes.sort_reverse)
java | public static List<Entry<String, Double>> toEntryList(Set<Tuple> set) {
if (set == null || set.isEmpty()) {
return null;
}
List<Entry<String, Double>> result = new ArrayList<Entry<String, Double>>();
for (Tuple tuple : set) {
String element = tuple.getElement();
Double score = tuple.getScore();
Entry<String, Double> entry = new SimpleEntry<String, Double>(element, score);
result.add(entry);
}
return result;
} |
/**
 * Dispatches a parsed GPX child element to the matching setter on this waypoint.
 * Element names are compared case-insensitively; unknown elements are silently ignored.
 *
 * @param currentElement the GPX tag name just closed by the SAX parser
 * @param contentBuffer  the accumulated character content of that element
 */
public final void setAttribute(String currentElement, StringBuilder contentBuffer) {
    if (currentElement.equalsIgnoreCase(GPXTags.TIME)) {
        setTime(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.MAGVAR)) {
        setMagvar(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.GEOIDHEIGHT)) {
        setGeoidheight(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.NAME)) {
        setName(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.CMT)) {
        setCmt(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.DESC)) {
        setDesc(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.SRC)) {
        setSrc(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.SYM)) {
        setSym(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.TYPE)) {
        setType(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.FIX)) {
        setFix(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.SAT)) {
        setSat(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.HDOP)) {
        setHdop(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.VDOP)) {
        setVdop(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.PDOP)) {
        setPdop(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.AGEOFDGPSDATA)) {
        setAgeofdgpsdata(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.DGPSID)) {
        setDgpsid(contentBuffer);
    } else if (currentElement.equalsIgnoreCase(GPXTags.EXTENSIONS)) {
        // EXTENSIONS carries no character content of its own.
        setExtensions();
    }
}
def has_swapped(self):
    """
    Check whether any swapping occurred on this system since this instance was created.

    Compares the current swap counters against the baseline captured in
    ``self.swap_count`` when this instance was constructed.

    @return a boolean value
    """
    new_values = self._read_swap_count()
    for key, new_value in new_values.items():
        # A counter absent from the baseline defaults to 0, so any activity
        # on a newly-appearing counter also reports as swapping.
        old_value = self.swap_count.get(key, 0)
        if new_value > old_value:
            return True
    return False
def directory_to_pif(directory, **kwargs):
    """
    Convert a directory to a pif.

    :param directory: Directory to convert to a pif
    :param kwargs: any additional keyword arguments. (See `files_to_pif`)
    :return: the created pif
    """
    # Collect the regular files directly inside `directory` (non-recursive).
    candidates = (os.path.join(directory, name) for name in os.listdir(directory))
    files = [path for path in candidates if os.path.isfile(path)]
    # Delegate the actual conversion.
    return files_to_pif(files, **kwargs)
def kube_node_status_condition(self, metric, scraper_config):
    """ The ready status of a cluster node. v1.0+

    Emits one service check per node/condition sample plus a cluster-wide
    gauge counting nodes per (condition, status) pair.
    """
    base_check_name = scraper_config['namespace'] + '.node'
    metric_name = scraper_config['namespace'] + '.nodes.by_condition'
    by_condition_counter = Counter()
    for sample in metric.samples:
        node_tag = self._label_to_tag("node", sample[self.SAMPLE_LABELS], scraper_config)
        # Per-node service check with the node identity attached.
        self._condition_to_tag_check(
            sample,
            base_check_name,
            self.condition_to_status_positive,
            scraper_config,
            tags=[node_tag] + scraper_config['custom_tags'],
        )
        # Counts aggregated cluster-wide to avoid no-data issues on node churn,
        # node granularity available in the service checks
        tags = [
            self._label_to_tag("condition", sample[self.SAMPLE_LABELS], scraper_config),
            self._label_to_tag("status", sample[self.SAMPLE_LABELS], scraper_config),
        ] + scraper_config['custom_tags']
        # Sorted tuple keys make tag order irrelevant for aggregation.
        by_condition_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
    for tags, count in iteritems(by_condition_counter):
        self.gauge(metric_name, count, tags=list(tags))
/**
 * Computes the CSS style classes describing a resource's state.
 * Combines the base state style (or "other project" marker) with optional
 * expired and disabled markers, space separated.
 *
 * @param resourceItem the table item holding the resource properties; may be null
 * @return the space-separated style classes, empty for a null item
 */
public static String getStateStyle(Item resourceItem) {
    String result = "";
    if (resourceItem != null) {
        // Missing INSIDE_PROJECT property is treated as "inside the project".
        if ((resourceItem.getItemProperty(PROPERTY_INSIDE_PROJECT) == null)
            || ((Boolean)resourceItem.getItemProperty(PROPERTY_INSIDE_PROJECT).getValue()).booleanValue()) {
            CmsResourceState state = (CmsResourceState)resourceItem.getItemProperty(
                CmsResourceTableProperty.PROPERTY_STATE).getValue();
            result = getStateStyle(state);
        } else {
            result = OpenCmsTheme.PROJECT_OTHER;
        }
        // Append the expired marker when the resource is not released/not expired.
        if ((resourceItem.getItemProperty(PROPERTY_RELEASED_NOT_EXPIRED) != null)
            && !((Boolean)resourceItem.getItemProperty(PROPERTY_RELEASED_NOT_EXPIRED).getValue()).booleanValue()) {
            result += " " + OpenCmsTheme.EXPIRED;
        }
        // Append the disabled marker when explicitly flagged.
        if ((resourceItem.getItemProperty(CmsResourceTableProperty.PROPERTY_DISABLED) != null)
            && ((Boolean)resourceItem.getItemProperty(
                CmsResourceTableProperty.PROPERTY_DISABLED).getValue()).booleanValue()) {
            result += " " + OpenCmsTheme.DISABLED;
        }
    }
    return result;
}
/**
 * Returns the IDs of all pages this page links to.
 * Loads the lazy out-link collection inside a short Hibernate transaction
 * and copies it into a detached set safe to use after the session ends.
 *
 * @return a new set of out-link page IDs
 */
public Set<Integer> getOutlinkIDs()
{
    Set<Integer> tmpSet = new HashSet<Integer>();
    Session session = wiki.__getHibernateSession();
    session.beginTransaction();
    // Re-associate the detached entity without acquiring a database lock.
    session.buildLockRequest(LockOptions.NONE).lock(hibernatePage);
    tmpSet.addAll(hibernatePage.getOutLinks());
    session.getTransaction().commit();
    return tmpSet;
}
def get_payload(self):
    """Return the wire payload for this frame.

    Layout: session id (2 bytes, big-endian), index id (1 byte),
    node parameter (1 byte), seconds (2 bytes, big-endian).
    """
    session_hi, session_lo = self.session_id >> 8 & 255, self.session_id & 255
    seconds_hi, seconds_lo = self.seconds >> 8 & 255, self.seconds & 255
    return bytes([
        session_hi,
        session_lo,
        self.index_id,
        self.node_parameter,
        seconds_hi,
        seconds_lo,
    ])
def truncate_loc(self, character, location, branch, turn, tick):
    """Remove future data about a particular location

    Return True if I deleted anything, False otherwise.
    """
    r = False  # becomes True as soon as any future data is deleted
    # Trim the per-(character, location) turn history for this branch.
    branches_turns = self.branches[character, location][branch]
    branches_turns.truncate(turn)
    if turn in branches_turns:
        bttrn = branches_turns[turn]
        if bttrn.future(tick):
            bttrn.truncate(tick)
            r = True
    # Trim every key-level cache that has data for this branch.
    keyses = self.keys[character, location]
    for keysbranches in keyses.values():
        if branch not in keysbranches:
            continue
        keysbranch = keysbranches[branch]
        if keysbranch.future(turn):
            keysbranch.truncate(turn)
            r = True
        if turn in keysbranch:
            keysbranchturn = keysbranch[turn]
            if keysbranchturn.future(tick):
                keysbranchturn.truncate(tick)
                r = True
    # Purge matching entries from the settings / presettings logs.
    if branch in self.settings:
        for sets in (self.settings, self.presettings):
            sets_branch = sets[branch]
            if turn in sets_branch:
                sets_turn = sets_branch[turn]
                # list(...) so we can delete from sets_turn while iterating.
                for tic, setting in list(sets_turn.future(tick).items()):
                    if setting[:2] == (character, location):
                        del sets_turn[tic]
                        r = True
                if not sets_turn:
                    del sets_branch[turn]
                    # An empty turn bucket should only exist if we just emptied it.
                    assert r, "Found an empty cache when I didn't delete anything"
            for trn, tics in list(sets_branch.future(turn).items()):
                for tic, setting in list(tics.future(tick).items()):
                    if setting[:2] == (character, location):
                        del tics[tic]
                        r = True
                if not tics:
                    del sets_branch[trn]
                    assert r, "Found an empty cache when I didn't delete anything"
            if not sets_branch:
                del sets[branch]
                assert r, "Found an empty cache when I didn't delete anything"
    # Invalidate the shallow-lookup memo since the underlying data changed.
    self.shallowest = OrderedDict()
    return r
/**
 * Parses one metadata line of a content specification into a key/value node.
 * Rejects malformed or duplicate keys, and gives special handling to keys the
 * parser itself consumes (indentation size, debug level, injection options,
 * file lists and spec-topic metadata).
 *
 * @param parserData parser state accumulated so far
 * @param line       the raw metadata line being parsed
 * @param lineNumber the line's position in the spec, for error messages
 * @throws ParsingException if the line is malformed, duplicated or has invalid values
 */
protected void parseMetaDataLine(final ParserData parserData, final String line, int lineNumber) throws ParsingException {
    // Parse the line and break it up into the key/value pair
    Pair<String, String> keyValue = null;
    try {
        keyValue = ProcessorUtilities.getAndValidateKeyValuePair(line);
    } catch (InvalidKeyValueException e) {
        throw new ParsingException(format(ProcessorConstants.ERROR_INVALID_METADATA_FORMAT_MSG, lineNumber, line));
    }
    final String key = keyValue.getFirst();
    final String value = keyValue.getSecond();
    // Duplicate keys are detected case-insensitively.
    if (parserData.getParsedMetaDataKeys().contains(key.toLowerCase())) {
        throw new ParsingException(format(ProcessorConstants.ERROR_DUPLICATE_METADATA_FORMAT_MSG, lineNumber, key, line));
    } else {
        parserData.getParsedMetaDataKeys().add(key.toLowerCase());
        // first deal with metadata that is used by the parser or needs to be parsed further
        if (key.equalsIgnoreCase(CommonConstants.CS_SPACES_TITLE)) {
            // Read in the amount of spaces that were used for the content specification
            try {
                parserData.setIndentationSize(Integer.parseInt(value));
                // Non-positive sizes fall back to the default of 2.
                if (parserData.getIndentationSize() <= 0) {
                    parserData.setIndentationSize(2);
                }
            } catch (NumberFormatException e) {
                throw new ParsingException(format(ProcessorConstants.ERROR_INVALID_NUMBER_MSG, lineNumber, line));
            }
        } else if (key.equalsIgnoreCase(CSConstants.DEBUG_TITLE)) {
            if (value.equals("1")) {
                log.setVerboseDebug(1);
            } else if (value.equals("2")) {
                log.setVerboseDebug(2);
            } else if (value.equals("0")) {
                log.setVerboseDebug(0);
            } else {
                // Unknown debug levels are ignored with a warning rather than failing.
                log.warn(ProcessorConstants.WARN_DEBUG_IGNORE_MSG);
            }
        } else if (key.equalsIgnoreCase(CommonConstants.CS_INLINE_INJECTION_TITLE)) {
            final InjectionOptions injectionOptions = new InjectionOptions();
            String[] types = null;
            if (StringUtilities.indexOf(value, '[') != -1) {
                if (StringUtilities.indexOf(value, ']') != -1) {
                    final Matcher matcher = SQUARE_BRACKET_PATTERN.matcher(value);
                    // Find all of the variables inside of the brackets defined by the regex
                    while (matcher.find()) {
                        final String topicTypes = matcher.group(ProcessorConstants.BRACKET_CONTENTS);
                        types = StringUtilities.split(topicTypes, ',');
                        for (final String type : types) {
                            injectionOptions.addStrictTopicType(type.trim());
                        }
                    }
                } else {
                    // An opening bracket without a closing one is a syntax error.
                    throw new ParsingException(
                        format(ProcessorConstants.ERROR_NO_ENDING_BRACKET_MSG + ProcessorConstants.CSLINE_MSG, lineNumber, ']',
                            line));
                }
            }
            String injectionSetting = getTitle(value, '[');
            if (injectionSetting == null) {
                throw new ParsingException(format(ProcessorConstants.ERROR_INVALID_INJECTION_MSG, lineNumber, line));
            } else if (injectionSetting.trim().equalsIgnoreCase("on")) {
                // "on" with bracketed types means strict injection; plain "on" otherwise.
                if (types != null) {
                    injectionOptions.setContentSpecType(InjectionOptions.UserType.STRICT);
                } else {
                    injectionOptions.setContentSpecType(InjectionOptions.UserType.ON);
                }
            } else if (injectionSetting.trim().equalsIgnoreCase("off")) {
                injectionOptions.setContentSpecType(InjectionOptions.UserType.OFF);
            } else {
                throw new ParsingException(format(ProcessorConstants.ERROR_INVALID_INJECTION_MSG, lineNumber, line));
            }
            parserData.getContentSpec().setInjectionOptions(injectionOptions);
        } else if (key.equalsIgnoreCase(CommonConstants.CS_FILE_TITLE) || key.equalsIgnoreCase(CommonConstants.CS_FILE_SHORT_TITLE)) {
            final FileList files = parseFilesMetaData(parserData, value, lineNumber, line);
            parserData.getContentSpec().appendKeyValueNode(files);
        } else if (isSpecTopicMetaData(key, value)) {
            final SpecTopic specTopic = parseSpecTopicMetaData(parserData, value, key, lineNumber);
            parserData.getContentSpec().appendKeyValueNode(new KeyValueNode<SpecTopic>(key, specTopic, lineNumber));
        } else {
            // Everything else is stored as a plain (possibly multi-line) key/value node.
            try {
                final KeyValueNode<String> node;
                if (ContentSpecUtilities.isMetaDataMultiLine(key)) {
                    node = parseMultiLineMetaData(parserData, key, value, lineNumber);
                } else {
                    node = new KeyValueNode<String>(key, value, lineNumber);
                }
                parserData.getContentSpec().appendKeyValueNode(node);
            } catch (NumberFormatException e) {
                throw new ParsingException(format(ProcessorConstants.ERROR_INVALID_METADATA_FORMAT_MSG, lineNumber, line));
            }
        }
    }
}
def route(self,
          path=None,
          method='GET',
          callback=None,
          name=None,
          apply=None,
          skip=None, **config):
    """ A decorator to bind a function to a request URL. Example::

        @app.route('/hello/<name>')
        def hello(name):
            return 'Hello %s' % name

    The ``<name>`` part is a wildcard. See :class:`Router` for syntax
    details.

    :param path: Request path or a list of paths to listen to. If no
        path is specified, it is automatically generated from the
        signature of the function.
    :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
        methods to listen to. (default: `GET`)
    :param callback: An optional shortcut to avoid the decorator
        syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
    :param name: The name for this route. (default: None)
    :param apply: A decorator or plugin or a list of plugins. These are
        applied to the route callback in addition to installed plugins.
    :param skip: A list of plugins, plugin classes or names. Matching
        plugins are not installed to this route. ``True`` skips all.

    Any additional keyword arguments are stored as route-specific
    configuration and passed to plugins (see :meth:`Plugin.apply`).
    """
    # Support bare-decorator usage: @app.route with no arguments passes the
    # function itself as `path`.
    if callable(path): path, callback = None, path
    plugins = makelist(apply)
    skiplist = makelist(skip)

    def decorator(callback):
        # Callbacks given as dotted strings are lazily imported.
        if isinstance(callback, basestring): callback = load(callback)
        # One Route per (rule, verb) combination; rules default to ones
        # derived from the callback's signature.
        for rule in makelist(path) or yieldroutes(callback):
            for verb in makelist(method):
                verb = verb.upper()
                route = Route(self, rule, verb, callback,
                              name=name,
                              plugins=plugins,
                              skiplist=skiplist, **config)
                self.add_route(route)
        return callback

    return decorator(callback) if callback else decorator
def _GetPredicate(self, pred_str, test_attr=False):
    """Resolve a predicate string to a (func, args, func_type) triple.

    The user's predicates are consulted first, then the default predicates.

    :param pred_str: the raw predicate text, e.g. ``'debug?'``
    :param test_attr: if True, treat an unknown ``name?`` predicate as an
        attribute test instead of raising.
    :raises BadPredicate: if the predicate is unknown and not an attribute test
    """
    predicate, args, func_type = self.predicates.LookupWithType(pred_str)
    if predicate:
        pred = predicate, args, func_type
    else:
        # Nicer syntax, {.debug?} is shorthand for {.if test debug}.
        # Currently there is not if/elif chain; just use
        # {.if test debug} {.or test release} {.or} {.end}
        if test_attr:
            assert pred_str.endswith('?')
            # func, args, func_type
            pred = (_TestAttribute, (pred_str[:-1],), ENHANCED_FUNC)
        else:
            raise BadPredicate('%r is not a valid predicate' % pred_str)
    return pred
/**
 * Sets a manifest attribute whose value is the joined string form of the given collection.
 *
 * @param name   the attribute name
 * @param values the values joined (comma-style, see {@code join}) into the attribute value
 * @return this Jar, for call chaining
 */
public Jar setListAttribute(String name, Collection<?> values) {
    return setAttribute(name, join(values));
}
def _fast_memory_load_bytes(self, addr, length):
    """
    Perform a fast memory loading of some data.

    :param int addr: Address to read from.
    :param int length: Size of the string to load.
    :return: A string or None if the address does not exist.
    :rtype: bytes or None
    """
    try:
        data = self.project.loader.memory.load(addr, length)
    except KeyError:
        # The address is not mapped by the loader.
        return None
    return data
def pan(self, value):
    """Pan translation.

    Copies the 2-element (x, y) offset into the internal pan buffer,
    clamps it via ``_constrain_pan``, then triggers a redraw.
    """
    assert len(value) == 2
    self._pan[:] = value
    self._constrain_pan()
    self.update()
def index(self, corpus=None, clear_buffer=True):
    """
    Permanently index all documents previously added via `buffer`, or
    directly index documents from `corpus`, if specified.

    The indexing model must already exist (see `train`) before this function
    is called.

    :raises AttributeError: if no model has been initialized yet
    :raises ValueError: if there are no documents to index
    """
    if not self.model:
        msg = 'must initialize model for %s before indexing documents' % self.basename
        logger.error(msg)
        raise AttributeError(msg)
    if corpus is not None:
        # use the supplied corpus only (erase existing buffer, if any)
        self.flush(clear_buffer=True)
        self.buffer(corpus)
    if not self.fresh_docs:
        msg = "index called but no indexing corpus specified for %s" % self
        logger.error(msg)
        raise ValueError(msg)
    if not self.fresh_index:
        logger.info("starting a new fresh index for %s" % self)
        self.fresh_index = SimIndex(self.location('index_fresh'), self.model.num_features)
    self.fresh_index.index_documents(self.fresh_docs, self.model)
    # Re-indexed documents supersede any copies in the optimized index.
    if self.opt_index is not None:
        self.opt_index.delete(self.fresh_docs.keys())
    logger.info("storing document payloads")
    for docid in self.fresh_docs:
        payload = self.fresh_docs[docid].get('payload', None)
        if payload is None:
            # HACK: exit on first doc without a payload (=assume all docs have payload, or none does)
            break
        self.payload[docid] = payload
    self.flush(save_index=True, clear_buffer=clear_buffer)
/**
 * If every remaining tile stock is exhausted, records the candidate hand as a
 * winning meld composition (deduplicated via the composition set).
 *
 * @param winCandidate the melds assembled so far for this decomposition branch
 * @throws IllegalMentsuSizeException if the composition cannot be built
 */
private void convertToMentsuComp(List<Mentsu> winCandidate) throws IllegalMentsuSizeException {
    //全て0かチェック (check that all stocks are zero)
    if (isAllZero(handStocks)) {
        canWin = true;
        winCandidate.addAll(inputtedMentsuList);
        MentsuComp mentsuComp = new MentsuComp(winCandidate, last);
        // Only record compositions we have not seen before.
        if (!mentsuCompSet.contains(mentsuComp)) {
            mentsuCompSet.add(mentsuComp);
        }
    }
}
/**
 * Throws an AssertionError for the given message, honoring any user-overridden
 * error message and stripping AssertJ frames from the stack trace.
 *
 * @param errorMessage the default failure message, used when no override is set
 */
private void failWithMessage(String errorMessage) {
    AssertionError assertionError = Failures.instance().failureIfErrorMessageIsOverridden(info);
    if (assertionError == null) {
        // error message was not overridden, build it.
        String description = MessageFormatter.instance().format(info.description(), info.representation(), "");
        assertionError = new AssertionError(description + errorMessage);
    }
    Failures.instance().removeAssertJRelatedElementsFromStackTraceIfNeeded(assertionError);
    throw assertionError;
}
def initLogging(self):
    """Configures the logger.

    Maps ``self.verbose`` to a logging level (0 -> WARNING, 1 -> INFO,
    2 -> DEBUG). Verbosity above 2 is clamped to DEBUG instead of raising
    ``KeyError`` as the previous direct dict lookup did; unexpected values
    below 0 fall back to WARNING.

    :return: a logger named after the concrete class.
    """
    verbose_levels = {
        0: logging.WARNING,
        1: logging.INFO,
        2: logging.DEBUG,
    }
    # Clamp out-of-range verbosity rather than crashing on e.g. -vvv (verbose=3).
    level = verbose_levels.get(min(self.verbose, 2), logging.WARNING)
    logging.basicConfig(
        level=level,
        format="[%(asctime)-15s] %(module)-9s %(levelname)-7s %(message)s"
    )
    return logging.getLogger(self.__class__.__name__)
/**
 * Computes the marginal distribution over the intersection of this tensor's
 * variables and the given set, summing out all other variables.
 *
 * @param vars      the variables to keep (intersected with this tensor's vars)
 * @param normalize whether to normalize the resulting marginal
 * @return a new tensor over the retained variables
 */
public VarTensor getMarginal(VarSet vars, boolean normalize) {
    VarSet margVars = new VarSet(this.vars);
    margVars.retainAll(vars);
    // Initialize to semiring zero so the sum below starts empty.
    VarTensor marg = new VarTensor(s, margVars, s.zero());
    if (margVars.size() == 0) {
        return marg;
    }
    // Map each configuration of this tensor onto a configuration of the marginal.
    IntIter iter = margVars.getConfigIter(this.vars);
    for (int i=0; i<this.values.length; i++) {
        int j = iter.next();
        marg.values[j] = s.plus(marg.values[j], this.values[i]);
    }
    if (normalize) {
        marg.normalize();
    }
    return marg;
}
/**
 * Entry point for the AbstractToken grammar rule (ANTLR-generated parser code).
 * Parses one AbstractToken followed by EOF, recovering from recognition errors.
 * NOTE: generated code — structural changes should be made in the grammar, not here.
 */
public final void entryRuleAbstractToken() throws RecognitionException {
    try {
        // InternalXtext.g:421:1: ( ruleAbstractToken EOF )
        // InternalXtext.g:422:1: ruleAbstractToken EOF
        {
            before(grammarAccess.getAbstractTokenRule());
            pushFollow(FollowSets000.FOLLOW_1);
            ruleAbstractToken();
            state._fsp--;
            after(grammarAccess.getAbstractTokenRule());
            match(input,EOF,FollowSets000.FOLLOW_2);
        }
    }
    catch (RecognitionException re) {
        reportError(re);
        recover(input,re);
    }
    finally {
    }
    return ;
}
/**
 * Deletes the specified instances from a regional managed instance group.
 * Convenience overload that builds the HTTP request object and delegates to
 * the request-based variant.
 *
 * @param instanceGroupManager the name (or full URL) of the managed instance group
 * @param regionInstanceGroupManagersDeleteInstancesRequestResource the instances to delete
 * @return the resulting long-running Operation
 */
@BetaApi
public final Operation deleteInstancesRegionInstanceGroupManager(
    String instanceGroupManager,
    RegionInstanceGroupManagersDeleteInstancesRequest
        regionInstanceGroupManagersDeleteInstancesRequestResource) {
    DeleteInstancesRegionInstanceGroupManagerHttpRequest request =
        DeleteInstancesRegionInstanceGroupManagerHttpRequest.newBuilder()
            .setInstanceGroupManager(instanceGroupManager)
            .setRegionInstanceGroupManagersDeleteInstancesRequestResource(
                regionInstanceGroupManagersDeleteInstancesRequestResource)
            .build();
    return deleteInstancesRegionInstanceGroupManager(request);
}
def handle(self):
    """
    The required handle method.

    Repeatedly delegates the request to the configured message handler until
    it reports completion (returns a falsy value), then closes the connection.
    """
    logger = StreamHandler.logger
    logger.debug("handling requests with message handler %s " % StreamHandler.message_handler.__class__.__name__)
    message_handler = StreamHandler.message_handler
    try:
        while True:
            logger.debug('waiting for more data')
            if not message_handler.handle(self.request, StreamHandler.BUFFER_SIZE):
                break
        logger.warning("connection closed from %s" % (self.client_address[0]))
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate instead of being logged and swallowed.
        logger.exception("connection closed from %s" % (self.client_address[0]))
    finally:
        # Single close point; previously the success path closed the socket
        # twice (once in the try body and again in finally).
        self.request.close()
/**
 * Creates a registry of the given type and applies the named test scenario to it.
 *
 * @param type the registry type to instantiate
 * @param test the key of the scenario in the TESTS map to run
 * @return the registry after the scenario has been applied
 */
public static Registry run(String type, String test) {
    Registry registry = createRegistry(type);
    TESTS.get(test).accept(registry);
    return registry;
}
/**
 * Asserts that both references are non-null and refer to the same object
 * (reference identity, not equals()).
 *
 * @param exp     the expected reference
 * @param was     the actual reference
 * @param message the failure message
 * @return the actual reference, for chaining
 */
public static <T> T assertSame(T exp, T was, String message) {
    assertNotNull(exp, message);
    assertNotNull(was, message);
    assertTrue(exp == was, message);
    return was;
}
def combine(a1, a2):
    '''Combine two arguments into a single flat list.

    It is used when you are not sure whether arguments are lists but want to
    combine them into one flat list.

    Args:
        a1: list or other thing
        a2: list or other thing
    Returns:
        list: a flat list containing a1 and a2
    '''
    first = a1 if isinstance(a1, list) else [a1]
    second = a2 if isinstance(a2, list) else [a2]
    return first + second
def enumerate(self):
    """
    Enumerate top MaxSAT solutions (from best to worst). The
    method works as a generator, which iteratively calls
    :meth:`compute` to compute a MaxSAT model, blocks it
    internally and returns it.

    :returns: a MaxSAT model
    :rtype: list(int)

    .. code-block:: python

        >>> from pysat.examples.rc2 import RC2
        >>> from pysat.formula import WCNF
        >>>
        >>> rc2 = RC2(WCNF())  # passing an empty WCNF() formula
        >>> rc2.add_clause([-1, -2])  # adding clauses "on the fly"
        >>> rc2.add_clause([-1, -3])
        >>> rc2.add_clause([-2, -3])
        >>>
        >>> rc2.add_clause([1], weight=1)
        >>> rc2.add_clause([2], weight=1)
        >>> rc2.add_clause([3], weight=1)
        >>>
        >>> for model in rc2.enumerate():
        ...     print model, rc2.cost
        [-1, -2, 3] 2
        [1, -2, -3] 2
        [-1, 2, -3] 2
        [-1, -2, -3] 3
        >>> rc2.delete()
    """
    done = False
    while not done:
        model = self.compute()
        # Fixed: compare against None with `is not`, not `!=` — a model type
        # overriding __ne__ could otherwise break the termination check.
        if model is not None:
            # Block the model so the next compute() yields a different one.
            self.add_clause([-l for l in model])
            yield model
        else:
            done = True
/**
 * Sets the list of library names to link against.
 *
 * @param libs the library names
 * @throws BuildException if this element is a reference and may not define attributes
 */
public void setLibs(final CUtil.StringArrayBuilder libs) throws BuildException {
    if (isReference()) {
        throw tooManyAttributes();
    }
    this.libnames = libs.getValue();
    //
    // earlier implementations would warn of suspicious library names
    // (like libpthread for pthread or kernel.lib for kernel).
    // visitLibraries now provides better feedback and ld type linkers
    // should provide adequate feedback so the check here is not necessary.
}
/**
 * Re-resolves the advertising ID client info and updates the cached copy,
 * notifying listeners when it changed.
 *
 * @param context the Android context used for resolution
 * @return true if the cached info changed (listeners were notified), false otherwise
 */
public static synchronized boolean updateAdvertisingIdClientInfo(Context context) {
    ApptentiveLog.v(ADVERTISER_ID, "Updating advertiser ID client info...");
    AdvertisingIdClientInfo clientInfo = resolveAdvertisingIdClientInfo(context);
    // NOTE(review): a null resolution result is treated as a "change" even when the
    // cache is already null — confirm this is intentional before altering.
    if (clientInfo != null && clientInfo.equals(cachedClientInfo)) {
        return false; // no changes
    }
    ApptentiveLog.v(ADVERTISER_ID, "Advertiser ID client info changed: %s", clientInfo);
    cachedClientInfo = clientInfo;
    notifyClientInfoChanged(cachedClientInfo);
    return true;
}
def _read_opt_pdm(self, code, *, desc):
    """Read HOPOPT PDM option.

    Structure of HOPOPT PDM option [RFC 8250]:
        0                   1                   2                   3
        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       |  Option Type  | Option Length |    ScaleDTLR  |     ScaleDTLS |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       |   PSN This Packet             |  PSN Last Received            |
       |-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       |   Delta Time Last Received    |  Delta Time Last Sent         |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

        Octets      Bits        Name                    Description
          0           0     hopopt.pdm.type           Option Type
          0           0     hopopt.pdm.type.value     Option Number
          0           0     hopopt.pdm.type.action    Action (00)
          0           2     hopopt.pdm.type.change    Change Flag (0)
          1           8     hopopt.pdm.length         Length of Option Data
          2          16     hopopt.pdm.scaledtlr      Scale Delta Time Last Received
          3          24     hopopt.pdm.scaledtls      Scale Delta Time Last Sent
          4          32     hopopt.pdm.psntp          Packet Sequence Number This Packet
          6          48     hopopt.pdm.psnlr          Packet Sequence Number Last Received
          8          64     hopopt.pdm.deltatlr       Delta Time Last Received
         10          80     hopopt.pdm.deltatls       Delta Time Last Sent

    :raises ProtocolError: if the option data length is not exactly 10 octets
    """
    # NOTE: the _read_unpack calls below consume the stream sequentially;
    # their order mirrors the wire layout and must not be changed.
    _type = self._read_opt_type(code)
    _size = self._read_unpack(1)
    if _size != 10:
        raise ProtocolError(f'{self.alias}: [Optno {code}] invalid format')
    _stlr = self._read_unpack(1)
    _stls = self._read_unpack(1)
    _psnt = self._read_unpack(2)
    _psnl = self._read_unpack(2)
    _dtlr = self._read_unpack(2)
    _dtls = self._read_unpack(2)
    opt = dict(
        desc=desc,
        type=_type,
        length=_size + 2,  # total = option data + 2-octet type/length header
        scaledtlr=datetime.timedelta(seconds=_stlr),
        scaledtls=datetime.timedelta(seconds=_stls),
        psntp=_psnt,
        psnlr=_psnl,
        deltatlr=datetime.timedelta(seconds=_dtlr),
        deltatls=datetime.timedelta(seconds=_dtls),
    )
    return opt
def has_enumerated_namespace_name(self, namespace: str, name: str) -> bool:
    """Check that the namespace is defined by an enumeration and that the name is a member."""
    if not self.has_enumerated_namespace(namespace):
        return False
    return name in self.namespace_to_terms[namespace]
def ParseMultiple(self, stats, unused_file_obj, unused_kb):
    """Identify the init scripts and the start/stop scripts at each runlevel.

    Evaluate all the stat entries collected from the system.
    If the path name matches a runlevel spec, and if the filename matches a
    sysv init symlink process the link as a service.

    Args:
        stats: An iterator of StatEntry rdfs.
        unused_file_obj: An iterator of file contents. Not needed as the parser
            only evaluates link attributes.
        unused_kb: Unused KnowledgeBase rdf.

    Yields:
        rdf_anomaly.Anomaly if the startup link seems weird.
        rdf_client.LinuxServiceInformation for each detected service.
    """
    services = {}
    for stat_entry in stats:
        path = stat_entry.pathspec.path
        # Directory encodes the runlevel; filename encodes S/K action + name.
        runlevel = self.runlevel_re.match(os.path.dirname(path))
        runscript = self.runscript_re.match(os.path.basename(path))
        if runlevel and runscript:
            svc = runscript.groupdict()
            # One service record per name, shared across runlevels.
            service = services.setdefault(
                svc["name"],
                rdf_client.LinuxServiceInformation(
                    name=svc["name"], start_mode="INIT"))
            runlvl = GetRunlevelsNonLSB(runlevel.group(1))
            if svc["action"] == "S" and runlvl:
                service.start_on.append(runlvl.pop())
                service.starts = True
            elif runlvl:
                service.stop_on.append(runlvl.pop())
            # Startup entries should be symlinks into init.d; anything else is suspicious.
            if not stat.S_ISLNK(int(stat_entry.st_mode)):
                yield rdf_anomaly.Anomaly(
                    type="PARSER_ANOMALY",
                    finding=[path],
                    explanation="Startup script is not a symlink.")
    for svc in itervalues(services):
        yield svc
/**
 * Advances to the next document, resetting the position queue first.
 *
 * @return the next matching doc id, or NO_MORE_DOCS when exhausted
 */
@Override
public int nextDoc() throws IOException {
    resetQueue();
    noMorePositions = false;
    return (spans.nextDoc() == NO_MORE_DOCS) ? NO_MORE_DOCS : toMatchDoc();
}
java | public static String getRandomHexString(int length){
byte[] bytes = new byte[length/2];
new Random().nextBytes(bytes);
return DatatypeConverter.printHexBinary(bytes);
} |
def mk_metadata_csv(filedir, outputfilepath, max_bytes=MAX_FILE_DEFAULT):
    """
    Make metadata file for all files in a directory.

    :param filedir: This field is the filepath of the directory whose csv
        has to be made.
    :param outputfilepath: This field is the file path of the output csv.
    :param max_bytes: This field is the maximum file size to consider. Its
        default value is 128m.
    """
    # The context manager guarantees the csv is flushed and closed on exit.
    with open(outputfilepath, 'w') as filestream:
        write_metadata_to_filestream(filedir, filestream, max_bytes)
/**
 * Reads an aggregation-status boolean flag from the raw history table.
 *
 * @param row the row key to look up
 * @param col the status column qualifier within the info family
 * @return the stored boolean, or false if the cell is absent or unparsable
 * @throws IOException on HBase access failure
 */
public boolean getStatusAgg(byte[] row, byte[] col) throws IOException {
    Get g = new Get(row);
    g.addColumn(Constants.INFO_FAM_BYTES, col);
    Table rawTable = null;
    Cell cell = null;
    try {
        rawTable = hbaseConnection
            .getTable(TableName.valueOf(Constants.HISTORY_RAW_TABLE));
        Result r = rawTable.get(g);
        cell = r.getColumnLatestCell(Constants.INFO_FAM_BYTES, col);
    } finally {
        // Always release the table handle, even if the get fails.
        if (rawTable != null) {
            rawTable.close();
        }
    }
    boolean status = false;
    try {
        if (cell != null) {
            status = Bytes.toBoolean(CellUtil.cloneValue(cell));
        }
    } catch (IllegalArgumentException iae) {
        // A malformed cell value is logged and treated as "false".
        LOG.error("Caught " + iae);
    }
    LOG.info("Returning from Raw, " + Bytes.toString(col) + " for this job="
        + status);
    return status;
}
def _validate_netengine(self):
    """
    call netengine validate() method
    verifies connection parameters are correct
    """
    if self.backend:
        try:
            self.netengine.validate()
        except NetEngineError as e:
            # Surface backend connectivity problems as model validation errors.
            raise ValidationError(e)
java | static TypeName arrayComponent(TypeName type) {
return type instanceof ArrayTypeName
? ((ArrayTypeName) type).componentType
: null;
} |
/**
 * Returns the organization relationships of this IfcOrganization
 * (EMF-generated accessor; resolves the feature lazily via eGet).
 */
@SuppressWarnings("unchecked")
@Override
public EList<IfcOrganizationRelationship> getRelates() {
    return (EList<IfcOrganizationRelationship>) eGet(Ifc4Package.Literals.IFC_ORGANIZATION__RELATES, true);
}
/**
 * Creates the "update4" geometry trigger for the given table by executing the
 * templated SQL from the GeoPackage properties.
 *
 * @param tableName          the feature table name
 * @param geometryColumnName the geometry column the trigger watches
 * @param idColumnName       the table's id column
 */
public void createUpdate4Trigger(String tableName,
    String geometryColumnName, String idColumnName) {
    String sqlName = GeoPackageProperties.getProperty(TRIGGER_PROPERTY,
        TRIGGER_UPDATE4_NAME);
    executeSQL(sqlName, tableName, geometryColumnName, idColumnName);
}
python | def _center(X, w, s, mask=None, const=None, inplace=True):
""" Centers the data.
Parameters
----------
w : float
statistical weight of s
inplace : bool
center in place
Returns
-------
sx : ndarray
uncentered row sum of X
sx_centered : ndarray
row sum of X after centering
optional returns (only if Y is given):
sy_raw : ndarray
uncentered row sum of Y
sy_centered : ndarray
row sum of Y after centering
"""
xmean = s / float(w)
if mask is None:
X = np.subtract(X, xmean, out=X if inplace else None)
else:
X = np.subtract(X, xmean[mask], out=X if inplace else None)
const = np.subtract(const, xmean[~mask], const if inplace else None)
return X, const |
def build(self, builder):
    """Build XML by appending to builder"""
    params = dict(OID=self.oid)
    if self.user_type:
        params.update(dict(UserType=self.user_type.value))
    # Element name mirrors the concrete class name (ODM convention).
    builder.start(self.__class__.__name__, params)
    # build the children
    for child in ('login_name', 'display_name', 'full_name', 'first_name', 'last_name',
                  'organisation'):
        if getattr(self, child) is not None:
            getattr(self, child).build(builder)
    # Repeating child collections follow the scalar children.
    for address in self.addresses:
        address.build(builder)
    for email in self.emails:
        email.build(builder)
    for phone in self.phones:
        phone.build(builder)
    for location in self.locations:
        location.build(builder)
    builder.end(self.__class__.__name__)
async def patch_entries(self, entry, **kwargs):
    """
    PATCH /api/entries/{entry}.{_format}

    Change several properties of an entry.

    :param entry: the entry to 'patch' / update
    :param kwargs: can contain one of the following
        title: string
        tags: a list of tags tag1,tag2,tag3
        archive: '0' or '1', default '0' archived the entry.
        starred: '0' or '1', default '0' starred the entry
        In case that you don't want to *really* remove it..
    :return data related to the ext
    """
    # default values
    params = {'access_token': self.token,
              'title': '',
              'tags': []}
    if 'title' in kwargs:
        params['title'] = kwargs['title']
    # Tags are only honored when given as a list; joined to "a, b, c" form.
    if 'tags' in kwargs and isinstance(kwargs['tags'], list):
        params['tags'] = ', '.join(kwargs['tags'])
    # __get_attr validates each optional flag against its allowed values.
    params['archive'] = self.__get_attr(what='archive',
                                        type_attr=int,
                                        value_attr=(0, 1),
                                        **kwargs)
    params['starred'] = self.__get_attr(what='starred',
                                        type_attr=int,
                                        value_attr=(0, 1),
                                        **kwargs)
    params['order'] = self.__get_attr(what='order',
                                      type_attr=str,
                                      value_attr=('asc', 'desc'),
                                      **kwargs)
    path = '/api/entries/{entry}.{ext}'.format(
        entry=entry, ext=self.format)
    return await self.query(path, "patch", **params)
/**
 * Upgrades a BioPAX OWL document to Level 3 if necessary.
 * Level 3 input is returned unchanged; older levels are converted via
 * {@code LevelUpgrader}. Any failure is wrapped in a RuntimeException.
 *
 * @param biopaxData the BioPAX OWL document as a string
 * @return the (possibly converted) Level 3 OWL document
 */
public static String convertToLevel3(final String biopaxData) {
    String toReturn = "";
    try {
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        InputStream is = new ByteArrayInputStream(biopaxData.getBytes());
        SimpleIOHandler io = new SimpleIOHandler();
        io.mergeDuplicates(true);
        Model model = io.convertFromOWL(is);
        if (model.getLevel() != BioPAXLevel.L3) {
            log.info("Converting to BioPAX Level3... " + model.getXmlBase());
            model = (new LevelUpgrader()).filter(model);
            if (model != null) {
                // Re-serialize with the factory matching the upgraded level.
                io.setFactory(model.getLevel().getDefaultFactory());
                io.convertToOWL(model, os);
                toReturn = os.toString();
            }
        } else {
            toReturn = biopaxData;
        }
    } catch(Exception e) {
        throw new RuntimeException(
            "Cannot convert to BioPAX Level3", e);
    }
    return toReturn;
}
def OnSearchFlag(self, event):
    """Event handler for search flag toggle buttons

    Adds the toggled button's option label to ``self.search_options`` when
    checked, removes it when unchecked, then lets the event propagate.
    """
    for label in self.search_options_buttons:
        button_id = self.label2id[label]
        # Only the button that fired this event is acted upon.
        if button_id == event.GetId():
            if event.IsChecked():
                self.search_options.append(label)
            else:
                flag_index = self.search_options.index(label)
                self.search_options.pop(flag_index)
    event.Skip()
/**
 * Counts occurrences of each candidate separator character in a line,
 * ignoring characters inside quoted regions (except the Hive separator,
 * which is always counted).
 *
 * @param from         the line to scan
 * @param single_quote the single-quote character code in effect
 * @return counts indexed parallel to the {@code separators} array
 */
private static int[] determineSeparatorCounts(String from, int single_quote) {
    int[] result = new int[separators.length];
    byte[] bits = from.getBytes();
    boolean in_quote = false;
    for( int j=0; j< bits.length; j++ ) {
        byte c = bits[j];
        // Toggle quote state on either quote character.
        if( (c == single_quote) || (c == CHAR_DOUBLE_QUOTE) )
            in_quote ^= true;
        if( !in_quote || c == HIVE_SEP )
            for( int i = 0; i < separators.length; ++i)
                if (c == separators[i])
                    ++result[i];
    }
    return result;
}
/**
 * Returns a KeyStore of the given type from the named provider.
 *
 * @param type     the keystore type, e.g. "JKS"
 * @param provider the provider name; must be non-null and non-empty
 * @return a KeyStore wrapping the provider's SPI implementation
 * @throws KeyStoreException       if the type is unavailable from the provider
 * @throws NoSuchProviderException if the provider is not registered
 */
public static KeyStore getInstance(String type, String provider)
    throws KeyStoreException, NoSuchProviderException
{
    if (provider == null || provider.length() == 0)
        throw new IllegalArgumentException("missing provider");
    try {
        Object[] objs = Security.getImpl(type, "KeyStore", provider);
        return new KeyStore((KeyStoreSpi)objs[0], (Provider)objs[1], type);
    } catch (NoSuchAlgorithmException nsae) {
        // Surface the missing-type condition under the KeyStore exception type.
        throw new KeyStoreException(type + " not found", nsae);
    }
}
def inline(sconf):
    """Return config in inline form, opposite of :meth:`config.expand`.

    Single-element command lists are collapsed to bare strings, and a dict
    whose only key is ``shell_command`` collapses to that command. Window
    and pane sub-configs are inlined recursively. The input dict is
    modified in place.

    Parameters
    ----------
    sconf : dict

    Returns
    -------
    dict
        configuration with optional inlined configs.
    """

    def _is_singleton(value):
        # a one-element list can be collapsed to its sole item
        return isinstance(value, list) and len(value) == 1

    if 'shell_command' in sconf and _is_singleton(sconf['shell_command']):
        sconf['shell_command'] = sconf['shell_command'][0]
    if len(sconf.keys()) == int(1):
        sconf = sconf['shell_command']
    if 'shell_command_before' in sconf and _is_singleton(
        sconf['shell_command_before']
    ):
        sconf['shell_command_before'] = sconf['shell_command_before'][0]

    # recurse into window and pane config items
    if 'windows' in sconf:
        sconf['windows'] = [inline(window) for window in sconf['windows']]
    if 'panes' in sconf:
        sconf['panes'] = [inline(pane) for pane in sconf['panes']]

    return sconf
/**
 * Refills the read buffer starting at the given offset from the underlying stream.
 *
 * @param offset position in the read buffer where new bytes should be placed
 * @return true if any bytes were read; false (with the stream closed) on EOF
 * @throws IOException on stream read failure
 */
private boolean fillBuffer(int offset) throws IOException {
    int maxReadLength = this.readBuffer.length - offset;
    // special case for reading the whole split.
    if (this.splitLength == FileInputFormat.READ_WHOLE_SPLIT_FLAG) {
        int read = this.stream.read(this.readBuffer, offset, maxReadLength);
        if (read == -1) {
            this.stream.close();
            this.stream = null;
            return false;
        } else {
            this.readPos = offset;
            this.limit = read;
            return true;
        }
    }
    // else ..
    int toRead;
    if (this.splitLength > 0) {
        // if we have more data, read that
        toRead = this.splitLength > maxReadLength ? maxReadLength : (int) this.splitLength;
    }
    else {
        // if we have exhausted our split, we need to complete the current record, or read one
        // more across the next split.
        // the reason is that the next split will skip over the beginning until it finds the first
        // delimiter, discarding it as an incomplete chunk of data that belongs to the last record in the
        // previous split.
        toRead = maxReadLength;
        this.overLimit = true;
    }
    int read = this.stream.read(this.readBuffer, offset, toRead);
    if (read == -1) {
        this.stream.close();
        this.stream = null;
        return false;
    } else {
        this.splitLength -= read;
        this.readPos = offset; // position from where to start reading
        this.limit = read + offset; // number of valid bytes in the read buffer
        return true;
    }
}
/**
 * Enables or disables notification transitions.
 * No-op (and no log line) when the requested state equals the current one.
 *
 * @param enable the desired transition-enabled state
 */
public void setNotificationTransitionEnabled(boolean enable) {
    if (mTransitionEnabled != enable) {
        if (DBG) Log.v(TAG, "transition enable - " + enable);
        mTransitionEnabled = enable;
    }
}
java | public static double copySign(double magnitude, double sign) {
return Math.copySign(magnitude, (Double.isNaN(sign)?1.0d:sign));
} |
/**
 * Evicts cache objects starting from the LRU tail until the accumulated
 * object costs drop below the average cache cost threshold.
 */
private void gc() {
    I_CmsLruCacheObject currentObject = m_listTail;
    while (currentObject != null) {
        // Stop evicting once the total cost falls under the average threshold.
        if (m_objectCosts < m_avgCacheCosts) {
            break;
        }
        // Advance before unlinking so the cursor survives removeTail().
        currentObject = currentObject.getNextLruObject();
        removeTail();
    }
}
/**
 * Creates a new commerce notification queue entry with the given primary key.
 * NOTE(review): following the persistence-layer convention, create() presumably
 * returns an in-memory instance that is not persisted until saved — confirm.
 *
 * @param commerceNotificationQueueEntryId the primary key for the new entry
 * @return the new commerce notification queue entry
 */
@Override
@Transactional(enabled = false)
public CommerceNotificationQueueEntry createCommerceNotificationQueueEntry(
    long commerceNotificationQueueEntryId) {
    return commerceNotificationQueueEntryPersistence.create(commerceNotificationQueueEntryId);
}
def _EscapeGlobCharacters(path):
    """Escapes the glob characters in a path.

    Python 3 has a glob.escape method, but python 2 lacks it, so we manually
    implement this method.

    Args:
      path: The absolute path to escape.

    Returns:
      The escaped path string.
    """
    # The drive portion (e.g. "C:") must never be escaped, so split it off
    # first and re-attach it unmodified.
    drive, path = os.path.splitdrive(path)
    # Wrap each special glob character in [...] so it matches literally.
    return '%s%s' % (drive, _ESCAPE_GLOB_CHARACTERS_REGEX.sub(r'[\1]', path))
/**
 * Updates notification settings by applying the standard pre-execution client
 * hook and delegating to the generated execute method.
 *
 * @param request the update request; may be replaced by beforeClientExecution
 * @return the service response
 */
@Override
public UpdateNotificationSettingsResult updateNotificationSettings(UpdateNotificationSettingsRequest request) {
    request = beforeClientExecution(request);
    return executeUpdateNotificationSettings(request);
}
/**
 * Runs the given job type synchronously: arranges thread-local contexts,
 * executes the job, captures (rather than propagates) any throwable, and
 * tears contexts down in the finally clause.
 *
 * @param jobType the job implementation class to execute
 * @param runtime the runtime state for this execution
 * @return the runner result built from the runtime and the captured cause
 */
protected RunnerResult doRun(Class<? extends LaJob> jobType, LaJobRuntime runtime) {
    // similar to async manager's process
    arrangeThreadCacheContext(runtime);
    arrangePreparedAccessContext(runtime);
    arrangeCallbackContext(runtime);
    final Object variousPreparedObj = prepareVariousContext(runtime);
    final long before = showRunning(runtime);
    Throwable cause = null;
    try {
        debugFw(runtime, "...Calling try clause of job runner");
        hookBefore(runtime);
        debugFw(runtime, "...Calling actuallyRun() of job runner");
        actuallyRun(jobType, runtime);
    } catch (Throwable e) {
        // The job's failure is recorded and reported, not rethrown; it is
        // surfaced to the caller through the RunnerResult below.
        debugFw(runtime, "...Calling catch clause of job runner: {}", e.getClass().getSimpleName());
        final Throwable filtered = filterCause(e);
        cause = filtered;
        showJobException(runtime, before, filtered);
    } finally {
        debugFw(runtime, "...Calling finally clause of job runner");
        hookFinally(runtime, OptionalThing.ofNullable(cause, () -> {
            throw new IllegalStateException("Not found the cause: " + runtime);
        }));
        showFinishing(runtime, before, cause); // should be before clearing because of using them
        // Teardown mirrors the arrangement order at the top of the method.
        clearVariousContext(runtime, variousPreparedObj);
        clearPreparedAccessContext();
        clearCallbackContext();
        clearThreadCacheContext();
    }
    debugFw(runtime, "...Calling createRunnerResult() of job runner");
    return createRunnerResult(runtime, cause);
}
def contains(self, other):
    """
    Returns True if offset vector @other can be found in @self,
    False otherwise.  An offset vector is "found in" another
    offset vector if the latter contains all of the former's
    instruments and the relative offsets among those
    instruments are equal (the absolute offsets need not be).

    Example:

    >>> a = offsetvector({"H1": 10, "L1": 20, "V1": 30})
    >>> b = offsetvector({"H1": 20, "V1": 40})
    >>> a.contains(b)
    True

    Note the distinction between this and the "in" operator:

    >>> "H1" in a
    True
    """
    # Project self onto other's instruments, then compare relative offsets
    # ("deltas"), which are insensitive to a common absolute shift.
    return offsetvector((key, offset) for key, offset in self.items() if key in other).deltas == other.deltas
java | public static nshttpprofile get(nitro_service service, String name) throws Exception{
nshttpprofile obj = new nshttpprofile();
obj.set_name(name);
nshttpprofile response = (nshttpprofile) obj.get_resource(service);
return response;
} |
def get_field_type(field):
    """
    Returns field type/possible values.

    Walks a chain of known filter/serializer field classes and renders a
    human-readable description of the values the field accepts.  Falls back
    to the field's class name (with the Filter/Field/Serializer suffix
    stripped) looked up in the module-level FIELDS table.
    """
    if isinstance(field, core_filters.MappedMultipleChoiceFilter):
        return ' | '.join(['"%s"' % f for f in sorted(field.mapped_to_model)])
    if isinstance(field, OrderingFilter) or isinstance(field, ChoiceFilter):
        return ' | '.join(['"%s"' % f[0] for f in field.extra['choices']])
    if isinstance(field, ChoiceField):
        return ' | '.join(['"%s"' % f for f in sorted(field.choices)])
    if isinstance(field, HyperlinkedRelatedField):
        if field.view_name.endswith('detail'):
            return 'link to %s' % reverse(field.view_name,
                                          kwargs={'%s' % field.lookup_field: "'%s'" % field.lookup_field})
        return reverse(field.view_name)
    if isinstance(field, structure_filters.ServiceTypeFilter):
        return ' | '.join(['"%s"' % f for f in SupportedServices.get_filter_mapping().keys()])
    if isinstance(field, ResourceTypeFilter):
        return ' | '.join(['"%s"' % f for f in SupportedServices.get_resource_models().keys()])
    if isinstance(field, core_serializers.GenericRelatedField):
        # Build one example link per related model; the first lookup field
        # that reverses successfully wins for each model.
        links = []
        for model in field.related_models:
            detail_view_name = core_utils.get_detail_view_name(model)
            for f in field.lookup_fields:
                try:
                    link = reverse(detail_view_name, kwargs={'%s' % f: "'%s'" % f})
                except NoReverseMatch:
                    pass
                else:
                    links.append(link)
                    break
        path = ', '.join(links)
        if path:
            return 'link to any: %s' % path
    if isinstance(field, core_filters.ContentTypeFilter):
        return "string in form 'app_label'.'model_name'"
    # Composite fields delegate to the description of their inner field.
    if isinstance(field, ModelMultipleChoiceFilter):
        return get_field_type(field.field)
    if isinstance(field, ListSerializer):
        return 'list of [%s]' % get_field_type(field.child)
    if isinstance(field, ManyRelatedField):
        return 'list of [%s]' % get_field_type(field.child_relation)
    if isinstance(field, ModelField):
        return get_field_type(field.model_field)
    # Fallback: derive a name from the class and look it up in FIELDS.
    name = field.__class__.__name__
    for w in ('Filter', 'Field', 'Serializer'):
        name = name.replace(w, '')
    return FIELDS.get(name, name)
/**
 * Validates that the installed Cloud SDK has exactly the expected version and
 * then runs the SDK's own installation validation.
 *
 * NOTE(review): a version mismatch throws an unchecked RuntimeException even
 * though CloudSdkOutOfDateException is declared — confirm whether the declared
 * exception was intended here.
 *
 * @param cloudSdk the SDK installation to check
 * @param version  the exact version string required
 * @throws CloudSdkVersionFileException if the SDK's version file is invalid
 * @throws CloudSdkNotFoundException    if the SDK installation is missing
 * @throws CloudSdkOutOfDateException   if the SDK fails validation
 */
public void checkCloudSdk(CloudSdk cloudSdk, String version)
    throws CloudSdkVersionFileException, CloudSdkNotFoundException, CloudSdkOutOfDateException {
    if (!version.equals(cloudSdk.getVersion().toString())) {
        throw new RuntimeException(
            "Specified Cloud SDK version ("
                + version
                + ") does not match installed version ("
                + cloudSdk.getVersion()
                + ").");
    }
    cloudSdk.validateCloudSdk();
}
/**
 * Creates a TravelingSalesman problem whose stops lie equally spaced on a
 * circle of the given radius (centered at (radius, radius)), in shuffled
 * order.
 *
 * @param stops  number of cities to place on the circle
 * @param radius circle radius; also the center offset on both axes
 * @return a new TravelingSalesman instance over the shuffled points
 */
public static TravelingSalesman of(int stops, double radius) {
    final MSeq<double[]> points = MSeq.ofLength(stops);
    final double delta = 2.0*PI/stops;

    // Place the stops evenly around the circle.
    for (int i = 0; i < stops; ++i) {
        final double alpha = delta*i;
        final double x = cos(alpha)*radius + radius;
        final double y = sin(alpha)*radius + radius;
        points.set(i, new double[]{x, y});
    }

    // Shuffling of the created points (Fisher-Yates, using the registered RNG).
    final Random random = RandomRegistry.getRandom();
    for (int j = points.length() - 1; j > 0; --j) {
        final int i = random.nextInt(j + 1);
        final double[] tmp = points.get(i);
        points.set(i, points.get(j));
        points.set(j, tmp);
    }

    return new TravelingSalesman(points.toISeq());
}
def pixel_coord(self):
    """
    Return the coordinates of the source in the cutout reference frame.

    Delegates to get_pixel_coordinates using the reading's pixel coordinate
    and its CCD number.

    @return: the (x, y) pixel coordinate in the cutout frame
    """
    return self.get_pixel_coordinates(self.reading.pix_coord, self.reading.get_ccd_num())
def available_modes_with_ids(self):
    """Return a mapping of available mode name -> mode id, cached.

    Starts from the static ``FIXED_MODES`` table and overlays any modes
    reported by the device via ``get_available_modes``.  The result is
    cached on ``self._available_mode_ids``; a malformed device response
    leaves just the fixed defaults in place.
    """
    if not self._available_mode_ids:
        all_modes = FIXED_MODES.copy()
        # Cache the defaults first so a failure below still leaves a
        # usable mapping behind.
        self._available_mode_ids = all_modes
        modes = self.get_available_modes()
        try:
            if modes:
                # Prefer the mode "type" as the key, falling back to "name".
                simple_modes = {
                    m.get("type", m.get("name")): m.get("id") for m in modes
                }
                all_modes.update(simple_modes)
                self._available_mode_ids = all_modes
        except TypeError:
            # Non-iterable / non-dict payload: keep the fixed defaults.
            _LOGGER.debug("Did not receive a valid response. Passing..")
    return self._available_mode_ids
def predict(self, X):
    """
    Predict the less costly class for a given observation

    Note
    ----
    The implementation here happens in a Python loop rather than in some
    NumPy array operations, thus it will be slower than the other algorithms
    here, even though in theory it implies fewer comparisons.

    Parameters
    ----------
    X : array (n_samples, n_features)
        Data for which to predict minimum cost label.

    Returns
    -------
    y_hat : array (n_samples,)
        Label with expected minimum cost for each observation.
    """
    X = _check_2d_inp(X, reshape = True)
    if X.shape[0] == 1:
        # Single observation: _predict returns the label directly.
        return self._predict(X)
    else:
        # shape_single describes a one-row slice of X for the workers.
        shape_single = list(X.shape)
        shape_single[0] = 1
        # Workers write into `pred` in place, hence require="sharedmem".
        pred = np.empty(X.shape[0], dtype = "int64")
        Parallel(n_jobs=self.njobs, verbose=0, require="sharedmem")(delayed(self._predict)(row, pred, shape_single, X) for row in range(X.shape[0]))
        return pred
def get_doc_types(cls, exclude_base=False):
    """Return the doc_type of this class and all of its descendants.

    When *exclude_base* is true, this class's own doc_type is omitted and
    only descendants contribute.  Classes whose mapping is marked
    ``elastic_abstract`` never contribute their own doc_type.
    """
    doc_types = []
    include_self = not exclude_base and hasattr(cls, 'search_objects')
    if include_self:
        mapping = cls.search_objects.mapping
        if not getattr(mapping, "elastic_abstract", False):
            doc_types.append(mapping.doc_type)
    for child in cls.__subclasses__():
        doc_types.extend(child.get_doc_types())
    return doc_types
/**
 * Configures a MySQL DataSource from the database's properties, falling back
 * to the DEFAULT_* variants (and "localhost" for the server) when the primary
 * properties are empty.  The JDBC URL template's {dbserver}/{dbname}
 * placeholders are substituted, but the URL is only used when no explicit
 * server name is available.
 *
 * @param database   the source of the connection properties
 * @param dataSource the MySQL DataSource to populate
 */
public void setDatasourceParams(JdbcDatabase database, com.mysql.jdbc.jdbc2.optional.MysqlDataSource dataSource)
{
    String strURL = database.getProperty(SQLParams.JDBC_URL_PARAM);
    if ((strURL == null) || (strURL.length() == 0))
        strURL = database.getProperty(SQLParams.DEFAULT_JDBC_URL_PARAM);   // Default
    String strServer = database.getProperty(SQLParams.DB_SERVER_PARAM);
    if ((strServer == null) || (strServer.length() == 0))
        strServer = database.getProperty(SQLParams.DEFAULT_DB_SERVER_PARAM);   // Default
    if ((strServer == null) || (strServer.length() == 0))
        strServer = "localhost"; //this.getProperty(DBParams.SERVER);   // ??
    String strDatabaseName = database.getDatabaseName(true);
    if (strURL != null)
    {
        // Substitute template placeholders in the configured URL.
        if (strServer != null)
            strURL = Utility.replace(strURL, "{dbserver}", strServer);
        strURL = Utility.replace(strURL, "{dbname}", strDatabaseName);
    }
    String strUsername = database.getProperty(SQLParams.USERNAME_PARAM);
    if ((strUsername == null) || (strUsername.length() == 0))
        strUsername = database.getProperty(SQLParams.DEFAULT_USERNAME_PARAM);  // Default
    String strPassword = database.getProperty(SQLParams.PASSWORD_PARAM);
    if ((strPassword == null) || (strPassword.length() == 0))
        strPassword = database.getProperty(SQLParams.DEFAULT_PASSWORD_PARAM);  // Default
    dataSource.setDatabaseName(strDatabaseName);
    // Prefer an explicit server name; the templated URL is the fallback.
    if (strServer != null)
        dataSource.setServerName(strServer);
    else
        dataSource.setURL(strURL);
    dataSource.setUser (strUsername);
    dataSource.setPassword (strPassword);
}
def bytes2human(n, fmt='%(value).1f %(symbol)s', symbols='customary'):
    """
    Convert n bytes into a human readable string based on format.
    symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
    see: http://goo.gl/kTQMs

    Parameters
    ----------
    n : int
        Byte count; must be >= 0.
    fmt : str
        %-format string receiving ``value`` and ``symbol`` keys.
    symbols : str
        Key into the module-level SYMBOLS table selecting the unit names.

    Raises
    ------
    ValueError
        If n is negative.
    """
    n = int(n)
    if n < 0:
        raise ValueError("n < 0")
    symbols = SYMBOLS[symbols]
    # Map each unit (skipping the base unit) to its power-of-1024 threshold.
    prefix = {}
    for i, s in enumerate(symbols[1:]):
        prefix[s] = 1 << (i+1)*10
    # Pick the largest unit whose threshold fits.  Plain true division
    # replaces the former py2-compat old_div helper (identical for floats),
    # and an explicit mapping replaces the fragile `fmt % locals()`.
    for symbol in reversed(symbols[1:]):
        if n >= prefix[symbol]:
            value = float(n) / prefix[symbol]
            return fmt % dict(symbol=symbol, value=value)
    # Below the first threshold: report in base units.
    return fmt % dict(symbol=symbols[0], value=n)
/**
 * Builds a criterion matching any of the given resource IDs.
 *
 * @param theIds one or more IDs; must not be null
 * @return a criterion delegating to the collection-based overload
 */
public ICriterion<ReferenceClientParam> hasAnyOfIds(String... theIds) {
    Validate.notNull(theIds, "theIds must not be null");
    return hasAnyOfIds(Arrays.asList(theIds));
}
def get_value(self, element):
    """
    | Returns the given element value.
    | If multiple elements with the same name exists, only the first encountered will be returned.

    Usage::

        >>> plist_file_parser = PlistFileParser("standard.plist")
        >>> plist_file_parser.parse()
        True
        >>> plist_file_parser.get_value("String A")
        u'My Value A'

    :param element: Element to get the value.
    :type element: unicode
    :return: Element value (None when no elements were parsed or no match).
    :rtype: object
    """
    if not self.__elements:
        return

    # The element name is interpolated into an anchored regex.
    # NOTE(review): `element` is not re.escape()d, so names containing regex
    # metacharacters may match unexpectedly — confirm intended.
    values = self.filter_values(r"^{0}$".format(element))
    return foundations.common.get_first_item(values)
/**
 * Reflectively invokes the named method on {@code obj} (or, when obj is a
 * Class, the static method of that class), deriving the parameter types
 * from the runtime classes of {@code args}.
 *
 * NOTE(review): only Integer is unboxed to int when building the parameter
 * list; Long/Double/Boolean arguments will only match methods declared with
 * the wrapper types — confirm this restriction is intended.
 *
 * @param obj    the receiver, or a Class for static invocation
 * @param method the method name
 * @param args   the arguments; used both for dispatch and the call
 * @return the method's return value
 */
protected Object invoke(final Object obj, final String method, final Object[] args)
    throws NoSuchMethodException, IllegalAccessException, InvocationTargetException
{
    final Class<?>[] c = new Class<?>[args.length];
    for (int i = 0; i < c.length; ++i) {
        c[i] = args[i].getClass();
        if (c[i] == Integer.class)
            c[i] = int.class;
    }
    try {
        if (obj instanceof Class)
            return ((Class<?>) obj).getMethod(method, c).invoke(null, args);
        return obj.getClass().getMethod(method, c).invoke(obj, args);
    }
    catch (final IllegalArgumentException e) {
        // Re-throw with context naming the receiver and method.
        throw new IllegalArgumentException("illegal argument on invoking "
            + obj.getClass().getName() + "." + method + ": " + e.getMessage());
    }
}
def cmd(self, value):
    """
    setter for final 'CMD' instruction in final build stage

    Replaces the CMD of the last build stage when one exists, otherwise
    appends a new CMD line at the end of the Dockerfile.
    """
    cmd = None
    # Track the CMD belonging to the *final* stage only: every FROM starts
    # a new stage and resets the tracked instruction.
    for insndesc in self.structure:
        if insndesc['instruction'] == 'FROM':  # new stage, reset
            cmd = None
        elif insndesc['instruction'] == 'CMD':
            cmd = insndesc
    new_cmd = 'CMD ' + value
    if cmd:
        self.add_lines_at(cmd, new_cmd, replace=True)
    else:
        self.add_lines(new_cmd)
def summary(self, h):
    """
    Summarize the results for each model for h steps of the algorithm

    Parameters
    ----------
    h : int
        How many steps to run the aggregating algorithm on

    Returns
    ----------
    - pd.DataFrame of losses for each model, indexed by 'Ensemble' followed
      by the individual model names, with a single loss column
    """
    _, losses, _ = self.run(h=h)
    df = pd.DataFrame(losses)
    # First row is the ensemble itself, then the constituent models.
    df.index = ['Ensemble'] + self.model_names
    df.columns = [self.loss_name]
    return df
def restore_descriptor(self, converted_descriptor):
    """Restore a Table Schema descriptor from a BigQuery field list.

    Each BigQuery field contributes a name and a restored type; any mode
    other than NULLABLE (e.g. REQUIRED) becomes a required constraint.
    """
    restored_fields = []
    for bq_field in converted_descriptor['fields']:
        entry = {
            'name': bq_field['name'],
            'type': self.restore_type(bq_field['type']),
        }
        if bq_field.get('mode', 'NULLABLE') != 'NULLABLE':
            entry['constraints'] = {'required': True}
        restored_fields.append(entry)
    return {'fields': restored_fields}
/**
 * Propagates a read-only request to the current connection, but only when the
 * assureReadOnly option is set and the requested state actually changes.
 * Uses check/lock/re-check: the volatile flag is tested again under the proxy
 * lock before the session is switched.
 *
 * @param mustBeReadOnly the desired read-only state
 * @throws SQLException if setting the session read-only state fails
 */
public void switchReadOnlyConnection(Boolean mustBeReadOnly) throws SQLException {
    if (urlParser.getOptions().assureReadOnly && currentReadOnlyAsked != mustBeReadOnly) {
        proxy.lock.lock();
        try {
            // verify not updated now that hold lock, double check safe due to volatile
            if (currentReadOnlyAsked != mustBeReadOnly) {
                currentReadOnlyAsked = mustBeReadOnly;
                setSessionReadOnly(mustBeReadOnly, currentProtocol);
            }
        } finally {
            proxy.lock.unlock();
        }
    }
}
def load_data(self, filename, *args, **kwargs):
    """
    Load parameterized data from different sheets.

    Loads the base data via the parent class, attaches the parameter values
    (with units) under the parameter's name, then concatenates each
    per-sheet data key ``<key>_<n>`` into a single 2-D quantity ``<key>``.

    NOTE(review): uses ``xrange``, so this module targets Python 2 — confirm
    before porting.
    """
    # load parameterized data
    data = super(ParameterizedXLS, self).load_data(filename)
    # add parameter to data
    parameter_name = self.parameterization['parameter']['name']
    parameter_values = self.parameterization['parameter']['values']
    parameter_units = str(self.parameterization['parameter']['units'])
    data[parameter_name] = parameter_values * UREG(parameter_units)
    # number of sheets
    num_sheets = len(self.parameterization['parameter']['sheets'])
    # parse and concatenate parameterized data
    for key in self.parameterization['data']:
        units = str(self.parameterization['data'][key].get('units')) or ''
        datalist = []
        for n in xrange(num_sheets):
            k = key + '_' + str(n)
            # Each sheet contributes one row of the final 2-D array.
            datalist.append(data[k].reshape((1, -1)))
            data.pop(k)  # remove unused data keys
        data[key] = np.concatenate(datalist, axis=0) * UREG(units)
    return data
java | public Scope[] getChildren() {
if (mChildren == null) {
return new Scope[0];
}
else {
return mChildren.toArray(new Scope[mChildren.size()]);
}
} |
/**
 * Returns whether the cursor is at end-of-file.  For plain tables (which
 * cannot handle starting/ending keys natively) the configured end-select key
 * is checked manually, and the EOF status bit is latched once exceeded.
 *
 * @return true when the record position is at (or beyond) EOF
 */
public boolean isEOF()
{
    boolean bFlag = ((m_iRecordStatus & DBConstants.RECORD_AT_EOF) != 0);
    if (this.isTable())
    {   // If this is a table, it can't handle starting and ending keys.. do it manually
        if (bFlag)
            return bFlag;
        // Only re-evaluate when an end-select key has actually been set.
        if (this.getRecord().getKeyArea(-1).isModified(DBConstants.END_SELECT_KEY))
        {
            if (!bFlag)
                bFlag = this.getRecord().checkParams(DBConstants.END_SELECT_KEY);
            if (bFlag)
                m_iRecordStatus |= DBConstants.RECORD_AT_EOF;   // At EOF (latch the status bit)
        }
    }
    return bFlag;
}
java | protected boolean convertToBoolean(final Object fromStack) throws ParseException
{
if (fromStack instanceof Number)
/*
* 0 is the only number that is false, all others are true.
*/
return ((Number) fromStack).intValue() != 0;
if (fromStack instanceof String)
return Boolean.parseBoolean((String) fromStack);
if (fromStack instanceof Boolean)
return (Boolean) fromStack;
final StringBuilder errMsg = new StringBuilder();
errMsg.append("invalid type ");
errMsg.append(fromStack.getClass().getSimpleName());
throw new ParseException(errMsg.toString(), 0);
} |
def DeleteStoredProcedure(self, sproc_link, options=None):
    """Deletes a stored procedure.

    :param str sproc_link:
        The link to the stored procedure.
    :param dict options:
        The request options for the request.

    :return:
        The deleted Stored Procedure.
    :rtype:
        dict

    """
    request_options = {} if options is None else options
    resource_path = base.GetPathFromLink(sproc_link)
    resource_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
    return self.DeleteResource(
        resource_path,
        'sprocs',
        resource_id,
        None,
        request_options)
java | private static int matchString(String str, CharSequence src, int begin, int end) {
final int patternLength = str.length();
int i = 0;
for (; (i < patternLength) && ((begin + i) < end); i++) {
final char exp = str.charAt(i);
final char enc = src.charAt(begin + i);
if (exp != enc) return Pattern.MISMATCH;
}
return i;
} |
java | public final void restoreDefaults() {
SharedPreferences sharedPreferences = getPreferenceManager().getSharedPreferences();
if (getPreferenceScreen() != null) {
restoreDefaults(getPreferenceScreen(), sharedPreferences);
}
} |
def post_process_images(self, doctree):
    """Pick the best candidate for all image URIs.

    After the base builder's processing, ensures every image node has a
    candidates entry and rewrites image URIs that point inside the output
    directory into paths relative to this document's location.
    """
    super(AbstractSlideBuilder, self).post_process_images(doctree)

    # figure out where this doctree is in relation to the srcdir:
    # one '..' per directory level below the source root.
    relative_base = (
        ['..'] *
        doctree.attributes.get('source')[len(self.srcdir) + 1:].count('/')
    )

    for node in doctree.traverse(nodes.image):
        if node.get('candidates') is None:
            node['candidates'] = ('*',)

        # fix up images with absolute paths
        if node['uri'].startswith(self.outdir):
            node['uri'] = '/'.join(
                relative_base + [
                    node['uri'][len(self.outdir) + 1:]
                ]
            )
def configparser_to_backend_config(cp_instance):
    """
    Return a config dict generated from a configparser instance.

    This functions main purpose is to ensure config dict values are properly typed.

    Note:
        This can be used with any ``ConfigParser`` backend instance not just the default one
        in order to extract its config.
        If a key is not found in ``cp_instance`` the resulting dict will have ``None``
        assigned to this dict key.
    """
    # Each helper extracts and type-coerces one key from the [Backend] section.
    def get_store():
        # [TODO]
        # This should be deligated to a dedicated validation function!
        store = cp_instance.get('Backend', 'store')
        if store not in hamster_lib.REGISTERED_BACKENDS.keys():
            raise ValueError(_("Unrecognized store option."))
        return store

    def get_day_start():
        # Parsed as a time-of-day in HH:MM:SS form.
        try:
            day_start = datetime.datetime.strptime(cp_instance.get('Backend',
                'day_start'), '%H:%M:%S').time()
        except ValueError:
            raise ValueError(_(
                "We encountered an error when parsing configs 'day_start'"
                " value! Aborting ..."
            ))
        return day_start

    def get_fact_min_delta():
        return cp_instance.getint('Backend', 'fact_min_delta')

    def get_tmpfile_path():
        return cp_instance.get('Backend', 'tmpfile_path')

    def get_db_engine():
        return text_type(cp_instance.get('Backend', 'db_engine'))

    def get_db_path():
        return text_type(cp_instance.get('Backend', 'db_path'))

    def get_db_host():
        return text_type(cp_instance.get('Backend', 'db_host'))

    def get_db_port():
        return cp_instance.getint('Backend', 'db_port')

    def get_db_name():
        return text_type(cp_instance.get('Backend', 'db_name'))

    def get_db_user():
        return text_type(cp_instance.get('Backend', 'db_user'))

    def get_db_password():
        return text_type(cp_instance.get('Backend', 'db_password'))

    result = {
        'store': get_store(),
        'day_start': get_day_start(),
        'fact_min_delta': get_fact_min_delta(),
        'tmpfile_path': get_tmpfile_path(),
        'db_engine': get_db_engine(),
        'db_path': get_db_path(),
        'db_host': get_db_host(),
        'db_port': get_db_port(),
        'db_name': get_db_name(),
        'db_user': get_db_user(),
        'db_password': get_db_password(),
    }
    return result
/**
 * Adds the Link and Allow/Accept-* HTTP headers appropriate for the given
 * resource type (memento, timemap, binary, binary description, container).
 *
 * @param resource the resource whose headers are being emitted
 */
protected void addLinkAndOptionsHttpHeaders(final FedoraResource resource) {
    // Add Link headers
    addResourceLinkHeaders(resource);
    addAcceptExternalHeader();

    // Add Options headers: the allowed methods depend on the resource kind.
    final String options;

    if (resource.isMemento()) {
        options = "GET,HEAD,OPTIONS,DELETE";
    } else if (resource instanceof FedoraTimeMap) {
        options = "POST,HEAD,GET,OPTIONS";
        // NOTE(review): "Vary-Post" is an unusual header name (not the
        // standard Vary header) — confirm it is intentional.
        servletResponse.addHeader("Vary-Post", MEMENTO_DATETIME_HEADER);
        addAcceptPostHeader();
    } else if (resource instanceof FedoraBinary) {
        options = "DELETE,HEAD,GET,PUT,OPTIONS";
    } else if (resource instanceof NonRdfSourceDescription) {
        options = "HEAD,GET,DELETE,PUT,PATCH,OPTIONS";
        servletResponse.addHeader(HTTP_HEADER_ACCEPT_PATCH, contentTypeSPARQLUpdate);
    } else if (resource instanceof Container) {
        options = "MOVE,COPY,DELETE,POST,HEAD,GET,PUT,PATCH,OPTIONS";
        servletResponse.addHeader(HTTP_HEADER_ACCEPT_PATCH, contentTypeSPARQLUpdate);
        addAcceptPostHeader();
    } else {
        options = "";
    }

    servletResponse.addHeader("Allow", options);
}
/**
 * Reads and parses the COFF file header, which is located immediately after
 * the PE signature.
 *
 * @param pesig the parsed PE signature (provides the file offset)
 * @param raf   the open file to read from
 * @return the parsed COFF file header
 * @throws IOException if reading the header bytes fails
 */
private COFFFileHeader loadCOFFFileHeader(PESignature pesig,
        RandomAccessFile raf) throws IOException {
    // coff header starts right after the PE signature
    long offset = pesig.getOffset() + PESignature.PE_SIG.length;
    logger.info("COFF Header offset: " + offset);
    // read bytes, size is fixed anyway
    byte[] headerbytes = loadBytesSafely(offset,
            COFFFileHeader.HEADER_SIZE, raf);
    // construct header
    return COFFFileHeader.newInstance(headerbytes, offset);
}
/**
 * Parses a JSON string into one of the Couchbase JSON wrapper types.
 * Supported targets: JsonObject, JsonArray, and JsonDocument (whose id is
 * pulled from the _ID field and removed from the content).
 *
 * @param targetClass JsonObject, JsonArray or JsonDocument class
 * @param json        the JSON text to parse
 * @return the parsed wrapper instance
 * @throws IllegalArgumentException for any other target class
 */
public static <T> T fromJSON(final Class<T> targetClass, final String json) {
    if (targetClass.equals(JsonObject.class)) {
        return (T) JsonObject.from(N.fromJSON(Map.class, json));
    } else if (targetClass.equals(JsonArray.class)) {
        return (T) JsonArray.from(N.fromJSON(List.class, json));
    } else if (targetClass.equals(JsonDocument.class)) {
        final JsonObject jsonObject = JsonObject.from(N.fromJSON(Map.class, json));
        // The document id lives in the _ID field; extract it and strip it
        // from the document body before wrapping.
        final String id = N.stringOf(jsonObject.get(_ID));

        jsonObject.removeKey(_ID);

        return (T) JsonDocument.create(id, jsonObject);
    } else {
        throw new IllegalArgumentException("Unsupported type: " + ClassUtil.getCanonicalClassName(targetClass));
    }
}
/**
 * Inserts this model's attributes as a new row, applying the save filter
 * first and reading back the generated primary key.
 *
 * @return true when at least one row was inserted
 * @throws ActiveRecordException wrapping any underlying exception
 */
public boolean save() {
    filter(FILTER_BY_SAVE);

    Config config = _getConfig();
    Table table = _getTable();

    StringBuilder sql = new StringBuilder();
    List<Object> paras = new ArrayList<Object>();
    config.dialect.forModelSave(table, attrs, sql, paras);
    // if (paras.size() == 0)	return false;	// The sql "insert into tableName() values()" works fine, so delete this line

    // --------
    Connection conn = null;
    PreparedStatement pst = null;
    int result = 0;
    try {
        conn = config.getConnection();
        // Oracle needs the key columns named explicitly to return
        // generated keys; other dialects use RETURN_GENERATED_KEYS.
        if (config.dialect.isOracle()) {
            pst = conn.prepareStatement(sql.toString(), table.getPrimaryKey());
        } else {
            pst = conn.prepareStatement(sql.toString(), Statement.RETURN_GENERATED_KEYS);
        }
        config.dialect.fillStatement(pst, paras);
        result = pst.executeUpdate();
        // Copy the generated key back into this model instance.
        config.dialect.getModelGeneratedKey(this, pst, table);
        _getModifyFlag().clear();
        return result >= 1;
    } catch (Exception e) {
        throw new ActiveRecordException(e);
    } finally {
        // Statement and connection are always released, even on failure.
        config.close(pst, conn);
    }
}
def prepare(self):
    """Check that the file exists, optionally downloading it first.

    Checks that the file is indeed an SQLite3 database (via
    ``self.check_format``) and, when ``self.known_md5`` is set, verifies
    the checksum.  Sets ``self.prepared`` to True on success.

    Raises
    ------
    FileNotFoundError
        If the file is missing and no retrieval URL is configured.
    ValueError
        If the MD5 checksum does not match the expected value.
    """
    if not os.path.exists(self.path):
        if self.retrieve:
            print("Downloading SQLite3 database...")
            download_from_url(self.retrieve, self.path, progress=True)
        else:
            # Specific exception type instead of a bare Exception.
            raise FileNotFoundError("The file '" + self.path + "' does not exist.")
    self.check_format()
    # Explicit raise instead of `assert`, which is stripped under -O.
    if self.known_md5 and self.known_md5 != self.md5:
        raise ValueError("MD5 mismatch for '%s': expected %s, got %s"
                         % (self.path, self.known_md5, self.md5))
    self.prepared = True
def _get_pod_by_metric_label(self, labels):
    """Resolve the pod object referenced by a metric's label set.

    :param labels: metric labels: iterable
    :return: the pod from ``self.pod_list`` whose UID matches the labels
    """
    return get_pod_by_uid(self._get_pod_uid(labels), self.pod_list)
def _escape_identifiers(self, item):
    """
    This function escapes column and table names

    Wraps the identifier (and, for dotted names, each dotted part) in the
    dialect's escape character, then collapses any doubled escape characters
    the caller may already have included.

    NOTE(review): the escape character is interpolated into a regex character
    class unescaped; an escape char that is a regex metacharacter (e.g. ']')
    would break the sub() — confirm the dialects in use only set safe chars.

    @param item: identifier (possibly dotted and/or reserved-suffixed)
    """
    if self._escape_char == '':
        return item
    # Reserved identifier suffixes (e.g. '.*'): escape only the prefix part.
    for field in self._reserved_identifiers:
        if item.find('.%s' % field) != -1:
            _str = "%s%s" % (self._escape_char, item.replace('.', '%s.' % self._escape_char))
            # remove duplicates if the user already included the escape
            return re.sub(r'[%s]+'%self._escape_char, self._escape_char, _str)
    if item.find('.') != -1:
        # Dotted name: escape each component, e.g. a.b -> `a`.`b`.
        _str = "%s%s%s" % (self._escape_char, item.replace('.', '%s.%s'%(self._escape_char, self._escape_char)),
                           self._escape_char)
    else:
        _str = self._escape_char+item+self._escape_char
    # remove duplicates if the user already included the escape
    return re.sub(r'[%s]+'%self._escape_char, self._escape_char, _str)
def login(config, api_key=""):
    """Store your Bugzilla API Key

    Prompts for the key when not supplied, validates it against the
    Bugzilla /rest/whoami endpoint, and only persists it to the config
    file when the server accepts it.
    """
    if not api_key:
        info_out(
            "If you don't have an API Key, go to:\n"
            "https://bugzilla.mozilla.org/userprefs.cgi?tab=apikey\n"
        )
        # getpass keeps the key out of the terminal echo/history.
        api_key = getpass.getpass("API Key: ")

    # Before we store it, let's test it.
    url = urllib.parse.urljoin(config.bugzilla_url, "/rest/whoami")
    # Refuse to send credentials over a non-TLS connection.
    assert url.startswith("https://"), url
    response = requests.get(url, params={"api_key": api_key})
    if response.status_code == 200:
        # Bugzilla can return 200 with an error payload; check it.
        if response.json().get("error"):
            error_out("Failed - {}".format(response.json()))
        else:
            update(
                config.configfile,
                {
                    "BUGZILLA": {
                        "bugzilla_url": config.bugzilla_url,
                        "api_key": api_key,
                        # "login": login,
                    }
                },
            )
            success_out("Yay! It worked!")
    else:
        error_out("Failed - {} ({})".format(response.status_code, response.json()))
/**
 * Adds a tile renderer layer for the second map view, using the second tile
 * cache, map file and render theme.
 */
protected void createLayers2() {
    this.mapView2.getLayerManager()
        .getLayers().add(AndroidUtil.createTileRendererLayer(this.tileCaches.get(1),
            this.mapView2.getModel().mapViewPosition, getMapFile2(),
            getRenderTheme2(), false, true, false));
}
/**
 * Builds a decomposable matcher over a 1-tuple whose single element is
 * further decomposed by the given 3-way matcher.
 *
 * @param a matcher decomposing the tuple's element into three parts
 * @return a matcher extracting three values from a {@code Tuple1}
 */
public static <A, EA extends A, A1, A2, A3> DecomposableMatchBuilder3<Tuple1<A>, A1, A2, A3> tuple1(
    DecomposableMatchBuilder3<EA, A1, A2, A3> a) {
    // The tuple itself matches anything; decomposition happens on the field.
    List<Matcher<Object>> matchers = Lists.of(ArgumentMatchers.any());

    return new DecomposableMatchBuilder1<Tuple1<A>, EA>(matchers, 0, new Tuple1FieldExtractor<>())
        .decomposeFirst(a);
}
def gfm(text):
    """
    Prepare text for rendering by a regular Markdown processor.

    Converts GitHub-flavored constructs (fenced code blocks, naked URLs,
    significant single newlines, underscores inside words) into forms a
    vanilla Markdown processor handles, preserving CRLF line endings when
    the input used them.
    """
    def indent_code(matchobj):
        # Rewrite a ```syntax fenced block as a 4-space indented block,
        # with an optional ':::syntax' highlighting hint.
        syntax = matchobj.group(1)
        code = matchobj.group(2)
        if syntax:
            result = '    :::' + syntax + '\n'
        else:
            result = ''
        # The last line will be blank since it had the closing "```". Discard it
        # when indenting the lines.
        return result + '\n'.join(['    ' + line for line in code.split('\n')[:-1]])

    # Normalize to LF internally; restored at the end.
    use_crlf = text.find('\r') != -1
    if use_crlf:
        text = text.replace('\r\n', '\n')

    # Render GitHub-style ```code blocks``` into Markdown-style 4-space indented blocks
    text = CODEPATTERN_RE.sub(indent_code, text)

    # Code spans are removed before the text transforms below so they are
    # not mangled, then spliced back in at the end.
    text, code_blocks = remove_pre_blocks(text)
    text, inline_blocks = remove_inline_code_blocks(text)

    # Prevent foo_bar_baz from ending up with an italic word in the middle.
    def italic_callback(matchobj):
        s = matchobj.group(0)
        # don't mess with URLs:
        if 'http:' in s or 'https:' in s:
            return s
        return s.replace('_', r'\_')

    # fix italics for code blocks
    text = ITALICSPATTERN_RE.sub(italic_callback, text)

    # linkify naked URLs
    # wrap the URL in brackets: http://foo -> [http://foo](http://foo)
    text = NAKEDURL_RE.sub(r'\1[\2](\2)\3', text)

    # In very clear cases, let newlines become <br /> tags.
    def newline_callback(matchobj):
        if len(matchobj.group(1)) == 1:
            return matchobj.group(0).rstrip() + '  \n'
        else:
            return matchobj.group(0)

    text = NEWLINE_RE.sub(newline_callback, text)

    # now restore removed code blocks
    removed_blocks = code_blocks + inline_blocks
    for removed_block in removed_blocks:
        text = text.replace('{placeholder}', removed_block, 1)

    if use_crlf:
        text = text.replace('\n', '\r\n')

    return text
/**
 * Renders this chunk as statement code at the given indent level: first its
 * initial statements, then (when the chunk is an Expression) the output
 * expression terminated with a semicolon.
 *
 * @param startingIndent indentation level for the emitted code
 * @return the formatted code string
 */
@ForOverride
String getCode(int startingIndent) {
    FormattingContext initialStatements = new FormattingContext(startingIndent);
    initialStatements.appendInitialStatements(this);

    FormattingContext outputExprs = new FormattingContext(startingIndent);
    if (this instanceof Expression) {
        outputExprs.appendOutputExpression((Expression) this);
        outputExprs.append(';').endLine();
    }

    return initialStatements.concat(outputExprs).toString();
}
/**
 * Builds an SVG path element describing an annular (ring) segment between
 * two radii, spanning {@code angleDelta} radians from {@code angleStart}.
 * Angles are measured clockwise from the top (12 o'clock) of the circle.
 *
 * @param svgp        plot used to create the element
 * @param centerx     segment center x
 * @param centery     segment center y
 * @param angleStart  starting angle in radians
 * @param angleDelta  angular extent in radians
 * @param innerRadius inner radius; 0 produces a full pie slice
 * @param outerRadius outer radius
 * @return the SVG path element
 */
public static Element svgCircleSegment(SVGPlot svgp, double centerx, double centery, double angleStart, double angleDelta, double innerRadius, double outerRadius) {
    final DoubleWrapper tmp = new DoubleWrapper(); // To return cosine
    double sin1st = FastMath.sinAndCos(angleStart, tmp);
    double cos1st = tmp.value;

    double sin2nd = FastMath.sinAndCos(angleStart + angleDelta, tmp);
    double cos2nd = tmp.value; // Note: tmp is modified!

    // Corner points of the segment on the inner and outer circles.
    double inner1stx = centerx + (innerRadius * sin1st);
    double inner1sty = centery - (innerRadius * cos1st);
    double outer1stx = centerx + (outerRadius * sin1st);
    double outer1sty = centery - (outerRadius * cos1st);

    double inner2ndx = centerx + (innerRadius * sin2nd);
    double inner2ndy = centery - (innerRadius * cos2nd);
    double outer2ndx = centerx + (outerRadius * sin2nd);
    double outer2ndy = centery - (outerRadius * cos2nd);

    // SVG arcs need the large-arc flag when spanning more than half a turn.
    double largeArc = angleDelta >= Math.PI ? 1 : 0;
    SVGPath path = new SVGPath(inner1stx, inner1sty).lineTo(outer1stx, outer1sty) //
        .ellipticalArc(outerRadius, outerRadius, 0, largeArc, 1, outer2ndx, outer2ndy) //
        .lineTo(inner2ndx, inner2ndy);
    // With a positive inner radius, close the ring with the inner arc;
    // otherwise the path closes as a pie slice at the center.
    if(innerRadius > 0) {
        path.ellipticalArc(innerRadius, innerRadius, 0, largeArc, 0, inner1stx, inner1sty);
    }

    return path.makeElement(svgp);
}
def _bokeh_quants(self, inf, sup, chart_type, color):
    """
    Draw a chart to visualize quantiles

    Overlays the main series chart with two lines at the *inf* and *sup*
    quantiles of the y column.  Returns the composed chart, or None after
    reporting the error via ``self.err``.
    """
    try:
        # Work on a copy so quantile columns don't pollute this dataset.
        ds2 = self._duplicate_()
        qi = ds2.df[ds2.y].quantile(inf)
        qs = ds2.df[ds2.y].quantile(sup)
        ds2.add("sup", qs)
        ds2.add("inf", qi)
        ds2.chart(ds2.x, ds2.y)
        if chart_type == "point":
            c = ds2.point_(opts=self.chart_opts, style=self.chart_style)
        elif chart_type == "line_point":
            c = ds2.line_point_(opts=self.chart_opts, style=self.chart_style)
        else:
            c = ds2.line_(opts=self.chart_opts, style=self.chart_style)
        # Quantile bound lines drawn in the requested color, then the
        # color is reset for subsequent charts.
        ds2.color(color)
        ds2.chart(ds2.x, "sup")
        c2 = ds2.line_()
        ds2.chart(ds2.x, "inf")
        c3 = ds2.line_()
        ds2.rcolor()
        return c * c2 * c3
    except Exception as e:
        # Errors are reported through the dataset's error channel;
        # the method then implicitly returns None.
        self.err(e, "Can not draw quantile chart")
def _move_content_to(self, other_tc):
    """
    Append the content of this cell to *other_tc*, leaving this cell with
    a single empty ``<w:p>`` element.

    A no-op when *other_tc* is this cell or when this cell is already empty.
    """
    if other_tc is self:
        return
    if self._is_empty:
        return
    # Drop the target's trailing empty paragraph so content joins cleanly.
    other_tc._remove_trailing_empty_p()
    # appending moves each element from self to other_tc
    for block_element in self.iter_block_items():
        other_tc.append(block_element)
    # add back the required minimum single empty <w:p> element
    self.append(self._new_p())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.