language | func_code_string |
---|---|
python | def get_data_filename(filename):
"""Map filename to its actual path.
Parameters
----------
filename : str
Filename to search.
Returns
-------
path : str
Full path to the file in data directory.
"""
global _data_map
if _data_map is None:
_data_map = {}
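# walk the data directory once and index every file by its basename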
for root, dirs, files in os.walk(specdir):
for fname in files:
_data_map[fname] = os.path.join(root, fname)
if filename not in _data_map:
raise KeyError(filename + ' not found in ' + specdir)
return _data_map[filename] |
python | def set(self, name: str, value: Union[str, List[str]]) -> None:
"""
Set a header.
"""
self._headers[name] = value |
java | public synchronized void sortResultsByEnergies() throws CDKException {
// System.out.println("\nSort By Energies");
Map<Integer, Map<Integer, Integer>> allEnergyMCS = new TreeMap<Integer, Map<Integer, Integer>>();
Map<Integer, Map<IAtom, IAtom>> allEnergyAtomMCS = new TreeMap<Integer, Map<IAtom, IAtom>>();
Map<Integer, Double> stereoScoreMap = new TreeMap<Integer, Double>();
Map<Integer, Integer> fragmentScoreMap = new TreeMap<Integer, Integer>();
Map<Integer, Double> energySelectionMap = new TreeMap<Integer, Double>();
initializeMaps(allEnergyMCS, allEnergyAtomMCS, stereoScoreMap, fragmentScoreMap, energySelectionMap);
for (Integer key : allEnergyMCS.keySet()) {
Map<Integer, Integer> mcsAtom = allEnergyMCS.get(key);
Double energies = getMappedMoleculeEnergies(mcsAtom);
energySelectionMap.put(key, energies);
}
energySelectionMap = sortMapByValueInAccendingOrder(energySelectionMap);
boolean flag = false;
double lowestEnergyScore = 99999999.99;
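// the map is now sorted in ascending order, so the first entry holds the lowest energy; note it, reset previous results, and stop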
for (Integer key : energySelectionMap.keySet()) {
lowestEnergyScore = energySelectionMap.get(key).doubleValue();
flag = true;
clear();
break;
}
int counter = 0;
for (Map.Entry<Integer, Double> map : energySelectionMap.entrySet()) {
if (lowestEnergyScore == map.getValue().doubleValue()) {
addSolution(counter, map.getKey(), allEnergyAtomMCS, allEnergyMCS, stereoScoreMap, energySelectionMap,
fragmentScoreMap);
counter++;
//
// System.out.println("Energy key " + map.getKey() + "Energy MCS " + allEnergyMCS.get(map.getKey()));
// System.out.println("Frag Size: " + fragmentScoreMap.get(map.getKey()) + " Stereo Value: "
// + stereoScoreMap.get(map.getKey()));
}
}
if (flag) {
firstSolution.putAll(allMCS.get(0));
firstAtomMCS.putAll(allAtomMCS.get(0));
clear(allEnergyMCS, allEnergyAtomMCS, stereoScoreMap, fragmentScoreMap, energySelectionMap);
}
} |
java | @NonNull
@SuppressWarnings("WeakerAccess")
public SnackbarWrapper appendMessage(@NonNull CharSequence message, @ColorInt int color) {
Spannable spannable = new SpannableString(message);
spannable.setSpan(new ForegroundColorSpan(color), 0, spannable.length(),
Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
messageView.append(spannable);
return this;
} |
java | List<T> subList(long from, long to) {
if (fullListSize > 0 && to - from > 0) {
checkRange(from);
// subList takes a [from;to[ range
checkRange(to - 1);
// check if the data of the required sub list are available
assertSubRange(from, to - 1);
// cannot exceed Integer.MAX_VALUE (checked by assertSubRange)
return subList.subList((int) (from - subListOffset), (int) (to - subListOffset));
}
return new ArrayList<>();
} |
java | public Enumeration<String> getElements() {
AttributeNameEnumeration elements = new AttributeNameEnumeration();
elements.addElement(VERSION);
elements.addElement(SERIAL_NUMBER);
elements.addElement(ALGORITHM_ID);
elements.addElement(ISSUER);
elements.addElement(VALIDITY);
elements.addElement(SUBJECT);
elements.addElement(KEY);
elements.addElement(ISSUER_ID);
elements.addElement(SUBJECT_ID);
elements.addElement(EXTENSIONS);
return elements.elements();
} |
python | def select(self, *keys):
"""
Restrict the query result to contain only the given fields. This can be called repeatedly; the fields from every call are included.
:param keys: names of the fields to include
:rtype: Query
"""
if len(keys) == 1 and isinstance(keys[0], (list, tuple)):
keys = keys[0]
self._select += keys
return self |
python | def run(self, epochs):
"""
Description : Run training for LipNet
"""
best_loss = sys.maxsize
for epoch in trange(epochs):
iter_no = 0
## train
sum_losses, len_losses = self.train_batch(self.train_dataloader)
if iter_no % 20 == 0:
current_loss = sum_losses / len_losses
print("[Train] epoch:{e} iter:{i} loss:{l:.4f}".format(e=epoch,
i=iter_no,
l=current_loss))
## validating
sum_val_losses, len_val_losses = self.infer_batch(self.valid_dataloader)
current_val_loss = sum_val_losses / len_val_losses
print("[Vaild] epoch:{e} iter:{i} loss:{l:.4f}".format(e=epoch,
i=iter_no,
l=current_val_loss))
if best_loss > current_val_loss:
self.save_model(epoch, current_val_loss)
best_loss = current_val_loss
iter_no += 1 |
java | public void changeSite(String siteRoot) {
if (!getCmsObject().getRequestContext().getSiteRoot().equals(siteRoot)) {
getCmsObject().getRequestContext().setSiteRoot(siteRoot);
getWorkplaceSettings().setSite(siteRoot);
OpenCms.getSessionManager().updateSessionInfo(getCmsObject(), getHttpSession());
}
} |
java | private byte[] toJSONStringCompressed() {
String str = toJSONString();
if (str == null || str.length() == 0) {
return new byte[0];
}
byte[] outStr;
try {
outStr = compressJSONString(str);
} catch (IOException e) {
throw new RuntimeException("Failed to serialize Hashinator Configuration to Compressed JSON .", e);
}
return outStr;
} |
java | public long getColumnCardinality(String schema, String table, Authorizations auths, String family, String qualifier, Collection<Range> colValues)
throws ExecutionException
{
LOG.debug("Getting cardinality for %s:%s", family, qualifier);
// Collect all exact Accumulo Ranges, i.e. single value entries vs. a full scan
Collection<CacheKey> exactRanges = colValues.stream()
.filter(ColumnCardinalityCache::isExact)
.map(range -> new CacheKey(schema, table, family, qualifier, range, auths))
.collect(Collectors.toList());
LOG.debug("Column values contain %s exact ranges of %s", exactRanges.size(), colValues.size());
// Sum the cardinalities for the exact-value Ranges
// This is where the reach-out to Accumulo occurs for all Ranges that have not
// previously been fetched
long sum = cache.getAll(exactRanges).values().stream().mapToLong(Long::longValue).sum();
// If these collection sizes are not equal,
// then there is at least one non-exact range
if (exactRanges.size() != colValues.size()) {
// for each range in the column value
for (Range range : colValues) {
// if this range is not exact
if (!isExact(range)) {
// Then get the value for this range using the single-value cache lookup
sum += cache.get(new CacheKey(schema, table, family, qualifier, range, auths));
}
}
}
return sum;
} |
python | def make_figure(plots):
"""
:param plots: list of pairs (task_name, memory array)
"""
# NB: matplotlib is imported inside since it is a costly import
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.grid(True)
ax.set_xlabel('tasks')
ax.set_ylabel('GB')
start = 0
for task_name, mem in plots:
ax.plot(range(start, start + len(mem)), mem, label=task_name)
start += len(mem)
ax.legend()
return plt |
java | public void close() {
if (this.sock != null)
try {
this.sock_in.close();
this.sock_out.close();
this.sock.close();
this.sock=null;
this.sock_in=null;
this.sock_out=null;
} catch (IOException e) {
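// ignore I/O errors raised while closing; the connection is being torn down anyway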
}
} |
python | def process_fish(self, limit=None):
"""
Fish give identifiers to the "effective genotypes" that we create.
We can match these by:
Fish = (intrinsic) genotype + set of morpholinos
We assume here that the intrinsic genotypes and their parts
will be processed separately, prior to calling this function.
:param limit:
:return:
"""
LOG.info("Processing Fish Parts")
raw = '/'.join((self.rawdir, self.files['fish_components']['file']))
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
taxon_id = self.globaltt['Danio rerio']
geno = Genotype(graph)
allele_to_construct_hash = {}
with open(raw, 'r', encoding="utf8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
for row in filereader:
(fish_num, fish_name, gene_num, gene_symbol, affector_num,
affector_symbol, construct_num, construct_symbol,
background_num, background_symbol, genotype_num,
genotype_name
# , empty
) = row
# fish have the following components:
# * genotype, which is the intrinsic genotype;
# this may be a genetic background (WT)
# * an optional background for the intrinsic genotype
# * affectors == alleles or morphants
# * constructs which may give rise to the affectors
# * affected genes
if fish_num not in self.fish_parts:
self.fish_parts[fish_num] = {}
self.fish_parts[fish_num] = {
'intrinsic_genotype': genotype_num,
'affectors': set(),
'fish_label': fish_name
}
# HACK - bad allele id - replace it with the new one FIXME
if affector_num == 'ZDB-ALT-090504-1':
affector_num = 'ZDB-ALT-040723-4'
self.fish_parts[fish_num]['affectors'].add(affector_num)
# add the constructs that the allele comes from
if construct_num != '':
if affector_num not in allele_to_construct_hash:
allele_to_construct_hash[affector_num] = set()
allele_to_construct_hash[affector_num].add(construct_num)
# ### finish looping through fish file
# given the components of a fish,
# subtract out the intrinsic parts to just leave the extrinsic
# to create the extrinsic genotypes.
line_counter = 0
for fish_num in self.fish_parts:
if self.test_mode and fish_num not in self.test_ids['fish']:
continue
line_counter += 1
fish_id = 'ZFIN:'+fish_num
fish = self.fish_parts[fish_num]
# get the intrinsic parts
intrinsic_genotype_num = fish['intrinsic_genotype']
intrinsic_genotype_id = 'ZFIN:'+intrinsic_genotype_num
intrinsic_genotype_label = self.id_label_map.get(
intrinsic_genotype_id)
if intrinsic_genotype_num not in self.geno_alleles:
intrinsic_parts = set()
else:
intrinsic_parts = self.geno_alleles[intrinsic_genotype_num]
# subtract out the intrinsic parts, to get the extrinsic parts
extrinsic_parts = fish['affectors'] - intrinsic_parts
extrinsic_list = list(sorted(extrinsic_parts))
# build up the extrinsic genotype from its parts.
# these will be reagents/morphants.
if len(extrinsic_list) > 0:
list_of_targeted_genes = []
gene_to_reagent_hash = {}
for eid in extrinsic_list:
# link the morpholino to the genes that it affects
eid = 'ZFIN:' + eid
# just in case, skip over the ALTs
if re.search(r'ALT', eid):
continue
ag = self.variant_loci_genes.get(eid)
# LOG.debug("%s affected genes %s", eid, str(ag))
if ag is None:
pass
# LOG.warn("No affected genes for %s", eid)
else:
# turn the gene-targeting-reagents inside out,
# such that instead of morph -> set(genes)
# we make a gene -> set(morphs)
for gid in ag:
if gid not in gene_to_reagent_hash:
gene_to_reagent_hash[gid] = set()
gene_to_reagent_hash[gid].add(eid)
# end loop through each extrinsic component
for gid in gene_to_reagent_hash:
reagent_list = sorted(list(gene_to_reagent_hash.get(gid)))
# create variant gene(s) that have been targeted
# by the reagent
if gid not in self.id_label_map:
# should not happen, except maybe in testing
LOG.error("%s not in id-label-hash", gid)
glabel = gid
else:
glabel = self.id_label_map[gid]
eid = '-'.join(reagent_list)
targeted_gene_id = self.make_targeted_gene_id(
gid, eid)
# get the reagent labels
elabel = ', '.join(
self.id_label_map.get(l) for l in reagent_list)
if elabel is None:
elabel = eid # should not happen, but just in case
targeted_gene_label = glabel + '<' + elabel + '>'
for r in reagent_list:
geno.addReagentTargetedGene(r, gid, targeted_gene_id,
targeted_gene_label)
self.id_label_map[targeted_gene_id] = targeted_gene_label
list_of_targeted_genes += [targeted_gene_id]
# end loop through each gene that is targeted
list_of_targeted_genes = sorted(list_of_targeted_genes)
extrinsic_id = '_:'+re.sub(
r':?_?', '', '-'.join(list_of_targeted_genes))
extrinsic_label = '; '.join(
str(self.id_label_map.get(l))
for l in list_of_targeted_genes)
self.id_label_map[extrinsic_id] = extrinsic_label
# add the parts
for tg in list_of_targeted_genes:
if tg != extrinsic_id:
geno.addParts(
tg, extrinsic_id, self.globaltt['has_variant_part'])
else:
extrinsic_id = None
extrinsic_label = None
if extrinsic_id is not None:
geno.addGenotype(
extrinsic_id, extrinsic_label, self.globaltt['extrinsic_genotype'])
geno.addParts(
extrinsic_id, fish_id, self.globaltt['has_variant_part'])
# check if the intrinsic is in the wildtype genotypes,
# then it's a genomic background
if intrinsic_genotype_id in self.wildtype_genotypes:
intrinsic_rel = self.globaltt['has_reference_part']
intrinsic_type = self.globaltt['genomic_background']
else:
intrinsic_rel = self.globaltt['has_variant_part']
intrinsic_type = self.globaltt['intrinsic_genotype']
geno.addGenotype(
intrinsic_genotype_id, intrinsic_genotype_label, intrinsic_type)
# add the intrinsic to the fish
geno.addParts(intrinsic_genotype_id, fish_id, intrinsic_rel)
# build the fish label
if extrinsic_id is None:
fish_label = intrinsic_genotype_label
else:
fish_label = '; '.join((
str(intrinsic_genotype_label), extrinsic_label))
fish_type = self.globaltt['effective_genotype']
geno.addGenotype(fish_id, fish_label, fish_type)
geno.addTaxon(taxon_id, fish_id)
# since we re-create a label,
# add the zfin fish label as the synonym
model.addSynonym(fish_id, fish['fish_label'])
self.id_label_map[fish_id] = fish_label
if not self.test_mode and limit is not None and line_counter > limit:
break
# ###finish iterating over fish
# iterate over the alleles and attach the constructs to them
LOG.info("Adding Allele/Construct relationships")
for a in allele_to_construct_hash:
if self.test_mode and a not in self.test_ids['allele']:
continue
allele_id = 'ZFIN:' + a
constructs = allele_to_construct_hash.get(a)
if len(constructs) > 0:
for c in constructs:
cid = 'ZFIN:' + c
geno.addSequenceDerivesFrom(allele_id, cid)
# LOG.info("constructs for %s: %s", allele_id,
# str(constructs))
# migrate the transgenic features to be alternate parts
# of the transgene insertion/alteration
if cid in self.transgenic_parts:
tg_parts = self.transgenic_parts.get(cid)
if tg_parts is not None:
for p in tg_parts:
# HACK - if it's a promoter part,
# then make it a simple has_part
if re.search(r'promoter', p):
r = self.globaltt['has_part']
else:
r = self.globaltt['has_variant_part']
geno.addParts(p, allele_id, r)
return |
python | def job_tasks(self, job_id, type=None):
"""
With the tasks API, you can obtain a collection of resources that
represent a task within a job.
:param str job_id: The job id
:param str type: type of task, valid values are m or r. m for map
task or r for reduce task
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response`
"""
path = '/ws/v1/history/mapreduce/jobs/{jobid}/tasks'.format(
jobid=job_id)
# m - for map
# r - for reduce
valid_types = ['m', 'r']
if type is not None and type not in valid_types:
msg = 'Job type %s is illegal' % (type,)
raise IllegalArgumentError(msg)
params = {}
if type is not None:
params['type'] = type
return self.request(path, **params) |
python | def get_context_data(self, **kwargs):
"""
We supplement the normal context data by adding our fields and labels.
"""
context = super(SmartView, self).get_context_data(**kwargs)
# derive our field config
self.field_config = self.derive_field_config()
# add our fields
self.fields = self.derive_fields()
# build up our current parameter string, EXCLUSIVE of our page. These
# are used to build pagination URLs
url_params = "?"
order_params = ""
for key in self.request.GET.keys():
if key != 'page' and key != 'pjax' and (len(key) == 0 or key[0] != '_'):
for value in self.request.GET.getlist(key):
url_params += "%s=%s&" % (key, urlquote(value))
elif key == '_order':
order_params = "&".join(["%s=%s" % (key, _) for _ in self.request.GET.getlist(key)])
context['url_params'] = url_params
context['order_params'] = order_params + "&"
context['pjax'] = self.pjax
# set our blocks
context['blocks'] = dict()
# stuff it all in our context
context['fields'] = self.fields
context['view'] = self
context['field_config'] = self.field_config
context['title'] = self.derive_title()
# and any extra context the user specified
context.update(self.extra_context)
# by default, our base is 'base.html', but we might be pjax
base_template = "base.html"
if 'pjax' in self.request.GET or 'pjax' in self.request.POST:
base_template = "smartmin/pjax.html"
if 'HTTP_X_PJAX' in self.request.META:
base_template = "smartmin/pjax.html"
context['base_template'] = base_template
# set our refresh if we have one
refresh = self.derive_refresh()
if refresh:
context['refresh'] = refresh
return context |
java | @Nonnull
public static Price createFromNetAmount (@Nonnull final ICurrencyValue aNetAmount, @Nonnull final IVATItem aVATItem)
{
return new Price (aNetAmount, aVATItem);
} |
java | public static String getDidYouMeanString(Collection<String> available, String it) {
String message = "";
Collection<String> mins = Levenshtein.findSimilar(available, it);
if (mins.size() > 0) {
if (mins.size() == 1) {
message += "Did you mean this?";
} else {
message += "Did you mean one of these?";
}
for (String m : mins) {
message += "\n\t" + m;
}
}
return message;
} |
python | def _file_not_empty(tmpfile):
"""
Returns True if file exists and it is not empty
to check if it is time to read container ID from cidfile
:param tmpfile: str, path to file
:return: bool, True if container id is written to the file
"""
if os.path.exists(tmpfile):
return os.stat(tmpfile).st_size != 0
else:
return False |
python | def as_base_types(self):
"""Convert this measurement to a dict of basic types."""
if not self._cached:
# Create the cached dict the first time this is called.
self._cached = {
'name': self.name,
'outcome': self.outcome.name,
}
if self.validators:
self._cached['validators'] = data.convert_to_base_types(
tuple(str(v) for v in self.validators))
if self.dimensions:
self._cached['dimensions'] = data.convert_to_base_types(self.dimensions)
if self.units:
self._cached['units'] = data.convert_to_base_types(self.units)
if self.docstring:
self._cached['docstring'] = self.docstring
if self.measured_value.is_value_set:
self._cached['measured_value'] = self.measured_value.basetype_value()
return self._cached |
java | private void subFormat(int patternCharIndex, int count,
FieldDelegate delegate, StringBuffer buffer,
boolean useDateFormatSymbols)
{
int maxIntCount = Integer.MAX_VALUE;
String current = null;
int beginOffset = buffer.length();
int field = PATTERN_INDEX_TO_CALENDAR_FIELD[patternCharIndex];
int value;
if (field == CalendarBuilder.WEEK_YEAR) {
if (calendar.isWeekDateSupported()) {
value = calendar.getWeekYear();
} else {
// use calendar year 'y' instead
patternCharIndex = PATTERN_YEAR;
field = PATTERN_INDEX_TO_CALENDAR_FIELD[patternCharIndex];
value = calendar.get(field);
}
} else if (field == CalendarBuilder.ISO_DAY_OF_WEEK) {
value = CalendarBuilder.toISODayOfWeek(calendar.get(Calendar.DAY_OF_WEEK));
} else {
value = calendar.get(field);
}
int style = (count >= 4) ? Calendar.LONG : Calendar.SHORT;
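// four or more pattern letters select the long form of a text field; fewer select the short form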
if (!useDateFormatSymbols && field != CalendarBuilder.ISO_DAY_OF_WEEK) {
current = calendar.getDisplayName(field, style, locale);
}
// Note: zeroPaddingNumber() assumes that maxDigits is either
// 2 or maxIntCount. If we make any changes to this,
// zeroPaddingNumber() must be fixed.
switch (patternCharIndex) {
case PATTERN_ERA: // 'G'
if (useDateFormatSymbols) {
String[] eras = formatData.getEras();
if (value < eras.length)
current = eras[value];
}
if (current == null)
current = "";
break;
case PATTERN_WEEK_YEAR: // 'Y'
case PATTERN_YEAR: // 'y'
if (calendar instanceof GregorianCalendar) {
if (count != 2)
zeroPaddingNumber(value, count, maxIntCount, buffer);
else // count == 2
zeroPaddingNumber(value, 2, 2, buffer); // clip 1996 to 96
} else {
if (current == null) {
zeroPaddingNumber(value, style == Calendar.LONG ? 1 : count,
maxIntCount, buffer);
}
}
break;
case PATTERN_STANDALONE_MONTH: // 'L'
{
current = formatMonth(count, value, maxIntCount, buffer, useDateFormatSymbols,
true /* standalone */);
break;
}
case PATTERN_MONTH: // 'M'
{
current = formatMonth(count, value, maxIntCount, buffer, useDateFormatSymbols,
false /* standalone */);
break;
}
case PATTERN_HOUR_OF_DAY1: // 'k' 1-based. eg, 23:59 + 1 hour =>> 24:59
if (current == null) {
if (value == 0)
zeroPaddingNumber(calendar.getMaximum(Calendar.HOUR_OF_DAY)+1,
count, maxIntCount, buffer);
else
zeroPaddingNumber(value, count, maxIntCount, buffer);
}
break;
case PATTERN_STANDALONE_DAY_OF_WEEK: // 'c'
{
current = formatWeekday(count, value, useDateFormatSymbols, true /* standalone */);
break;
}
case PATTERN_DAY_OF_WEEK: // 'E'
{
current = formatWeekday(count, value, useDateFormatSymbols, false /* standalone */);
break;
}
case PATTERN_AM_PM: // 'a'
if (useDateFormatSymbols) {
String[] ampm = formatData.getAmPmStrings();
current = ampm[value];
}
break;
// Android-added: Ignore 'b' and 'B' introduced in CLDR 32+ pattern data. http://b/68139386
// Not currently supported here.
case PATTERN_DAY_PERIOD:
case PATTERN_FLEXIBLE_DAY_PERIOD:
current = "";
break;
case PATTERN_HOUR1: // 'h' 1-based. eg, 11PM + 1 hour =>> 12 AM
if (current == null) {
if (value == 0)
zeroPaddingNumber(calendar.getLeastMaximum(Calendar.HOUR)+1,
count, maxIntCount, buffer);
else
zeroPaddingNumber(value, count, maxIntCount, buffer);
}
break;
case PATTERN_ZONE_NAME: // 'z'
if (current == null) {
TimeZone tz = calendar.getTimeZone();
boolean daylight = (calendar.get(Calendar.DST_OFFSET) != 0);
int tzstyle = count < 4 ? TimeZone.SHORT : TimeZone.LONG;
String zoneString = tz.getDisplayName(daylight, tzstyle, formatData.locale);
if (zoneString != null) {
buffer.append(zoneString);
} else {
int offsetMillis = calendar.get(Calendar.ZONE_OFFSET) +
calendar.get(Calendar.DST_OFFSET);
buffer.append(TimeZone.createGmtOffsetString(true, true, offsetMillis));
}
}
break;
case PATTERN_ZONE_VALUE: // 'Z' ("-/+hhmm" form)
{
value = calendar.get(Calendar.ZONE_OFFSET) + calendar.get(Calendar.DST_OFFSET);
final boolean includeSeparator = (count >= 4);
final boolean includeGmt = (count == 4);
buffer.append(TimeZone.createGmtOffsetString(includeGmt, includeSeparator, value));
break;
}
case PATTERN_ISO_ZONE: // 'X'
value = calendar.get(Calendar.ZONE_OFFSET)
+ calendar.get(Calendar.DST_OFFSET);
if (value == 0) {
buffer.append('Z');
break;
}
value /= 60000;
if (value >= 0) {
buffer.append('+');
} else {
buffer.append('-');
value = -value;
}
CalendarUtils.sprintf0d(buffer, value / 60, 2);
if (count == 1) {
break;
}
if (count == 3) {
buffer.append(':');
}
CalendarUtils.sprintf0d(buffer, value % 60, 2);
break;
case PATTERN_MILLISECOND: // 'S'
// Fractional seconds must be treated specially. We must always convert the parsed
// value into a fractional second [0, 1) and then widen it out to the appropriate
// formatted size. For example, an initial value of 789 will be converted
// 0.789 and then become ".7" (S) or ".78" (SS) or "0.789" (SSS) or "0.7890" (SSSS)
// in the resulting formatted output.
if (current == null) {
value = (int) (((double) value / 1000) * Math.pow(10, count));
zeroPaddingNumber(value, count, count, buffer);
}
break;
default:
// case PATTERN_DAY_OF_MONTH: // 'd'
// case PATTERN_HOUR_OF_DAY0: // 'H' 0-based. eg, 23:59 + 1 hour =>> 00:59
// case PATTERN_MINUTE: // 'm'
// case PATTERN_SECOND: // 's'
// case PATTERN_DAY_OF_YEAR: // 'D'
// case PATTERN_DAY_OF_WEEK_IN_MONTH: // 'F'
// case PATTERN_WEEK_OF_YEAR: // 'w'
// case PATTERN_WEEK_OF_MONTH: // 'W'
// case PATTERN_HOUR0: // 'K' eg, 11PM + 1 hour =>> 0 AM
// case PATTERN_ISO_DAY_OF_WEEK: // 'u' pseudo field, Monday = 1, ..., Sunday = 7
if (current == null) {
zeroPaddingNumber(value, count, maxIntCount, buffer);
}
break;
} // switch (patternCharIndex)
if (current != null) {
buffer.append(current);
}
int fieldID = PATTERN_INDEX_TO_DATE_FORMAT_FIELD[patternCharIndex];
Field f = PATTERN_INDEX_TO_DATE_FORMAT_FIELD_ID[patternCharIndex];
delegate.formatted(fieldID, f, f, beginOffset, buffer.length(), buffer);
} |
python | def iter_edges(self, cached_content=None):
"""
Iterate over the list of edges of a tree. Each edge is represented as a
tuple of two elements, each containing the list of nodes separated by
the edge.
"""
if not cached_content:
cached_content = self.get_cached_content()
all_leaves = cached_content[self]
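# each edge bipartitions the leaves: the clade under the edge vs. all remaining leaves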
for n, side1 in six.iteritems(cached_content):
yield (side1, all_leaves - side1) |
java | @Override
protected TransactionManager locateTransactionManager() {
try {
return TransactionManagerLocator.INSTANCE.getTransactionManager();
} catch (Exception e) {
throw new HibernateException(e);
}
} |
python | def visit_Compare(self, node):
""" Boolean are possible index.
>>> import gast as ast
>>> from pythran import passmanager, backend
>>> node = ast.parse('''
... def foo():
... a = 2 or 3
... b = 4 or 5
... c = a < b
... d = b < 3
... e = b == 4''')
>>> pm = passmanager.PassManager("test")
>>> res = pm.gather(RangeValues, node)
>>> res['c']
Interval(low=1, high=1)
>>> res['d']
Interval(low=0, high=0)
>>> res['e']
Interval(low=0, high=1)
"""
if any(isinstance(op, (ast.In, ast.NotIn, ast.Is, ast.IsNot))
for op in node.ops):
self.generic_visit(node)
return self.add(node, Interval(0, 1))
curr = self.visit(node.left)
res = []
for op, comparator in zip(node.ops, node.comparators):
comparator = self.visit(comparator)
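# evaluate the comparison symbolically: build a tiny 'x <op> y' expression and run it on the two Interval operands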
fake = ast.Compare(ast.Name('x', ast.Load(), None),
[op],
[ast.Name('y', ast.Load(), None)])
fake = ast.Expression(fake)
ast.fix_missing_locations(fake)
expr = compile(ast.gast_to_ast(fake), '<range_values>', 'eval')
res.append(eval(expr, {'x': curr, 'y': comparator}))
if all(res):
return self.add(node, Interval(1, 1))
elif any(r.low == r.high == 0 for r in res):
return self.add(node, Interval(0, 0))
else:
return self.add(node, Interval(0, 1)) |
java | private Status executeIf(Stmt.IfElse stmt, CallStack frame, EnclosingScope scope) {
RValue.Bool operand = executeExpression(BOOL_T, stmt.getCondition(), frame);
if (operand == RValue.True) {
// branch taken, so execute true branch
return executeBlock(stmt.getTrueBranch(), frame, scope);
} else if (stmt.hasFalseBranch()) {
// branch not taken, so execute false branch
return executeBlock(stmt.getFalseBranch(), frame, scope);
} else {
return Status.NEXT;
}
} |
java | public void logProperties(final Logger logger, final String comment) {
logger.info(comment);
for (final String key : getKeySet()) {
logger.info(" key=" + key + " value=" + get(key));
}
} |
java | public static ZoneOffset ofHoursMinutesSeconds(int hours, int minutes, int seconds) {
validate(hours, minutes, seconds);
int totalSeconds = totalSeconds(hours, minutes, seconds);
return ofTotalSeconds(totalSeconds);
} |
python | def wr_xlsx_gos(self, fout_xlsx, **kws_usr):
"""Write an Excel spreadsheet with user GO ids, grouped under broader GO terms."""
# Keyword arguments: control content
desc2nts = self.sortobj.get_desc2nts(**kws_usr)
# Keyword arguments: control xlsx format
self.wr_xlsx_nts(fout_xlsx, desc2nts, **kws_usr)
return desc2nts |
java | public RandomVariable getMonteCarloWeights(int timeIndex)
{
// Lazy initialization, synchronized for thread safety
synchronized(this) {
if(discreteProcessWeights == null || discreteProcessWeights.length == 0) {
doPrecalculateProcess();
}
}
// Return value of process
return discreteProcessWeights[timeIndex];
} |
java | public void marshall(S3DataSpec s3DataSpec, ProtocolMarshaller protocolMarshaller) {
if (s3DataSpec == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(s3DataSpec.getDataLocationS3(), DATALOCATIONS3_BINDING);
protocolMarshaller.marshall(s3DataSpec.getDataRearrangement(), DATAREARRANGEMENT_BINDING);
protocolMarshaller.marshall(s3DataSpec.getDataSchema(), DATASCHEMA_BINDING);
protocolMarshaller.marshall(s3DataSpec.getDataSchemaLocationS3(), DATASCHEMALOCATIONS3_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
python | def _on_mouse_moved(self, event):
""" mouse moved callback """
if event.modifiers() & Qt.ControlModifier:
self._select_word_under_mouse_cursor()
else:
self._remove_decoration()
self.editor.set_mouse_cursor(Qt.IBeamCursor)
self._previous_cursor_start = -1
self._previous_cursor_end = -1 |
python | def _set_interface_isis(self, v, load=False):
"""
Setter method for interface_isis, mapped from YANG variable /routing_system/interface/ve/intf_isis/interface_isis (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_isis is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_isis() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=interface_isis.interface_isis, is_container='container', presence=False, yang_name="interface-isis", rest_name="isis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IS-IS routing protocol', u'cli-incomplete-no': None, u'alt-name': u'isis', u'sort-priority': u'131'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_isis must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=interface_isis.interface_isis, is_container='container', presence=False, yang_name="interface-isis", rest_name="isis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IS-IS routing protocol', u'cli-incomplete-no': None, u'alt-name': u'isis', u'sort-priority': u'131'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""",
})
self.__interface_isis = t
if hasattr(self, '_set'):
self._set() |
java | private static Set<String> getRelationLevelSet(SDocumentGraph graph, String namespace,
Class<? extends SRelation> type) {
Set<String> result = new TreeSet<>();
if (graph != null) {
List<? extends SRelation> edges = null;
if (type == SDominanceRelation.class) {
edges = graph.getDominanceRelations();
} else if (type == SPointingRelation.class) {
edges = graph.getPointingRelations();
} else if (type == SSpanningRelation.class) {
edges = graph.getSpanningRelations();
}
if (edges != null) {
for (SRelation<?, ?> edge : edges) {
Set<SLayer> layers = edge.getLayers();
for (SLayer layer : layers) {
if (namespace == null || namespace.equals(layer.getName())) {
for (SAnnotation anno : edge.getAnnotations()) {
result.add(anno.getQName());
}
// we got all annotations of this edge, jump to next edge
break;
} // end if namespace equals layer name
} // end for each layer
} // end for each edge
}
}
return result;
} |
python | def generate_roster_pdf(sched_act_ids, include_instructions):
r"""Generates a PDF roster for one or more.
:class:`EighthScheduledActivity`\s.
Args
sched_act_ids
The list of IDs of the scheduled activities to show in the PDF.
include_instructions
Whether instructions should be printed at the bottom of the
roster(s).
Returns a BytesIO object for the PDF.
"""
pdf_buffer = BytesIO()
h_margin = 1 * inch
v_margin = 0.5 * inch
doc = SimpleDocTemplate(pdf_buffer, pagesize=letter, rightMargin=h_margin, leftMargin=h_margin, topMargin=v_margin, bottomMargin=v_margin)
elements = []
styles = getSampleStyleSheet()
styles.add(ParagraphStyle(name="Center", alignment=TA_CENTER))
styles.add(ParagraphStyle(name="BlockLetter", fontSize=60, leading=72, alignment=TA_CENTER))
styles.add(ParagraphStyle(name="BlockLetterSmall", fontSize=30, leading=72, alignment=TA_CENTER))
styles.add(ParagraphStyle(name="BlockLetterSmallest", fontSize=20, leading=72, alignment=TA_CENTER))
styles.add(ParagraphStyle(name="ActivityAttribute", fontSize=15, leading=18, alignment=TA_RIGHT))
for i, said in enumerate(sched_act_ids):
sact = EighthScheduledActivity.objects.get(id=said)
sponsor_names = sact.get_true_sponsors().values_list("first_name", "last_name")
sponsors_str = "; ".join(l + ", " + f for f, l in sponsor_names)
room_names = sact.get_true_rooms().values_list("name", flat=True)
if len(room_names) == 1:
rooms_str = "Room " + room_names[0]
else:
rooms_str = "Rooms: " + ", ".join(r for r in room_names)
block_letter = sact.block.block_letter
if len(block_letter) < 4:
block_letter_width = 1 * inch
block_letter_width += (0.5 * inch) * (len(block_letter) - 1)
block_letter_style = "BlockLetter"
elif len(block_letter) < 7:
block_letter_width = 0.4 * inch
block_letter_width += (0.3 * inch) * (len(block_letter) - 1)
block_letter_style = "BlockLetterSmall"
else:
block_letter_width = 0.3 * inch
block_letter_width += (0.2 * inch) * (len(block_letter) - 1)
block_letter_style = "BlockLetterSmallest"
header_data = [[
Paragraph("<b>Activity ID: {}<br />Scheduled ID: {}</b>".format(sact.activity.id, sact.id), styles["Normal"]),
Paragraph("{}<br/>{}<br/>{}".format(sponsors_str, rooms_str, sact.block.date.strftime("%A, %B %-d, %Y")), styles["ActivityAttribute"]),
Paragraph(block_letter, styles[block_letter_style])
]]
header_style = TableStyle([("VALIGN", (0, 0), (0, 0), "TOP"), ("VALIGN", (1, 0), (2, 0), "MIDDLE"), ("TOPPADDING", (0, 0), (0, 0), 15),
("RIGHTPADDING", (1, 0), (1, 0), 0)])
elements.append(Table(header_data, style=header_style, colWidths=[2 * inch, None, block_letter_width]))
elements.append(Spacer(0, 10))
elements.append(Paragraph(sact.full_title, styles["Title"]))
num_members = sact.members.count()
num_members_label = "{} Student{}".format(num_members, "s" if num_members != 1 else "")
elements.append(Paragraph(num_members_label, styles["Center"]))
elements.append(Spacer(0, 5))
attendance_data = [[
Paragraph("Present", styles["Heading5"]),
Paragraph("Student Name (ID)", styles["Heading5"]),
Paragraph("Grade", styles["Heading5"])
]]
members = []
for member in sact.members.all():
members.append((member.last_name + ", " + member.first_name, (member.student_id if member.student_id else "User {}".format(member.id)),
int(member.grade) if member.grade else "?"))
members = sorted(members)
for member_name, member_id, member_grade in members:
row = ["", "{} ({})".format(member_name, member_id), member_grade]
attendance_data.append(row)
# Line commands are like this:
# op, start, stop, weight, colour, cap, dashes, join, linecount, linespacing
attendance_style = TableStyle([
("LINEABOVE", (0, 1), (2, 1), 1, colors.black, None, None, None, 2),
("LINEBELOW", (0, 1), (0, len(attendance_data)), 1, colors.black),
("TOPPADDING", (0, 1), (-1, -1), 6),
("BOTTOMPADDING", (0, 1), (-1, -1), 0),
("BOTTOMPADDING", (0, 0), (-1, 0), 5),
])
elements.append(Table(attendance_data, style=attendance_style, colWidths=[1.3 * inch, None, 0.8 * inch]))
elements.append(Spacer(0, 15))
instructions = """
<b>Highlight or circle</b> the names of students who are <b>absent</b>, and put an <b>"X"</b> next to those <b>present</b>.<br />
If a student arrives and their name is not on the roster, please send them to the <b>8th Period Office</b>.<br />
If a student leaves your activity early, please make a note. <b>Do not make any additions to the roster.</b><br />
Before leaving for the day, return the roster and any passes to 8th Period coordinator, Catherine Forrester's mailbox in the
<b>main office</b>. For questions, please call extension 5046 or 5078. Thank you!<br />"""
elements.append(Paragraph(instructions, styles["Normal"]))
if i != len(sched_act_ids) - 1:
elements.append(PageBreak())
def first_page(canvas, _):
canvas.setTitle("Eighth Activity Roster")
canvas.setAuthor("Generated by Ion")
doc.build(elements, onFirstPage=first_page)
return pdf_buffer |
python | def array_map(f, ar):
"Apply an ordinary function to all values in an array."
flat_ar = ravel(ar)
out = zeros(len(flat_ar), flat_ar.typecode())
for i in range(len(flat_ar)):
out[i] = f(flat_ar[i])
out.shape = ar.shape
return out |
python | def connection_lost(self, exc):
"""Log when connection is closed, if needed call callback."""
if exc:
log.exception('disconnected due to exception')
else:
log.info('disconnected because of close/abort.')
if self.disconnect_callback:
self.disconnect_callback(exc) |
python | def probeLine(img, p1, p2, res=100):
"""
Takes a ``vtkImageData`` and probes its scalars along a line defined by 2 points `p1` and `p2`.
.. hint:: |probeLine| |probeLine.py|_
"""
line = vtk.vtkLineSource()
line.SetResolution(res)
line.SetPoint1(p1)
line.SetPoint2(p2)
probeFilter = vtk.vtkProbeFilter()
probeFilter.SetSourceData(img)
probeFilter.SetInputConnection(line.GetOutputPort())
probeFilter.Update()
lact = Actor(probeFilter.GetOutput(), c=None) # ScalarVisibilityOn
lact.mapper.SetScalarRange(img.GetScalarRange())
return lact |
python | def get_stetson_k(self, mag, avg, err):
"""
Return Stetson K feature.
Parameters
----------
mag : array_like
An array of magnitude.
avg : float
An average value of magnitudes.
err : array_like
An array of magnitude errors.
Returns
-------
stetson_k : float
Stetson K value.
"""
residual = (mag - avg) / err
stetson_k = np.sum(np.fabs(residual)) \
/ np.sqrt(np.sum(residual * residual)) / np.sqrt(len(mag))
return stetson_k |
java | public void setFilters(java.util.Collection<NamespaceFilter> filters) {
if (filters == null) {
this.filters = null;
return;
}
this.filters = new java.util.ArrayList<NamespaceFilter>(filters);
} |
java | private void leftAddNoPredecessor(
GBSNode p,
Object new1,
NodeInsertPoint ip,
InsertNodes point)
{
p.findInsertPointInLeft(new1, ip);
point.setInsert(p, ip);
} |
java | public void pushCallbackBeanO(BeanO bean) // d662032
throws CSIException
{
if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
Tr.debug(tc, "pushCallbackBeanO: " + bean);
HandleListInterface hl = bean.reAssociateHandleList();
ivHandleListContext.beginContext(hl);
ivCallbackBeanOStack.push(bean);
} |
java | protected void writeZeroBranchSize(final BitOutputStream out,
final long value, final long max, final Bits bits) throws IOException {
assert 0 <= value;
assert max >= value;
if (SERIALIZATION_CHECKS) {
out.write(SerializationChecks.BeforeCount);
}
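// encode either with a Gaussian approximation of the binomial distribution or as a plain bounded long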
if (this.useBinomials) {
Gaussian.fromBinomial(0.5, max).encode(out, value, max);
}
else {
out.writeBoundedLong(value, 1 + max);
}
if (SERIALIZATION_CHECKS) {
out.write(SerializationChecks.AfterCount);
}
} |
python | def batch_get_documents(
self,
database,
documents,
mask=None,
transaction=None,
new_transaction=None,
read_time=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets multiple documents.
Documents returned by this method are not guaranteed to be returned in the
same order that they were requested.
Example:
>>> from google.cloud import firestore_v1beta1
>>>
>>> client = firestore_v1beta1.FirestoreClient()
>>>
>>> database = client.database_root_path('[PROJECT]', '[DATABASE]')
>>>
>>> # TODO: Initialize `documents`:
>>> documents = []
>>>
>>> for element in client.batch_get_documents(database, documents):
... # process element
... pass
Args:
database (str): The database name. In the format:
``projects/{project_id}/databases/{database_id}``.
documents (list[str]): The names of the documents to retrieve. In the format:
``projects/{project_id}/databases/{database_id}/documents/{document_path}``.
The request will fail if any of the document is not a child resource of
the given ``database``. Duplicate names will be elided.
mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields.
If a document has a field that is not present in this mask, that field will
not be returned in the response.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask`
transaction (bytes): Reads documents in a transaction.
new_transaction (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): Starts a new transaction and reads the documents.
Defaults to a read-only transaction.
The new transaction ID will be returned as the first response in the
stream.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions`
read_time (Union[dict, ~google.cloud.firestore_v1beta1.types.Timestamp]): Reads documents as they were at the given time.
This may not be older than 60 seconds.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.Timestamp`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~google.cloud.firestore_v1beta1.types.BatchGetDocumentsResponse].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "batch_get_documents" not in self._inner_api_calls:
self._inner_api_calls[
"batch_get_documents"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_get_documents,
default_retry=self._method_configs["BatchGetDocuments"].retry,
default_timeout=self._method_configs["BatchGetDocuments"].timeout,
client_info=self._client_info,
)
# Sanity check: We have some fields which are mutually exclusive;
# raise ValueError if more than one is sent.
google.api_core.protobuf_helpers.check_oneof(
transaction=transaction,
new_transaction=new_transaction,
read_time=read_time,
)
request = firestore_pb2.BatchGetDocumentsRequest(
database=database,
documents=documents,
mask=mask,
transaction=transaction,
new_transaction=new_transaction,
read_time=read_time,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("database", database)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["batch_get_documents"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
java | protected void zSetData(MithraDataObject data)
{
this.currentData = data;
this.persistenceState = PersistenceState.PERSISTED;
MithraTransaction currentTransaction = MithraManagerProvider.getMithraManager().getCurrentTransaction();
if (currentTransaction != null && zGetPortal().getTxParticipationMode(currentTransaction).mustParticipateInTxOnRead())
{
this.transactionalState = currentTransaction.getReadLockedTransactionalState(null, PersistenceState.PERSISTED);
}
} |
python | def createStream(ssc, kinesisAppName, streamName, endpointUrl, regionName,
initialPositionInStream, checkpointInterval,
storageLevel=StorageLevel.MEMORY_AND_DISK_2,
awsAccessKeyId=None, awsSecretKey=None, decoder=utf8_decoder,
stsAssumeRoleArn=None, stsSessionName=None, stsExternalId=None):
"""
Create an input stream that pulls messages from a Kinesis stream. This uses the
Kinesis Client Library (KCL) to pull messages from Kinesis.
.. note:: The given AWS credentials will get saved in DStream checkpoints if checkpointing
is enabled. Make sure that your checkpoint directory is secure.
:param ssc: StreamingContext object
:param kinesisAppName: Kinesis application name used by the Kinesis Client Library (KCL) to
update DynamoDB
:param streamName: Kinesis stream name
:param endpointUrl: Url of Kinesis service (e.g., https://kinesis.us-east-1.amazonaws.com)
:param regionName: Name of region used by the Kinesis Client Library (KCL) to update
DynamoDB (lease coordination and checkpointing) and CloudWatch (metrics)
:param initialPositionInStream: In the absence of Kinesis checkpoint info, this is the
worker's initial starting position in the stream. The
values are either the beginning of the stream per Kinesis'
limit of 24 hours (InitialPositionInStream.TRIM_HORIZON) or
the tip of the stream (InitialPositionInStream.LATEST).
:param checkpointInterval: Checkpoint interval for Kinesis checkpointing. See the Kinesis
Spark Streaming documentation for more details on the different
types of checkpoints.
:param storageLevel: Storage level to use for storing the received objects (default is
StorageLevel.MEMORY_AND_DISK_2)
:param awsAccessKeyId: AWS AccessKeyId (default is None. If None, will use
DefaultAWSCredentialsProviderChain)
:param awsSecretKey: AWS SecretKey (default is None. If None, will use
DefaultAWSCredentialsProviderChain)
:param decoder: A function used to decode value (default is utf8_decoder)
:param stsAssumeRoleArn: ARN of IAM role to assume when using STS sessions to read from
the Kinesis stream (default is None).
:param stsSessionName: Name to uniquely identify STS sessions used to read from Kinesis
stream, if STS is being used (default is None).
:param stsExternalId: External ID that can be used to validate against the assumed IAM
role's trust policy, if STS is being used (default is None).
:return: A DStream object
"""
jlevel = ssc._sc._getJavaStorageLevel(storageLevel)
jduration = ssc._jduration(checkpointInterval)
try:
# Use KinesisUtilsPythonHelper to access Scala's KinesisUtils
helper = ssc._jvm.org.apache.spark.streaming.kinesis.KinesisUtilsPythonHelper()
except TypeError as e:
if str(e) == "'JavaPackage' object is not callable":
_print_missing_jar(
"Streaming's Kinesis",
"streaming-kinesis-asl",
"streaming-kinesis-asl-assembly",
ssc.sparkContext.version)
raise
jstream = helper.createStream(ssc._jssc, kinesisAppName, streamName, endpointUrl,
regionName, initialPositionInStream, jduration, jlevel,
awsAccessKeyId, awsSecretKey, stsAssumeRoleArn,
stsSessionName, stsExternalId)
stream = DStream(jstream, ssc, NoOpSerializer())
return stream.map(lambda v: decoder(v)) |
java | final ServiceController<?> addRelativePathService(final ServiceTarget serviceTarget, final String pathName, final String path,
final boolean possiblyAbsolute, final String relativeTo) {
if (possiblyAbsolute && AbstractPathService.isAbsoluteUnixOrWindowsPath(path)) {
return addAbsolutePathService(serviceTarget, pathName, path);
} else {
return RelativePathService.addService(AbstractPathService.pathNameOf(pathName), path, possiblyAbsolute, relativeTo, serviceTarget);
}
} |
python | def merge(iterable1, *args):
"""
Returns a value of the same type as iterable1, produced by merging iterable1 with each of *args
:exception TypeError: if any parameter in args is not the same type as iterable1
Example 1:
source = ['a', 'b', 'c']
result = merge(source, [1, 2, 3])
self.assertEqual(result, ['a', 'b', 'c', 1, 2, 3])
result = merge(source, [1, 2, 3], ['x', 'y', 'z'])
self.assertEqual(result, ['a', 'b', 'c', 1, 2, 3, 'x', 'y', 'z'])
Example 2:
source = 'abc'
result = merge(source, '123')
self.assertEqual(result, 'abc123')
result = merge(source, '123', 'xyz')
self.assertEqual(result, 'abc123xyz')
Example 3:
source = ('a', 'b', 'c')
result = merge(source, (1, 2, 3))
self.assertEqual(result, ('a', 'b', 'c', 1, 2, 3))
result = merge(source, (1, 2, 3), ('x', 'y', 'z'))
self.assertEqual(result, ('a', 'b', 'c', 1, 2, 3, 'x', 'y', 'z'))
Example 4:
source = {'a': 1, 'b': 2, 'c': 3}
result = merge(source, {'x': 'm', 'y': 'n'}, {'z': '1'})
self.assertEqual(result, {'a': 1, 'b': 2, 'c': 3, 'x': 'm', 'y': 'n', 'z': '1'})
"""
result_list = list(iterable1.items()) if isinstance(iterable1, dict) else list(iterable1)
for i, other in enumerate(args, start=1):
if not isinstance(other, type(iterable1)):
raise TypeError('the parameter type at index {} does not equal the type at index 0'.format(i))
if not isinstance(other, dict):
result_list[len(result_list):len(result_list)] = list(other)
else:
result_list[len(result_list):len(result_list)] = list(other.items())
if isinstance(iterable1, str):
return ''.join(result_list)
elif isinstance(iterable1, tuple):
return tuple(result_list)
elif isinstance(iterable1, dict):
return dict(result_list)
else:
return result_list |
java | private void checkSpaceConstrain(long logFileSize) {
long spaceNeeded;
if (maxRepositorySize > 0) {
synchronized(fileList) {
initFileList(false);
long purgeSize = totalSize + logFileSize - maxRepositorySize;
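// purgeSize > 0 means adding this log file would push the repository past its configured cap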
if (debugLogger.isLoggable(Level.FINE) && LogRepositoryBaseImpl.isDebugEnabled()) {
debugLogger.logp(Level.FINE, thisClass, "checkSpaceConstrain", "total: "+totalSize+
" maxLog: "+logFileSize+" maxRepos: "+maxRepositorySize) ;
}
if (purgeSize > 0) {
purgeOldFiles(purgeSize);
}
spaceNeeded = maxRepositorySize - totalSize;
}
} else {
spaceNeeded = MAX_LOG_FILE_SIZE; // If no limit ensure that at least one log file can be written
}
LogRepositorySpaceAlert.getInstance().setRepositoryInfo(this, repositoryLocation, spaceNeeded) ;
} |
python | def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2013-06-28 - Started - Bovy (IAS)
"""
r = nu.sqrt(R**2. + z**2.)
return (2.*nu.pi*self.rc**(3.-self.alpha)/r
        *(r/self.rc*special.gamma(1.-self.alpha/2.)
          *special.gammainc(1.-self.alpha/2., (r/self.rc)**2.)
          - special.gamma(1.5-self.alpha/2.)
          *special.gammainc(1.5-self.alpha/2., (r/self.rc)**2.))) |
python | def take(
self,
indices: Sequence[int],
allow_fill: bool = False,
fill_value: Any = None
) -> ABCExtensionArray:
"""
Take elements from an array.
Parameters
----------
indices : sequence of integers
Indices to be taken.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
fill_value : any, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type, ``self.dtype.na_value``, is used.
For many ExtensionArrays, there will be two representations of
`fill_value`: a user-facing "boxed" scalar, and a low-level
physical NA value. `fill_value` should be the user-facing version,
and the implementation should handle translating that to the
physical version for processing the take if necessary.
Returns
-------
ExtensionArray
Raises
------
IndexError
When the indices are out of bounds for the array.
ValueError
When `indices` contains negative values other than ``-1``
and `allow_fill` is True.
Notes
-----
ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,
``iloc``, when `indices` is a sequence of values. Additionally,
it's called by :meth:`Series.reindex`, or any other method
that causes realignment, with a `fill_value`.
See Also
--------
numpy.take
pandas.api.extensions.take
Examples
--------
Here's an example implementation, which relies on casting the
extension array to object dtype. This uses the helper method
:func:`pandas.api.extensions.take`.
.. code-block:: python
def take(self, indices, allow_fill=False, fill_value=None):
from pandas.core.algorithms import take
# If the ExtensionArray is backed by an ndarray, then
# just pass that here instead of coercing to object.
data = self.astype(object)
if allow_fill and fill_value is None:
fill_value = self.dtype.na_value
# fill value should always be translated from the scalar
# type for the array, to the physical storage type for
# the data, before passing to take.
result = take(data, indices, fill_value=fill_value,
allow_fill=allow_fill)
return self._from_sequence(result, dtype=self.dtype)
"""
# Implementer note: The `fill_value` parameter should be a user-facing
# value, an instance of self.dtype.type. When passed `fill_value=None`,
# the default of `self.dtype.na_value` should be used.
# This may differ from the physical storage type your ExtensionArray
# uses. In this case, your implementation is responsible for casting
# the user-facing type to the storage type, before using
# pandas.api.extensions.take
raise AbstractMethodError(self) |
python | def xmlrpc_notify(self, app_id, token_or_token_list, aps_dict_or_list):
""" Sends push notifications to the Apple APNS server. Multiple
notifications can be sent by pairing the token/notification
arguments in lists [token1, token2], [notification1, notification2].
Arguments:
app_id provisioned app_id to send to
token_or_token_list token to send the notification or a list of tokens
aps_dict_or_list notification dicts or a list of notifications
Returns:
None
"""
d = self.apns_service(app_id).write(
encode_notifications(
[t.replace(' ', '') for t in token_or_token_list]
if (type(token_or_token_list) is list)
else token_or_token_list.replace(' ', ''),
aps_dict_or_list))
if d:
def _finish_err(r):
# so far, the only error that could really come of this
# request is a timeout, since APNS simply terminates connections
# that are made unsuccessfully, which twisted will try endlessly
# to reconnect to; we time out and notify the client
raise xmlrpc.Fault(500, 'Connection to the APNS server could not be made.')
return d.addCallbacks(lambda r: None, _finish_err) |
java | public void organizationName_service_exchangeService_account_primaryEmailAddress_protocol_PUT(String organizationName, String exchangeService, String primaryEmailAddress, OvhExchangeAccountProtocol body) throws IOException {
String qPath = "/email/exchange/{organizationName}/service/{exchangeService}/account/{primaryEmailAddress}/protocol";
StringBuilder sb = path(qPath, organizationName, exchangeService, primaryEmailAddress);
exec(qPath, "PUT", sb.toString(), body);
} |
python | def apply_to_model(self, model):
''' Apply this theme to a model.
.. warning::
Typically, don't call this method directly. Instead, set the theme
on the :class:`~bokeh.document.Document` the model is a part of.
'''
model.apply_theme(self._for_class(model.__class__))
# a little paranoia because it would be Bad(tm) to mess
# this up... would be nicer if python had a way to freeze
# the dict.
if len(_empty_dict) > 0:
raise RuntimeError("Somebody put stuff in _empty_dict") |
java | public void extractFundmental( DMatrixRMaj F21 , DMatrixRMaj F31 ) {
// compute the camera matrices one column at a time
for( int i = 0; i < 3; i++ ) {
DMatrixRMaj T = tensor.getT(i);
GeometryMath_F64.mult(T,e3,temp0);
GeometryMath_F64.cross(e2,temp0,column);
F21.set(0,i,column.x);
F21.set(1,i,column.y);
F21.set(2,i,column.z);
GeometryMath_F64.multTran(T,e2,temp0);
GeometryMath_F64.cross(e3,temp0,column);
F31.set(0,i,column.x);
F31.set(1,i,column.y);
F31.set(2,i,column.z);
}
} |
python | def get_thumbpath(self, path, key, format):
"""Return the relative path of the thumbnail.
path:
path of the source image
key:
key of the thumbnail
format:
thumbnail file extension
"""
relpath = os.path.dirname(path)
thumbsdir = self.get_thumbsdir(path)
name, _ = os.path.splitext(os.path.basename(path))
name = '{}.{}.{}'.format(name, key, format.lower())
return os.path.join(relpath, thumbsdir, name) |
python | def _format_quantum(val, unit):
"""
Format a quantity with reasonable units.
:param val: The value (just the value, not a quantity)
:param unit: Unit (something that can be fed to quanta).
:return: A string representation of this quantity.
>>> _format_quantum(3, 'm')
"3 m"
>>> _format_quantum(4914741782.503475, 's')
"4.91474e+09 s"
"""
q=quantity(val,unit)
if q.canonical().get_unit() in ['rad','s']:
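# format via a stand-in 'm' quantity and swap the unit suffix back, so rad/s values keep the caller's unit instead of being canonicalized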
return quantity(val, 'm').formatted()[:-1]+unit
else:
return q.formatted() |
java | public static WindowOver<Double> regrAvgy(Expression<? extends Number> arg1, Expression<? extends Number> arg2) {
return new WindowOver<Double>(Double.class, SQLOps.REGR_AVGY, arg1, arg2);
} |
python | def nlmsg_put(n, pid, seq, type_, payload, flags):
"""Add a Netlink message header to a Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L503
Adds or overwrites the Netlink message header in an existing message object.
Positional arguments:
n -- Netlink message (nl_msg class instance).
pid -- Netlink process id or NL_AUTO_PID (c_uint32).
seq -- sequence number of message or NL_AUTO_SEQ (c_uint32).
type_ -- message type (integer).
payload -- length of message payload (integer).
flags -- message flags (integer).
Returns:
nlmsghdr class instance or None.
"""
if n.nm_nlh.nlmsg_len < libnl.linux_private.netlink.NLMSG_HDRLEN:
raise BUG
nlh = n.nm_nlh
nlh.nlmsg_type = type_
nlh.nlmsg_flags = flags
nlh.nlmsg_pid = pid
nlh.nlmsg_seq = seq
_LOGGER.debug('msg 0x%x: Added netlink header type=%d, flags=%d, pid=%d, seq=%d', id(n), type_, flags, pid, seq)
if payload > 0 and nlmsg_reserve(n, payload, libnl.linux_private.netlink.NLMSG_ALIGNTO) is None:
return None
return nlh |
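A hedged usage sketch; it assumes the Python port also exposes nlmsg_alloc() and the NL_AUTO_PID / NL_AUTO_SEQ constants mirroring libnl's C API, none of which are shown above:
# Hedged sketch; nlmsg_alloc(), NL_AUTO_PID and NL_AUTO_SEQ are assumptions
# about the port mirroring libnl's C API.
msg = nlmsg_alloc()
nlh = nlmsg_put(msg, NL_AUTO_PID, NL_AUTO_SEQ, 16, 0, 0)  # 16 == RTM_NEWLINK
if nlh is None:
    raise RuntimeError('failed to reserve payload space')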
python | def update_firewall_policy(self, firewall_policy, body=None):
"""Updates a firewall policy."""
return self.put(self.firewall_policy_path % (firewall_policy),
body=body) |
python | def minimum_address(self):
"""The minimum address of the data, or ``None`` if the file is empty.
"""
minimum_address = self._segments.minimum_address
if minimum_address is not None:
minimum_address //= self.word_size_bytes
return minimum_address |
python | def _sample_cell(args, cell_body):
"""Implements the BigQuery sample magic for sampling queries
  The supported syntax is:
%%bq sample <args>
[<inline SQL>]
Args:
args: the optional arguments following '%%bq sample'.
cell_body: optional contents of the cell
Returns:
The results of executing the sampling query, or a profile of the sample data.
"""
env = google.datalab.utils.commands.notebook_environment()
config = google.datalab.utils.commands.parse_config(cell_body, env, False) or {}
parameters = config.get('parameters') or []
if parameters:
jsonschema.validate({'parameters': parameters}, BigQuerySchema.QUERY_PARAMS_SCHEMA)
query = None
table = None
view = None
query_params = None
if args['query']:
query = google.datalab.utils.commands.get_notebook_item(args['query'])
if query is None:
raise Exception('Cannot find query %s.' % args['query'])
query_params = get_query_parameters(args, cell_body)
elif args['table']:
table_name = google.datalab.bigquery.Query.resolve_parameters(args['table'], parameters)
table = _get_table(table_name)
if not table:
raise Exception('Could not find table %s' % args['table'])
elif args['view']:
view = google.datalab.utils.commands.get_notebook_item(args['view'])
if not isinstance(view, bigquery.View):
raise Exception('Could not find view %s' % args['view'])
else:
    raise Exception('A query, table, or view is needed to sample')
# parse comma-separated list of fields
fields = args['fields'].split(',') if args['fields'] else None
count = int(args['count']) if args['count'] else None
percent = int(args['percent']) if args['percent'] else None
sampling = Sampling._auto(method=args['method'], fields=fields, count=count, percent=percent,
key_field=args['key_field'], ascending=(args['order'] == 'ascending'))
context = google.datalab.utils._utils._construct_context_for_args(args)
if view:
query = bigquery.Query.from_view(view)
elif table:
query = bigquery.Query.from_table(table)
if args['profile']:
results = query.execute(QueryOutput.dataframe(), sampling=sampling,
context=context, query_params=query_params).result()
else:
results = query.execute(QueryOutput.table(), sampling=sampling, context=context,
query_params=query_params).result()
if args['verbose']:
print(query.sql)
if args['profile']:
return google.datalab.utils.commands.profile_df(results)
else:
return results |
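A hedged example of invoking this magic in a notebook cell; the flag spellings are inferred from the args keys read above and are an assumption:
%%bq sample --table my_dataset.my_table --count 10 --fields id,name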
java | public void unsubscribeFromViewEvent(Class<? extends SystemEvent> systemEvent,
SystemEventListener listener) {
if (systemEvent == null) {
throw new NullPointerException();
}
if (listener == null) {
throw new NullPointerException();
}
if (viewListeners != null) {
List<SystemEventListener> listeners = viewListeners.get(systemEvent);
if (listeners != null) {
listeners.remove(listener);
}
}
} |
java | public static String docToString1(Document dom) {
StringWriter sw = new StringWriter();
DOM2Writer.serializeAsXML(dom, sw);
return sw.toString();
} |
java | public static byte[] generateRandomUUIDBytes()
{
if (rand == null)
rand = new SecureRandom();
byte[] buffer = new byte[16];
rand.nextBytes(buffer);
        // Set version to 4 (random)
buffer[6] = (byte) ((buffer[6] & 0x0f) | 0x40);
        // Set variant to IETF (RFC 4122)
buffer[8] = (byte) ((buffer[8] & 0x3f) | 0x80);
return buffer;
} |
java | @BetaApi
public final Operation simulateMaintenanceEventInstance(ProjectZoneInstanceName instance) {
SimulateMaintenanceEventInstanceHttpRequest request =
SimulateMaintenanceEventInstanceHttpRequest.newBuilder()
.setInstance(instance == null ? null : instance.toString())
.build();
return simulateMaintenanceEventInstance(request);
} |
python | async def kickban(self, channel, target, reason=None, range=0):
"""
Kick and ban user from channel.
"""
await self.ban(channel, target, range)
await self.kick(channel, target, reason) |
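A hedged usage sketch from another coroutine; `bot` is an assumed name for the client instance this method is bound to:
await bot.kickban('#channel', 'spammer', reason='flooding')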
java | public static void joinUninterruptibly(Thread toJoin,
long timeout, TimeUnit unit) {
Preconditions.checkNotNull(toJoin);
boolean interrupted = false;
try {
long remainingNanos = unit.toNanos(timeout);
long end = System.nanoTime() + remainingNanos;
while (true) {
try {
// TimeUnit.timedJoin() treats negative timeouts just like zero.
NANOSECONDS.timedJoin(toJoin, remainingNanos);
return;
} catch (InterruptedException e) {
interrupted = true;
remainingNanos = end - System.nanoTime();
}
}
} finally {
if (interrupted) {
Thread.currentThread().interrupt();
}
}
} |
python | def stub(base_class=None, **attributes):
"""creates a python class on-the-fly with the given keyword-arguments
as class-attributes accessible with .attrname.
    The new class inherits from ``base_class``, defaulting to ``object``.
Use this to mock rather than stub.
"""
if base_class is None:
base_class = object
members = {
"__init__": lambda self: None,
"__new__": lambda *args, **kw: object.__new__(
*args, *kw
), # remove __new__ and metaclass behavior from object
"__metaclass__": None,
}
members.update(attributes)
# let's create a python class on-the-fly :)
return type(f"{base_class.__name__}Stub", (base_class,), members)() |
python | def calculate_dates(self, dt):
"""
Given a dt, find that day's close and period start (close - offset).
"""
period_end = self.cal.open_and_close_for_session(
self.cal.minute_to_session_label(dt),
)[1]
# Align the market close time here with the execution time used by the
# simulation clock. This ensures that scheduled functions trigger at
# the correct times.
self._period_end = self.cal.execution_time_from_close(period_end)
self._period_start = self._period_end - self.offset
self._period_close = self._period_end |
java | public Set<DuracloudUser> getAccountUsers(AccountInfo account) {
DuracloudRightsRepo rightsRepo = repoMgr.getRightsRepo();
List<AccountRights> acctRights =
rightsRepo.findByAccountId(account.getId());
Set<DuracloudUser> users = new HashSet<>();
for (AccountRights rights : acctRights) {
DuracloudUser user = rights.getUser();
// make sure only the rights for this account are set
user.getAccountRights().clear();
user.getAccountRights().add(rights);
users.add(user);
}
return users;
} |
java | public static Class getArrayClass(Class c) {
if (c.getComponentType().isArray())
return getArrayClass(c.getComponentType());
else
return c.getComponentType();
} |
java | public SQLException exceptionWithQuery(String sql, SQLException sqlException,
boolean explicitClosed) {
if (explicitClosed) {
return new SQLException(
"Connection has explicitly been closed/aborted.\nQuery is: " + subQuery(sql),
sqlException.getSQLState(),
sqlException.getErrorCode(), sqlException.getCause());
}
if (options.dumpQueriesOnException || sqlException.getErrorCode() == 1064) {
return new SQLException(sqlException.getMessage()
+ "\nQuery is: " + subQuery(sql)
+ "\njava thread: " + Thread.currentThread().getName(),
sqlException.getSQLState(),
sqlException.getErrorCode(), sqlException.getCause());
}
return sqlException;
} |
java | private void throwExIntParam(MethodVisitor mv, Class<?> exCls) {
String exSig = Type.getInternalName(exCls);
mv.visitTypeInsn(NEW, exSig);
mv.visitInsn(DUP);
mv.visitLdcInsn("mapping " + this.className + " failed to map field:");
mv.visitVarInsn(ILOAD, 2);
mv.visitMethodInsn(INVOKESTATIC, "java/lang/Integer", "toString", "(I)Ljava/lang/String;");
mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/String", "concat", "(Ljava/lang/String;)Ljava/lang/String;");
mv.visitMethodInsn(INVOKESPECIAL, exSig, "<init>", "(Ljava/lang/String;)V");
mv.visitInsn(ATHROW);
} |
java | private Pair<EnumFacing, Point> getClosest(List<Pair<EnumFacing, Point>> points)
{
double distance = Double.MAX_VALUE;
Pair<EnumFacing, Point> ret = null;
for (Pair<EnumFacing, Point> pair : points)
{
double d = Point.distanceSquared(src, pair.getRight());
if (distance > d)
{
distance = d;
ret = pair;
}
}
return ret;
} |
java | private boolean checkIfThenConditionsMet(List<Integer> rGroupNumbers, List<Integer[]> distributions) {
for (int outer = 0; outer < rGroupNumbers.size(); outer++) {
int rgroupNum = rGroupNumbers.get(outer);
if (allZeroArray(distributions.get(outer))) {
for (int inner = 0; inner < rGroupNumbers.size(); inner++) {
int rgroupNum2 = rGroupNumbers.get(inner);
if (!allZeroArray(distributions.get(inner))) {
RGroupList rgrpList = rGroupDefinitions.get(rgroupNum2);
if (rgrpList.getRequiredRGroupNumber() == rgroupNum) {
logger.info(" Rejecting >> all 0 for " + rgroupNum + " but requirement found from "
+ rgrpList.getRGroupNumber());
return false;
}
}
}
}
}
return true;
} |
python | def stop(self, force=False, wait=False):
"""
Terminate all VMs in this cluster and delete its repository.
:param bool force:
          remove cluster from storage even if not all nodes could be stopped.
        :param bool wait:
          wait for each node to actually terminate before returning.
        """
log.debug("Stopping cluster `%s` ...", self.name)
failed = self._stop_all_nodes(wait)
if failed:
if force:
self._delete_saved_data()
log.warning(
"Not all cluster nodes have been terminated."
" However, as requested, data about the cluster"
" has been removed from local storage.")
else:
self.repository.save_or_update(self)
log.warning(
"Not all cluster nodes have been terminated."
" Fix errors above and re-run `elasticluster stop %s`",
self.name)
else:
self._delete_saved_data() |
python | def login(self, username, password=None, email=None, registry=None, reauth=False, **kwargs):
"""
Login to a Docker registry server.
:param username: User name for login.
:type username: unicode | str
:param password: Login password; may be ``None`` if blank.
:type password: unicode | str
:param email: Optional; email address for login.
:type email: unicode | str
:param registry: Optional registry URL to log in to. Uses the Docker index by default.
:type registry: unicode | str
:param reauth: Re-authenticate, even if the login has been successful before.
:type reauth: bool
:param kwargs: Additional kwargs to :meth:`docker.client.Client.login`.
:return: ``True`` if the login has succeeded, or if it has not been necessary as it succeeded before. ``False``
otherwise.
:rtype: bool
"""
response = super(DockerClientWrapper, self).login(username, password, email, registry, reauth=reauth, **kwargs)
return response.get('Status') == 'Login Succeeded' or response.get('username') == username |
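A hedged usage sketch; `client` and the registry URL are assumptions for illustration, only the signature comes from the snippet:
if client.login('alice', password='s3cret', registry='registry.example.com'):
    print('login succeeded (or was already cached)')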
python | def match_similar(base, items):
"""Get the most similar matching item from a list of items.
@param base: base item to locate best match
@param items: list of items for comparison
@return: most similar matching item or None
"""
finds = list(find_similar(base, items))
if finds:
return max(finds, key=base.similarity) # TODO: make O(n)
return None |
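A hedged sketch of the calling contract; the `Song` class and its toy similarity score are assumptions standing in for whatever items the caller compares (`find_similar` is defined elsewhere in the same module):
# Hypothetical item type: `base.similarity(other)` must return a comparable score.
class Song:
    def __init__(self, title):
        self.title = title
    def similarity(self, other):
        # toy score: shared characters between titles
        return len(set(self.title) & set(other.title))

base = Song('help')
items = [Song('hello'), Song('yesterday')]
best = max(items, key=base.similarity)  # the same selection match_similar performs
print(best.title)  # hello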
python | def wait_until_element_attribute_is(self, element, attribute, value, timeout=None):
"""Search element and wait until the requested attribute contains the expected value
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:param attribute: attribute belonging to the element
:param value: expected value for the attribute of the element
:param timeout: max time to wait
:returns: the web element if the element's attribute contains the expected value
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
:raises TimeoutException: If the element's attribute does not contain the expected value after the timeout
"""
return self._wait_until(self._expected_condition_value_in_element_attribute, (element, attribute, value), timeout) |
python | def send_feature_report(self, data, report_id=0x00):
"""
Send a Feature report to a HID device.
Feature reports are sent over the Control endpoint as a Set_Report
transfer.
Parameters:
            data       The data to send
            report_id  The report ID prepended to the data (0x00 by default)
Returns:
This function returns the actual number of bytes written
"""
if not self._is_open:
raise HIDException("HIDDevice not open")
report = bytearray([report_id]) + bytearray(data)
cdata = ffi.new("const unsigned char[]", bytes(report))
bytes_written = hidapi.hid_send_feature_report(self._device, cdata, len(report))
if bytes_written == -1:
raise HIDException("Failed to send feature report to HID device")
return bytes_written |
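A hedged usage sketch; `dev` is assumed to be an already-opened HIDDevice instance, only the method signature comes from the snippet:
payload = [0x01, 0x02, 0x03]
written = dev.send_feature_report(payload, report_id=0x05)
# The transfer includes the report-ID byte, so expect len(payload) + 1 here.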
java | protected void initMembers() {
synchronized (LOCK) {
m_resourceInitHandlers = new ArrayList<I_CmsResourceInit>();
m_requestHandlers = new HashMap<String, I_CmsRequestHandler>();
m_systemInfo = new CmsSystemInfo();
m_exportPoints = Collections.emptySet();
m_defaultUsers = new CmsDefaultUsers();
m_localeManager = new CmsLocaleManager(Locale.ENGLISH);
m_sessionManager = new CmsSessionManager();
m_runtimeProperties = new Hashtable<Object, Object>();
// the default event manager must be available because the configuration already registers events
m_eventManager = new CmsEventManager();
// default link manager is required for test cases
m_linkManager = new CmsLinkManager(new CmsDefaultLinkSubstitutionHandler());
}
} |
java | public void setFloating(ICalProperty property, boolean enable) {
if (enable) {
floatingProperties.add(property);
} else {
removeIdentity(floatingProperties, property);
}
} |
java | private void sendIdentify(WebSocket websocket) {
ObjectNode identifyPacket = JsonNodeFactory.instance.objectNode()
.put("op", GatewayOpcode.IDENTIFY.getCode());
ObjectNode data = identifyPacket.putObject("d");
String token = api.getPrefixedToken();
data.put("token", token)
.put("compress", true)
.put("large_threshold", 250)
.putObject("properties")
.put("$os", System.getProperty("os.name"))
.put("$browser", "Javacord")
.put("$device", "Javacord")
.put("$referrer", "")
.put("$referring_domain", "");
if (api.getTotalShards() > 1) {
data.putArray("shard").add(api.getCurrentShard()).add(api.getTotalShards());
}
// remove eventually still registered listeners
synchronized (identifyFrameListeners) {
websocket.removeListeners(identifyFrameListeners);
identifyFrameListeners.clear();
}
WebSocketFrame identifyFrame = WebSocketFrame.createTextFrame(identifyPacket.toString());
lastSentFrameWasIdentify.set(identifyFrame, false);
WebSocketAdapter identifyFrameListener = new WebSocketAdapter() {
@Override
public void onFrameSent(WebSocket websocket, WebSocketFrame frame) {
if (lastSentFrameWasIdentify.isMarked()) {
// sending non-heartbeat frame after identify was sent => unset mark
if (!nextHeartbeatFrame.compareAndSet(frame, null)) {
lastSentFrameWasIdentify.set(null, false);
websocket.removeListener(this);
identifyFrameListeners.remove(this);
}
} else {
// identify frame is actually sent => set the mark
if (lastSentFrameWasIdentify.compareAndSet(frame, null, false, true)) {
lastIdentificationPerAccount.put(token, System.currentTimeMillis());
connectionDelaySemaphorePerAccount.get(token).release();
}
}
}
};
identifyFrameListeners.add(identifyFrameListener);
websocket.addListener(identifyFrameListener);
logger.debug("Sending identify packet");
websocket.sendFrame(identifyFrame);
} |
java | public static CommerceAccountOrganizationRel fetchByCommerceAccountId_First(
long commerceAccountId,
OrderByComparator<CommerceAccountOrganizationRel> orderByComparator) {
return getPersistence()
.fetchByCommerceAccountId_First(commerceAccountId,
orderByComparator);
} |
java | private void squaresToPositionList() {
this.positionPatterns.reset();
List<DetectPolygonFromContour.Info> infoList = squareDetector.getPolygonInfo();
for (int i = 0; i < infoList.size(); i++) {
DetectPolygonFromContour.Info info = infoList.get(i);
// The test below has been commented out because the new external only contour
// detector discards all information related to internal contours
// squares with no internal contour cannot possibly be a finder pattern
// if( !info.hasInternal() )
// continue;
// See if the appearance matches a finder pattern
double grayThreshold = (info.edgeInside+info.edgeOutside)/2;
if( !checkPositionPatternAppearance(info.polygon,(float)grayThreshold))
continue;
// refine the edge estimate
squareDetector.refine(info);
PositionPatternNode pp = this.positionPatterns.grow();
pp.reset();
pp.square = info.polygon;
pp.grayThreshold = grayThreshold;
graph.computeNodeInfo(pp);
}
} |
java | private void discoverContent(Activity activity) {
discoveryRepeatCnt_ = 0;
if (discoveredViewList_.size() < cdManifest_.getMaxViewHistorySize()) { // check if max discovery views reached
handler_.removeCallbacks(readContentRunnable);
lastActivityReference_ = new WeakReference<>(activity);
handler_.postDelayed(readContentRunnable, VIEW_SETTLE_TIME);
}
} |
java | private static List<UsbDevice> collect(final UsbDevice device)
{
final List<UsbDevice> l = new ArrayList<>();
if (device.isUsbHub())
getAttachedDevices((UsbHub) device).forEach(d -> l.addAll(collect(d)));
else
l.add(device);
return l;
} |
python | def create_model(self, role=None, image=None, predictor_cls=None, serializer=None, deserializer=None,
content_type=None, accept=None, vpc_config_override=vpc_utils.VPC_CONFIG_DEFAULT, **kwargs):
"""
Create a model to deploy.
Args:
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used during
transform jobs. If not specified, the role from the Estimator will be used.
image (str): An container image to use for deploying the model. Defaults to the image used for training.
predictor_cls (RealTimePredictor): The predictor class to use when deploying the model.
serializer (callable): Should accept a single argument, the input data, and return a sequence
of bytes. May provide a content_type attribute that defines the endpoint request content type
deserializer (callable): Should accept two arguments, the result data and the response content type,
                and return a sequence of bytes. May provide a content_type attribute that defines the endpoint
response Accept content type.
content_type (str): The invocation ContentType, overriding any content_type from the serializer
accept (str): The invocation Accept, overriding any accept from the deserializer.
vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
The serializer, deserializer, content_type, and accept arguments are only used to define a default
RealTimePredictor. They are ignored if an explicit predictor class is passed in. Other arguments
are passed through to the Model class.
Returns: a Model ready for deployment.
"""
if predictor_cls is None:
def predict_wrapper(endpoint, session):
return RealTimePredictor(endpoint, session, serializer, deserializer, content_type, accept)
predictor_cls = predict_wrapper
role = role or self.role
return Model(self.model_data, image or self.train_image(), role,
vpc_config=self.get_vpc_config(vpc_config_override),
sagemaker_session=self.sagemaker_session, predictor_cls=predictor_cls, **kwargs) |
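A hedged usage sketch after `estimator.fit(...)`; the role ARN and instance type are placeholders, not values from the snippet:
model = estimator.create_model(role='arn:aws:iam::123456789012:role/SageMakerRole')
predictor = model.deploy(initial_instance_count=1, instance_type='ml.m5.xlarge')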
java | public Wife addWifeToFamily(final Family family, final Person person) {
if (family == null || person == null) {
return new Wife();
}
final FamS famS = new FamS(person, "FAMS",
new ObjectId(family.getString()));
final Wife wife = new Wife(family, "Wife",
new ObjectId(person.getString()));
family.insert(wife);
person.insert(famS);
return wife;
} |
java | private SerializationPolicy getSerializationPolicy(CmsObject cms) {
boolean online = cms.getRequestContext().getCurrentProject().isOnlineProject();
if (online && (m_serPolicyOnline != null)) {
return m_serPolicyOnline;
} else if (!online && (m_serPolicyOffline != null)) {
return m_serPolicyOffline;
}
SerializationPolicy serializationPolicy = null;
// Open the RPC resource file and read its contents
InputStream is = null;
try {
// check if this is a static resource request
if (m_serializationPolicyPath.startsWith(OpenCms.getSystemInfo().getStaticResourceContext())) {
URL resourceURL = CmsStaticResourceHandler.getStaticResourceURL(m_serializationPolicyPath);
URLConnection connection;
connection = resourceURL.openConnection();
is = connection.getInputStream();
} else {
// try reading from the RFS
String rfsPath = m_serializationPolicyPath;
if (rfsPath.startsWith(OpenCms.getSystemInfo().getContextPath())) {
rfsPath = rfsPath.substring(OpenCms.getSystemInfo().getContextPath().length());
}
rfsPath = CmsStringUtil.joinPaths(OpenCms.getSystemInfo().getWebApplicationRfsPath(), rfsPath);
File policyFile = new File(rfsPath);
if (policyFile.exists() && policyFile.canRead()) {
is = new FileInputStream(policyFile);
} else {
// the file does not exist in the RFS, try the VFS
String policyPath = OpenCms.getLinkManager().getRootPath(cms, m_serializationPolicyPath);
is = new ByteArrayInputStream(cms.readFile(policyPath).getContents());
}
}
} catch (Exception ex) {
// most likely file not found
String message = "ERROR: The serialization policy file '"
+ m_serializationPolicyPath
+ "' was not found; did you forget to include it in this deployment?";
LOG.warn(message);
LOG.warn(ex.getLocalizedMessage(), ex);
}
if (is == null) {
return new CmsDummySerializationPolicy();
}
// read the policy
try {
serializationPolicy = SerializationPolicyLoader.loadFromStream(is, null);
} catch (ParseException e) {
LOG.error("ERROR: Failed to parse the policy file '" + m_serializationPolicyPath + "'", e);
} catch (IOException e) {
LOG.error("ERROR: Could not read the policy file '" + m_serializationPolicyPath + "'", e);
} finally {
try {
is.close();
} catch (@SuppressWarnings("unused") IOException e) {
// Ignore this error
}
}
if (online) {
m_serPolicyOnline = serializationPolicy;
} else {
m_serPolicyOffline = serializationPolicy;
}
return serializationPolicy;
} |
java | public void itemStateChanged(ItemEvent e) {
if (e.getStateChange() == ItemEvent.SELECTED) {
int index = comboBox.getSelectedIndex();
if ((index >= 0) && (index != month)) {
setMonth(index, false);
}
}
} |
python | def serialize_to_json(result, unpicklable=False):
"""Serializes output as JSON and writes it to console output wrapped with special prefix and suffix
:param result: Result to return
:param unpicklable: If True adds JSON can be deserialized as real object.
When False will be deserialized as dictionary
"""
json = jsonpickle.encode(result, unpicklable=unpicklable)
result_for_output = str(json)
return result_for_output |
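With unpicklable=False, plain structures encode to ordinary JSON:
print(serialize_to_json({'status': 'ok', 'count': 3}))
# {"status": "ok", "count": 3}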
java | public void setTextRotationAlignment(@Property.TEXT_ROTATION_ALIGNMENT String value) {
PropertyValue propertyValue = textRotationAlignment(value);
constantPropertyUsageMap.put(PROPERTY_TEXT_ROTATION_ALIGNMENT, propertyValue);
layer.setProperties(propertyValue);
} |
python | def to_igraph(self, attribute="weight", **kwargs):
"""Convert to an igraph Graph
Uses the igraph.Graph.Weighted_Adjacency constructor
Parameters
----------
attribute : str, optional (default: "weight")
kwargs : additional arguments for igraph.Graph.Weighted_Adjacency
"""
try:
import igraph as ig
except ImportError:
raise ImportError("Please install igraph with "
"`pip install --user python-igraph`.")
try:
W = self.W
except AttributeError:
# not a pygsp graph
W = self.K.copy()
W = utils.set_diagonal(W, 0)
return ig.Graph.Weighted_Adjacency(utils.to_dense(W).tolist(),
attr=attribute, **kwargs) |
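A hedged usage sketch; `G` is assumed to be a graph object exposing this method (a pygsp graph with W, or one holding a kernel matrix K):
ig = G.to_igraph(attribute="weight")
print(ig.summary())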
python | def p_operation_definition4(self, p):
"""
operation_definition : operation_type name selection_set
"""
p[0] = self.operation_cls(p[1])(selections=p[3], name=p[2]) |
python | def _add_text_size_ngrams(self, text_id, size, ngrams):
"""Adds `ngrams`, that are of size `size`, to the data store.
The added `ngrams` are associated with `text_id`.
:param text_id: database ID of text associated with `ngrams`
:type text_id: `int`
:param size: size of n-grams
:type size: `int`
:param ngrams: n-grams to be added
:type ngrams: `collections.Counter`
"""
unique_ngrams = len(ngrams)
self._logger.info('Adding {} unique {}-grams'.format(
unique_ngrams, size))
parameters = [[text_id, ngram, size, count]
for ngram, count in ngrams.items()]
with self._conn:
self._conn.execute(constants.INSERT_TEXT_HAS_NGRAM_SQL,
[text_id, size, unique_ngrams])
self._conn.executemany(constants.INSERT_NGRAM_SQL, parameters) |
python | def set_proxy(self, proxy):
""" Sets a HTTPS proxy to query the Twitter API
        :param proxy: A string containing a HTTPS proxy \
e.g. ``set_proxy("my.proxy.com:8080")``.
:raises: TwitterSearchException
"""
if isinstance(proxy, str if py3k else basestring):
self.__proxy = proxy
else:
raise TwitterSearchException(1009) |
python | def compare(ver1='', oper='==', ver2='', cmp_func=None, ignore_epoch=False):
'''
Compares two version numbers. Accepts a custom function to perform the
cmp-style version comparison, otherwise uses version_cmp().
'''
cmp_map = {'<': (-1,), '<=': (-1, 0), '==': (0,),
'>=': (0, 1), '>': (1,)}
if oper not in ('!=',) and oper not in cmp_map:
log.error('Invalid operator \'%s\' for version comparison', oper)
return False
if cmp_func is None:
cmp_func = version_cmp
cmp_result = cmp_func(ver1, ver2, ignore_epoch=ignore_epoch)
if cmp_result is None:
return False
# Check if integer/long
if not isinstance(cmp_result, numbers.Integral):
log.error('The version comparison function did not return an '
'integer/long.')
return False
if oper == '!=':
return cmp_result not in cmp_map['==']
else:
# Gracefully handle cmp_result not in (-1, 0, 1).
if cmp_result < -1:
cmp_result = -1
elif cmp_result > 1:
cmp_result = 1
return cmp_result in cmp_map[oper] |
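A quick sketch of the operator map in use, assuming the default version_cmp backend orders these versions naturally:
assert compare('1.2.3', '>=', '1.2.0')
assert not compare('1.2.3', '<', '1.2.0')
assert compare('2.0', '!=', '2.1')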
java | public ServiceFuture<PublicIPPrefixInner> beginUpdateTagsAsync(String resourceGroupName, String publicIpPrefixName, final ServiceCallback<PublicIPPrefixInner> serviceCallback) {
return ServiceFuture.fromResponse(beginUpdateTagsWithServiceResponseAsync(resourceGroupName, publicIpPrefixName), serviceCallback);
} |
python | def estimate(self,param,burn=None,clip=10.0,alpha=0.32):
""" Estimate parameter value and uncertainties """
# FIXME: Need to add age and metallicity to composite isochrone params (currently properties)
if param not in list(self.samples.names) + list(self.source.params) + ['age','metallicity']:
msg = 'Unrecognized parameter: %s'%param
raise KeyError(msg)
# If the parameter is in the samples
if param in self.samples.names:
if param.startswith('position_angle'):
return self.estimate_position_angle(param,burn=burn,
clip=clip,alpha=alpha)
return self.samples.peak_interval(param,burn=burn,clip=clip,alpha=alpha)
mle = self.get_mle()
errors = [np.nan,np.nan]
# Set default value to the MLE value
if param in self.source.params:
err = self.source.params[param].errors
if err is not None: errors = err
# For age and metallicity from composite isochrone
return [float(mle[param]),errors] |