language | func_code_string |
---|---|
java | @Override
public ListProvisionedCapacityResult listProvisionedCapacity(ListProvisionedCapacityRequest request) {
request = beforeClientExecution(request);
return executeListProvisionedCapacity(request);
} |
java | public java.util.List<VirtualMFADevice> getVirtualMFADevices() {
if (virtualMFADevices == null) {
virtualMFADevices = new com.amazonaws.internal.SdkInternalList<VirtualMFADevice>();
}
return virtualMFADevices;
} |
python | def rest_post(url, data, timeout, show_error=False):
'''Call rest post method'''
try:
response = requests.post(url, headers={'Accept': 'application/json', 'Content-Type': 'application/json'},\
data=data, timeout=timeout)
return response
except Exception as exception:
if show_error:
print_error(exception)
return None |
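A hypothetical usage sketch for rest_post; the URL and payload are illustrative placeholders (and `requests` is assumed imported), not part of the original snippet.

import json
response = rest_post('http://localhost:8080/api/v1/experiment',
                     json.dumps({'status': 'running'}), timeout=20, show_error=True)
if response is not None and response.status_code == 200:
    print(response.json())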
java | public static void main(final String[] args) {
ByteArray8Holder b1 = new ByteArray8Holder(new byte[] {
0, 0, 0, 0, 0, 0, 0, 1
});
ByteArray8Holder b2 = new ByteArray8Holder(new byte[] {
0, 0, 0, 0, 0, 0, 0, 2
});
ByteArray8Holder b2b = new ByteArray8Holder(new byte[] {
0, 0, 0, 0, 0, 0, 0, 2
});
System.out.println("compareTo=" + Integer.valueOf(0).compareTo(Integer.valueOf(1)));
System.out.println("b1.compareTo(b2)=" + b1.compareTo(b2));
System.out.println("b1.equals(b2)=" + b1.equals(b2));
System.out.println("b2.equals(b2b)=" + b2.equals(b2b));
System.out.println("b1.hashCode()=" + b1.hashCode());
System.out.println("b2.hashCode()=" + b2.hashCode());
System.out.println("b2b.hashCode()=" + b2b.hashCode());
System.out.println("b2b.toString()=" + b2b.toString());
} |
python | def _save_files(self, data, dtype_out_time):
"""Save the data to netcdf files in direc_out."""
path = self.path_out[dtype_out_time]
if not os.path.isdir(self.dir_out):
os.makedirs(self.dir_out)
if 'reg' in dtype_out_time:
try:
reg_data = xr.open_dataset(path)
except (EOFError, RuntimeError, IOError):
reg_data = xr.Dataset()
reg_data.update(data)
data_out = reg_data
else:
data_out = data
if isinstance(data_out, xr.DataArray):
data_out = xr.Dataset({self.name: data_out})
data_out.to_netcdf(path, engine='netcdf4', format='NETCDF3_64BIT') |
java | private static <V> Collection<V> sorted(Iterable<V> source, Comparator<V> comparator, int size)
{
V[] vs = (V[]) new Object[size];
int i = 0;
for (V v : source)
vs[i++] = v;
Arrays.sort(vs, comparator);
return Arrays.asList(vs);
} |
python | def name_scope(name=None):
"""
This decorator wraps a function so that it runs inside a TensorFlow
name scope. The name is given by the `name` option; if this is None,
then the name of the function will be used.
```
>>> @name_scope()
... def foo(...):
...     # now runs inside scope "foo"
>>> @name_scope('bar')
... def baz(...):
...     # now runs inside scope "bar", not "baz"
```
"""
def name_scope_wrapper_decorator(method):
@functools.wraps(method)
def name_scope_wrapper(*args, **kwargs):
scope_name = name if name is not None else method.__name__
with tf.name_scope(scope_name):
return method(*args, **kwargs)
return name_scope_wrapper
return name_scope_wrapper_decorator |
java | public <T> InjectorBuilder forEachElement(ElementVisitor<T> visitor) {
Elements
.getElements(module)
.forEach(element -> element.acceptVisitor(visitor));
return this;
} |
java | public void setPersistentAttributes(Map<String, Object> persistentAttributes) {
if (persistenceAdapter == null) {
throw new IllegalStateException("Attempting to set persistence attributes without configured persistence adapter");
}
this.persistentAttributes = persistentAttributes;
persistenceAttributesSet = true;
} |
python | def p_encaps_var_dollar_curly_array_offset(p):
'encaps_var : DOLLAR_OPEN_CURLY_BRACES STRING_VARNAME LBRACKET expr RBRACKET RBRACE'
p[0] = ast.ArrayOffset(ast.Variable('$' + p[2], lineno=p.lineno(2)), p[4],
lineno=p.lineno(3)) |
python | def make_error(self,
message: str,
*,
error: Exception = None,
# ``error_class: Type[Exception]=None`` doesn't work on
# Python 3.5.2, but that is the exact version run by Read the
# Docs :( More info: http://stackoverflow.com/q/42942867
error_class: Any = None) -> Exception:
"""Return error instantiated from given message.
:param message: Message to wrap.
:param error: Validation error.
:param error_class:
Special class to wrap error message into. When omitted
``self.error_class`` will be used.
"""
if error_class is None:
error_class = self.error_class if self.error_class else Error
return error_class(message) |
java | public IssueCategory createCategory(IssueCategory category) throws RedmineException {
if (category.getProject() == null
|| category.getProject().getId() == null) {
throw new IllegalArgumentException(
"IssueCategory must contain an existing project");
}
return transport.addChildEntry(Project.class, category.getProject()
.getId().toString(), category);
} |
python | def form(self, request, tag):
"""
Render the inputs for a form.
@param tag: A tag with:
- I{form} and I{description} slots
- I{liveform} and I{subform} patterns, to fill the I{form} slot
- An I{inputs} slot, to fill with parameter views
- L{IParameterView.patternName}I{-input-container} patterns for
each parameter type in C{self.parameters}
"""
patterns = PatternDictionary(self.docFactory)
inputs = []
for parameter in self.parameters:
view = parameter.viewFactory(parameter, None)
if view is not None:
view.setDefaultTemplate(
tag.onePattern(view.patternName + '-input-container'))
setFragmentParent = getattr(view, 'setFragmentParent', None)
if setFragmentParent is not None:
setFragmentParent(self)
inputs.append(view)
else:
inputs.append(_legacySpecialCases(self, patterns, parameter))
if self.subFormName is None:
pattern = tag.onePattern('liveform')
else:
pattern = tag.onePattern('subform')
return dictFillSlots(
tag,
dict(form=pattern.fillSlots('inputs', inputs),
description=self._getDescription())) |
java | public DialSeries addSeries(String seriesName, double value, String annotation) {
// Sanity checks
sanityCheck(seriesName, value);
DialSeries series = new DialSeries(seriesName, value, annotation);
seriesMap.put(seriesName, series);
return series;
} |
python | def configure_ckan(m):
"""Load groups and organizations, from a file in Metatab format"""
from ckanapi import RemoteCKAN
try:
doc = MetapackDoc(m.mt_file, cache=m.cache)
except (IOError, MetatabError) as e:
err("Failed to open metatab '{}': {}".format(m.mt_file, e))
c = RemoteCKAN(m.ckan_url, apikey=m.api_key)
groups = { g['name']:g for g in c.action.group_list(all_fields=True) }
for g in doc['Groups']:
if g.value not in groups:
prt('Creating group: ', g.value)
c.action.group_create(name=g.value,
title=g.get_value('title'),
description=g.get_value('description'),
id=g.get_value('id'),
image_url=g.get_value('image_url'))
orgs = {o['name']: o for o in c.action.organization_list(all_fields=True)}
for o in doc['Organizations']:
if o.value not in orgs:
prt('Creating organization: ', o.value)
c.action.organization_create(name=o.value,
title=o.get_value('title'),
description=o.get_value('description'),
id=o.get_value('id'),
image_url=o.get_value('image_url')) |
python | def API_GET(self, courseid, taskid=None): # pylint: disable=arguments-differ
"""
List tasks available to the connected client. Returns a dict in the form
::
{
"taskid1":
{
"name": "Name of the course", #the name of the course
"authors": [],
"deadline": "",
"status": "success" # can be "succeeded", "failed" or "notattempted"
"grade": 0.0,
"grade_weight": 0.0,
"context": "" # context of the task, in RST
"problems": # dict of the subproblems
{
# see the format of task.yaml for the content of the dict. Contains everything but
# responses of multiple-choice and match problems.
}
}
#...
}
If you use the endpoint /api/v0/courses/the_course_id/tasks/the_task_id, this dict will contain one entry or the page will return 404 Not
Found.
"""
try:
course = self.course_factory.get_course(courseid)
except Exception:
raise APINotFound("Course not found")
if not self.user_manager.course_is_open_to_user(course, lti=False):
raise APIForbidden("You are not registered to this course")
if taskid is None:
tasks = course.get_tasks()
else:
try:
tasks = {taskid: course.get_task(taskid)}
except Exception:
raise APINotFound("Task not found")
output = []
for taskid, task in tasks.items():
task_cache = self.user_manager.get_task_cache(self.user_manager.session_username(), task.get_course_id(), task.get_id())
data = {
"id": taskid,
"name": task.get_name(self.user_manager.session_language()),
"authors": task.get_authors(self.user_manager.session_language()),
"deadline": task.get_deadline(),
"status": "notviewed" if task_cache is None else "notattempted" if task_cache["tried"] == 0 else "succeeded" if task_cache["succeeded"] else "failed",
"grade": task_cache.get("grade", 0.0) if task_cache is not None else 0.0,
"grade_weight": task.get_grading_weight(),
"context": task.get_context(self.user_manager.session_language()).original_content(),
"problems": []
}
for problem in task.get_problems():
pcontent = problem.get_original_content()
pcontent["id"] = problem.get_id()
if pcontent["type"] == "match":
del pcontent["answer"]
if pcontent["type"] == "multiple_choice":
pcontent["choices"] = {key: val["text"] for key, val in enumerate(pcontent["choices"])}
pcontent = self._check_for_parsable_text(pcontent)
data["problems"].append(pcontent)
output.append(data)
return 200, output |
python | def str_if_not_none(value):
"""
Returns str(value) if the value is not None.
:param value: None or a value that can be converted to a str.
:return: None or str(value)
"""
if not(value is None or isinstance(value, string_types)):
value = str(value)
return value |
java | @Override
public DescriptorValue calculate(IAtomContainer atomContainer) {
IAtomContainer ac;
try {
ac = (IAtomContainer) atomContainer.clone();
} catch (CloneNotSupportedException e) {
return getDummyDescriptorValue(e);
}
List<String> profiles = new ArrayList<String>();
// calculate the set of all rings
IRingSet rs;
try {
rs = (new AllRingsFinder()).findAllRings(ac);
} catch (CDKException e) {
return getDummyDescriptorValue(e);
}
// check aromaticity if the descriptor parameter is set to true
if (checkAromaticity) {
try {
AtomContainerManipulator.percieveAtomTypesAndConfigureAtoms(ac);
Aromaticity.cdkLegacy().apply(ac);
} catch (CDKException e) {
return getDummyDescriptorValue(e);
}
}
// iterate over all atoms of ac
for (IAtom atom : ac.atoms()) {
if (atom.getSymbol().equals("N") || atom.getSymbol().equals("O") || atom.getSymbol().equals("S")
|| atom.getSymbol().equals("P")) {
int singleBondCount = 0;
int doubleBondCount = 0;
int tripleBondCount = 0;
int aromaticBondCount = 0;
double maxBondOrder = 0;
double bondOrderSum = 0;
int hCount = 0;
int isIn3MemberRing = 0;
// counting the number of single/double/triple/aromatic bonds
List<IBond> connectedBonds = ac.getConnectedBondsList(atom);
for (IBond connectedBond : connectedBonds) {
if (connectedBond.getFlag(CDKConstants.ISAROMATIC))
aromaticBondCount++;
else if (connectedBond.getOrder() == Order.SINGLE)
singleBondCount++;
else if (connectedBond.getOrder() == Order.DOUBLE)
doubleBondCount++;
else if (connectedBond.getOrder() == Order.TRIPLE) tripleBondCount++;
}
int formalCharge = atom.getFormalCharge();
List<IAtom> connectedAtoms = ac.getConnectedAtomsList(atom);
int numberOfNeighbours = connectedAtoms.size();
// EXPLICIT hydrogens: count the number of hydrogen atoms
for (int neighbourIndex = 0; neighbourIndex < numberOfNeighbours; neighbourIndex++)
if (((IAtom) connectedAtoms.get(neighbourIndex)).getSymbol().equals("H")) hCount++;
// IMPLICIT hydrogens: count the number of hydrogen atoms and adjust other atom profile properties
Integer implicitHAtoms = atom.getImplicitHydrogenCount();
if (implicitHAtoms == CDKConstants.UNSET) {
implicitHAtoms = 0;
}
for (int hydrogenIndex = 0; hydrogenIndex < implicitHAtoms; hydrogenIndex++) {
hCount++;
numberOfNeighbours++;
singleBondCount++;
}
// Calculate bond order sum using the counters of single/double/triple/aromatic bonds
bondOrderSum += singleBondCount * 1.0;
bondOrderSum += doubleBondCount * 2.0;
bondOrderSum += tripleBondCount * 3.0;
bondOrderSum += aromaticBondCount * 1.5;
// setting maxBondOrder
if (singleBondCount > 0) maxBondOrder = 1.0;
if (aromaticBondCount > 0) maxBondOrder = 1.5;
if (doubleBondCount > 0) maxBondOrder = 2.0;
if (tripleBondCount > 0) maxBondOrder = 3.0;
// isIn3MemberRing checker
if (rs.contains(atom)) {
IRingSet rsAtom = rs.getRings(atom);
for (int ringSetIndex = 0; ringSetIndex < rsAtom.getAtomContainerCount(); ringSetIndex++) {
IRing ring = (IRing) rsAtom.getAtomContainer(ringSetIndex);
if (ring.getRingSize() == 3) isIn3MemberRing = 1;
}
}
// create a profile of the current atom (atoms[atomIndex]) according to the profile definition in the constructor
String profile = atom.getSymbol() + "+" + maxBondOrder + "+" + bondOrderSum + "+" + numberOfNeighbours
+ "+" + hCount + "+" + formalCharge + "+" + aromaticBondCount + "+" + isIn3MemberRing + "+"
+ singleBondCount + "+" + doubleBondCount + "+" + tripleBondCount;
//logger.debug("tpsa profile: "+ profile);
profiles.add(profile);
}
}
// END OF ATOM LOOP
// calculate the tpsa for the AtomContainer ac
double tpsa = 0;
for (int profileIndex = 0; profileIndex < profiles.size(); profileIndex++) {
if (map.containsKey(profiles.get(profileIndex))) {
tpsa += (Double) map.get(profiles.get(profileIndex));
//logger.debug("tpsa contribs: " + profiles.elementAt(profileIndex) + "\t" + ((Double)map.get(profiles.elementAt(profileIndex))).doubleValue());
}
}
profiles.clear(); // remove all profiles from the profiles-Vector
//logger.debug("tpsa: " + tpsa);
return new DescriptorValue(getSpecification(), getParameterNames(), getParameters(), new DoubleResult(tpsa),
getDescriptorNames());
} |
python | def iddtxt2groups(txt):
"""extract the groups from the idd file"""
try:
txt = txt.decode('ISO-8859-2')
except AttributeError as e:
pass # for python 3
txt = nocomment(txt, '!')
txt = txt.replace("\\group", "!-group") # retains group in next line
txt = nocomment(txt, '\\') # remove all other idd info
lines = txt.splitlines()
lines = [line.strip() for line in lines] # cleanup
lines = [line for line in lines if line != ''] # cleanup
txt = '\n'.join(lines)
gsplits = txt.split('!') # split into groups, since we have !-group
gsplits = [gsplit.splitlines() for gsplit in gsplits] # split group
gsplits[0].insert(0, None)
# Put None for the first group that does not have a group name
gdict = {}
for gsplit in gsplits:
gdict.update({gsplit[0]:gsplit[1:]})
# makes dict {groupname:[k1, k2], groupname2:[k3, k4]}
gdict = {k:'\n'.join(v) for k, v in gdict.items()}# joins lines back
gdict = {k:v.split(';') for k, v in gdict.items()} # splits into idfobjects
gdict = {k:[i.strip() for i in v] for k, v in gdict.items()} # cleanup
gdict = {k:[i.splitlines() for i in v] for k, v in gdict.items()}
# splits idfobjects into lines
gdict = {k:[i for i in v if len(i) > 0] for k, v in gdict.items()}
# cleanup - removes blank lines
gdict = {k:[i[0] for i in v] for k, v in gdict.items()} # use first line
gdict = {k:[i.split(',')[0] for i in v] for k, v in gdict.items()}
# remove ','
nvalue = gdict.pop(None) # remove group with no name
gdict = {k[len('-group '):]:v for k, v in gdict.items()} # get group name
gdict.update({None:nvalue}) # put back group with no name
return gdict |
python | def _split_cell(cell, module):
""" Split a hybrid %%sql cell into the Python code and the queries.
Populates a module with the queries.
Args:
cell: the contents of the %%sql cell.
module: the module that the contents will populate.
Returns:
The default (last) query for the module.
"""
lines = cell.split('\n')
code = None
last_def = -1
name = None
define_wild_re = re.compile(r'^DEFINE\s+.*$', re.IGNORECASE)
define_re = re.compile(r'^DEFINE\s+QUERY\s+([A-Z]\w*)\s*?(.*)$', re.IGNORECASE)
select_re = re.compile(r'^SELECT\s*.*$', re.IGNORECASE)
standard_sql_re = re.compile(r'^(CREATE|WITH|INSERT|DELETE|UPDATE)\s*.*$', re.IGNORECASE)
# TODO(gram): a potential issue with this code is if we have leading Python code followed
# by a SQL-style comment before we see SELECT/DEFINE. When switching to the tokenizer see
# if we can address this.
for i, line in enumerate(lines):
define_match = define_re.match(line)
select_match = select_re.match(line)
standard_sql_match = standard_sql_re.match(line)
if i:
prior_content = ''.join(lines[:i]).strip()
if select_match:
# Avoid matching if previous token was '(' or if Standard SQL is found
# TODO: handle the possibility of comments immediately preceding SELECT
select_match = len(prior_content) == 0 or \
(prior_content[-1] != '(' and not standard_sql_re.match(prior_content))
if standard_sql_match:
standard_sql_match = len(prior_content) == 0 or not standard_sql_re.match(prior_content)
if define_match or select_match or standard_sql_match:
# If this is the first query, get the preceding Python code.
if code is None:
code = ('\n'.join(lines[:i])).strip()
if len(code):
code += '\n'
elif last_def >= 0:
# This is not the first query, so gather the previous query text.
query = '\n'.join([line for line in lines[last_def:i] if len(line)]).strip()
if select_match and name != datalab.data._utils._SQL_MODULE_MAIN and len(query) == 0:
# Avoid DEFINE query name\nSELECT ... being seen as an empty DEFINE followed by SELECT
continue
# Save the query
statement = datalab.data.SqlStatement(query, module)
module.__dict__[name] = statement
# And set the 'last' query to be this too
module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement
# Get the query name and strip off our syntactic sugar if appropriate.
if define_match:
name = define_match.group(1)
lines[i] = define_match.group(2)
else:
name = datalab.data._utils._SQL_MODULE_MAIN
# Save the starting line index of the new query
last_def = i
else:
define_wild_match = define_wild_re.match(line)
if define_wild_match:
raise Exception('Expected "DEFINE QUERY <name>"')
if last_def >= 0:
# We were in a query so save this tail query.
query = '\n'.join([line for line in lines[last_def:] if len(line)]).strip()
statement = datalab.data.SqlStatement(query, module)
module.__dict__[name] = statement
module.__dict__[datalab.data._utils._SQL_MODULE_LAST] = statement
if code is None:
code = ''
module.__dict__[datalab.data._utils._SQL_MODULE_ARGPARSE] = _arguments(code, module)
return module.__dict__.get(datalab.data._utils._SQL_MODULE_LAST, None) |
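For orientation, a hypothetical %%sql cell body of the shape _split_cell parses — optional leading Python, a named DEFINE QUERY block (per define_re above), then a trailing default query; the table and variable names are made up:

cell = """
limit = 10

DEFINE QUERY top_names
SELECT name FROM [my.names_table] ORDER BY n DESC LIMIT $limit

SELECT * FROM $top_names
"""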
python | def execute(self, eopatch):
"""
:param eopatch: Input EOPatch.
:type eopatch: EOPatch
:return: Transformed eo patch
:rtype: EOPatch
"""
feature_type, feature_name = next(self.feature(eopatch))
good_idxs = self._get_filtered_indices(eopatch[feature_type][feature_name] if feature_name is not ... else
eopatch[feature_type])
for feature_type, feature_name in self.filter_features(eopatch):
if feature_type.is_time_dependent():
if feature_type.has_dict():
if feature_type.contains_ndarrays():
eopatch[feature_type][feature_name] = np.asarray([eopatch[feature_type][feature_name][idx] for
idx in good_idxs])
# else:
# NotImplemented
else:
eopatch[feature_type] = [eopatch[feature_type][idx] for idx in good_idxs]
self._update_other_data(eopatch)
return eopatch |
python | def from_prev_calc(cls, prev_calc_dir, standardize=False, sym_prec=0.1,
international_monoclinic=True, reciprocal_density=100,
small_gap_multiply=None, **kwargs):
"""
Generate a set of Vasp input files for static calculations from a
directory of previous Vasp run.
Args:
prev_calc_dir (str): Directory containing the outputs(
vasprun.xml and OUTCAR) of previous vasp run.
standardize (float): Whether to standardize to a primitive
standard cell. Defaults to False.
sym_prec (float): Tolerance for symmetry finding. If not 0,
the final structure from the previous run will be symmetrized
to get a primitive standard cell. Set to 0 if you don't want
that.
international_monoclinic (bool): Whether to use international
convention (vs Curtarolo) for monoclinic. Defaults True.
reciprocal_density (int): density of k-mesh by reciprocal
volume (defaults to 100)
small_gap_multiply ([float, float]): If the gap is less than
1st index, multiply the default reciprocal_density by the 2nd
index.
\\*\\*kwargs: All kwargs supported by MPStaticSet,
other than prev_incar and prev_structure and prev_kpoints which
are determined from the prev_calc_dir.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
prev_incar = vasprun.incar
prev_kpoints = vasprun.kpoints
if standardize:
warnings.warn("Use of standardize=True with from_prev_run is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized "
"structure. copy_chgcar is enforced to be false.")
# We will make a standard structure for the given symprec.
prev_structure = get_structure_from_prev_run(
vasprun, outcar, sym_prec=standardize and sym_prec,
international_monoclinic=international_monoclinic)
# multiply the reciprocal density if needed:
if small_gap_multiply:
gap = vasprun.eigenvalue_band_properties[0]
if gap <= small_gap_multiply[0]:
reciprocal_density = reciprocal_density * small_gap_multiply[1]
return cls(
structure=prev_structure, prev_incar=prev_incar,
prev_kpoints=prev_kpoints,
reciprocal_density=reciprocal_density, **kwargs) |
python | def __spawn_new_request(self):
"""Spawn the first queued request if there is one available.
Returns:
bool: True if a new request was spawned, false otherwise.
"""
first_in_line = self.queue.get_first(QueueItem.STATUS_QUEUED)
if first_in_line is None:
return False
while self.routing.is_treshold_reached(first_in_line.request):
self.queue.move(first_in_line, QueueItem.STATUS_CANCELLED)
first_in_line = self.queue.get_first(QueueItem.STATUS_QUEUED)
if first_in_line is None:
return False
self.__request_start(first_in_line)
return True |
python | def load_family_details(self, pheno_covar):
"""Load family data updating the pheno_covar with family ids found.
:param pheno_covar: Phenotype/covariate object
:return: None
"""
file = open(self.fam_details)
header = file.readline()
format = file.readline()
self.file_index = 0
mask_components = [] # 1s indicate an individual is to be masked out
for line in file:
words = line.strip().split()
indid = ":".join(words[0:2])
if DataParser.valid_indid(indid):
mask_components.append(0)
sex = int(words[5])
pheno = float(words[6])
pheno_covar.add_subject(indid, sex, pheno)
else:
mask_components.append(1)
mask_components = numpy.array(mask_components)
self.ind_mask = numpy.zeros(len(mask_components) * 2, dtype=numpy.int8).reshape(-1, 2)
self.ind_mask[0:, 0] = mask_components
self.ind_mask[0:, 1] = mask_components
self.ind_count = self.ind_mask.shape[0]
pheno_covar.freeze_subjects() |
python | def get_gamma_value(self):
'''
Getter for the gamma value.
'''
if not isinstance(self.__gamma_value, float):
raise TypeError("The type of __gamma_value must be float.")
return self.__gamma_value |
python | def load_drp(self, name, entry_point='numina.pipeline.1'):
"""Load all available DRPs in 'entry_point'."""
for drpins in self.iload(entry_point):
if drpins.name == name:
return drpins
else:
raise KeyError('{}'.format(name)) |
python | def uniform(start, end=None, periods=None, freq=None, sc=None):
"""
Instantiates a uniform DateTimeIndex.
Either end or periods must be specified.
Parameters
----------
start : string, long (nanos from epoch), or Pandas Timestamp
end : string, long (nanos from epoch), or Pandas Timestamp
periods : int
freq : a frequency object
sc : SparkContext
"""
dtmodule = sc._jvm.com.cloudera.sparkts.__getattr__('DateTimeIndex$').__getattr__('MODULE$')
if freq is None:
raise ValueError("Missing frequency")
elif end is None and periods is None:
raise ValueError("Need an end date or number of periods")
elif end is not None:
return DateTimeIndex(dtmodule.uniformFromInterval( \
datetime_to_nanos(start), datetime_to_nanos(end), freq._jfreq))
else:
return DateTimeIndex(dtmodule.uniform( \
datetime_to_nanos(start), periods, freq._jfreq)) |
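A usage sketch, assuming a live SparkContext ``sc`` and that the ``DayFrequency`` helper lives in the same sparkts module (both assumptions, not shown above):

from sparkts.datetimeindex import uniform, DayFrequency
index = uniform(start='2017-01-01', end='2017-01-31',
                freq=DayFrequency(1, sc), sc=sc)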
java | public static Collection<MonetaryRounding> getRoundings(RoundingQuery roundingQuery) {
return Optional.ofNullable(monetaryRoundingsSingletonSpi()).orElseThrow(
() -> new MonetaryException("No MonetaryRoundingsSpi loaded, query functionality is not available."))
.getRoundings(roundingQuery);
} |
java | @VisibleForTesting
public static void transcodeJpegWithExifOrientation(
final InputStream inputStream,
final OutputStream outputStream,
final int exifOrientation,
final int scaleNumerator,
final int quality)
throws IOException {
NativeJpegTranscoderSoLoader.ensure();
Preconditions.checkArgument(scaleNumerator >= MIN_SCALE_NUMERATOR);
Preconditions.checkArgument(scaleNumerator <= MAX_SCALE_NUMERATOR);
Preconditions.checkArgument(quality >= MIN_QUALITY);
Preconditions.checkArgument(quality <= MAX_QUALITY);
Preconditions.checkArgument(JpegTranscoderUtils.isExifOrientationAllowed(exifOrientation));
Preconditions.checkArgument(
scaleNumerator != SCALE_DENOMINATOR || exifOrientation != ExifInterface.ORIENTATION_NORMAL,
"no transformation requested");
nativeTranscodeJpegWithExifOrientation(
Preconditions.checkNotNull(inputStream),
Preconditions.checkNotNull(outputStream),
exifOrientation,
scaleNumerator,
quality);
} |
java | public void setBuffer(WsByteBuffer buf) {
if (buf != null) {
this.buffers = this.defaultBuffers;
this.buffers[0] = buf;
} else {
this.buffers = null;
}
} |
python | def ensure_schema(client, table_name):
"""
Create the table/columnfamily if it doesn't already exist.
:param client: A Cassandra CQL client
:type client: silverberg.client.CQLClient
:param table_name: A table/columnfamily name for holding locks.
:type table_name: str
"""
query = ''.join([
'CREATE TABLE {cf} ',
'("lockId" ascii, "claimId" timeuuid, PRIMARY KEY("lockId", "claimId"));'])
def errback(failure):
failure.trap(InvalidRequestException)
return client.execute(query.format(cf=table_name),
{}, ConsistencyLevel.QUORUM).addErrback(errback) |
java | private void executeUpdatesSynchronous(DBTransaction transaction) {
BatchStatement batchState = new BatchStatement(Type.UNLOGGED);
batchState.addAll(getMutations(transaction));
executeBatch(batchState);
} |
java | public int getDelay()
{
if (scheduledGraceful != null)
return (int)scheduledGraceful.getDelay(TimeUnit.SECONDS);
if (shutdown.get())
return Integer.MIN_VALUE;
return Integer.MAX_VALUE;
} |
java | @Override
protected void beforeWaitForSynchronization(final T message) throws CouldNotPerformException {
transactionIdField = ProtoBufFieldProcessor.getFieldDescriptor(message, TransactionIdProvider.TRANSACTION_ID_FIELD_NAME);
if (transactionIdField == null) {
throw new NotAvailableException("transaction id field for message[" + message.getClass().getSimpleName() + "]");
}
if (transactionIdField.getType() != Type.UINT64) {
throw new CouldNotPerformException("Transaction id field of message[" + message.getClass().getSimpleName() + "] has an unexpected type[" + transactionIdField.getType().name() + "]");
}
} |
python | def get_viewer(self, v_id, viewer_class=None, width=512, height=512,
force_new=False):
"""
Get an existing viewer by viewer id. If the viewer does not yet
exist, make a new one.
"""
if not force_new:
try:
return self.viewers[v_id]
except KeyError:
pass
# create top level window
window = self.app.make_window("Viewer %s" % v_id, wid=v_id)
# We get back a record with information about the viewer
v_info = self.make_viewer(window, viewer_class=viewer_class,
width=width, height=height)
# Save it under this viewer id
self.viewers[v_id] = v_info
return v_info |
python | def send_messages(self, messages):
"""Send one or more EmailMessage objects.
Returns:
int: Number of email messages sent.
"""
if not messages:
return 0
new_conn_created = self.open()
if not self.connection:
# We failed silently on open(). Trying to send would be pointless.
return
num_sent = 0
for message in messages:
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent |
python | def get_parser():
"""Initialize the parser for the command line interface and bind the
autocompletion functionality"""
# initialize the parser
parser = argparse.ArgumentParser(
description=(
'Command line tool for extracting text from any document. '
),
)
# define the command line options here
parser.add_argument(
'filename', help='Filename to extract text.',
).completer = argcomplete.completers.FilesCompleter
parser.add_argument(
'-e', '--encoding', type=str, default=DEFAULT_ENCODING,
choices=_get_available_encodings(),
help='Specify the encoding of the output.',
)
parser.add_argument(
'--extension', type=str, default=None,
choices=_get_available_extensions(),
help='Specify the extension of the file.',
)
parser.add_argument(
'-m', '--method', default='',
help='Specify a method of extraction for formats that support it',
)
parser.add_argument(
'-o', '--output', type=FileType('wb'), default='-',
help='Output raw text in this file',
)
parser.add_argument(
'-O', '--option', type=str, action=AddToNamespaceAction,
help=(
'Add arbitrary options to various parsers of the form '
'KEYWORD=VALUE. A full list of available KEYWORD options is '
'available at http://bit.ly/textract-options'
),
)
parser.add_argument(
'-v', '--version', action='version', version='%(prog)s '+VERSION,
)
# enable autocompletion with argcomplete
argcomplete.autocomplete(parser)
return parser |
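A minimal sketch of driving the parser the way textract's entry point might; the filename is a placeholder:

parser = get_parser()
args = parser.parse_args(['document.pdf'])
print(args.filename, args.encoding)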
python | def norm_int_dict(int_dict):
"""Normalizes values in the given dict with int values.
Parameters
----------
int_dict : dict
A dict object mapping each key to an int value.
Returns
-------
dict
A dict where each key is mapped to its relative part in the sum of
all dict values.
Example
-------
>>> dict_obj = {'a': 3, 'b': 5, 'c': 2}
>>> result = norm_int_dict(dict_obj)
>>> print(sorted(result.items()))
[('a', 0.3), ('b', 0.5), ('c', 0.2)]
"""
norm_dict = int_dict.copy()
val_sum = sum(norm_dict.values())
for key in norm_dict:
norm_dict[key] = norm_dict[key] / val_sum
return norm_dict |
java | public void insert( int index , int value ) {
if( size == data.length ) {
int temp[] = new int[ size * 2];
System.arraycopy(data,0,temp,0,index);
temp[index] = value;
System.arraycopy(data,index,temp,index+1,size-index);
this.data = temp;
size++;
} else {
size++;
for( int i = size-1; i > index; i-- ) {
data[i] = data[i-1];
}
data[index] = value;
}
} |
python | def gradient(self):
"""Compute the gradient of the energy for all atoms"""
result = np.zeros((self.numc, 3), float)
for index1 in range(self.numc):
result[index1] = self.gradient_component(index1)
return result |
java | public PutRemediationConfigurationsResult withFailedBatches(FailedRemediationBatch... failedBatches) {
if (this.failedBatches == null) {
setFailedBatches(new com.amazonaws.internal.SdkInternalList<FailedRemediationBatch>(failedBatches.length));
}
for (FailedRemediationBatch ele : failedBatches) {
this.failedBatches.add(ele);
}
return this;
} |
python | def workflow_overwrite(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /workflow-xxxx/overwrite API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fworkflow-xxxx%2Foverwrite
"""
return DXHTTPRequest('/%s/overwrite' % object_id, input_params, always_retry=always_retry, **kwargs) |
python | def on_packet_received(self, pkt):
"""DEV: entry point. Will be called by sniff() for each
received packet (that passes the filters).
"""
if not pkt:
return
if self.store:
self.lst.append(pkt)
if self.prn:
result = self.prn(pkt)
if result is not None:
print(result) |
java | public void resumeConsumer(int suspendFlag)
{
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.entry(tc, "resumeConsumer", this);
ArrayList<AORequestedTick> satisfiedTicks = null;
synchronized(parent) // AOStream
{
try
{
// Take the lock
this.lock();
try
{
if (_consumerSuspended)
{
// clear the bit provided in the _suspendFlags
_suspendFlags &= ~suspendFlag;
if (_suspendFlags == 0) // No flags set so resume the consumer
{
_consumerSuspended = false;
// If the consumer is still active (started) we need
// to kickstart the consumer back into life to check for more
// messages
if(!closed)
satisfiedTicks = processQueuedMsgs(null);
}
}
}
finally
{
this.unlock();
}
}
catch(SINotPossibleInCurrentConfigurationException e)
{
// No FFDC code needed
notifyException(e);
}
if (satisfiedTicks!=null)
{
// inform parent about satisfied ticks - outside lock
int length = satisfiedTicks.size();
for (int i = 0; i < length; i++)
{
AORequestedTick aotick = (AORequestedTick) satisfiedTicks.get(i);
parent.satisfiedRequest(aotick.tick, aotick.getMessage());
}
}
}
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.exit(tc, "resumeConsumer");
} |
python | def shuffle(args):
"""
%prog shuffle p1.fastq p2.fastq
Shuffle pairs into interleaved format.
"""
p = OptionParser(shuffle.__doc__)
p.set_tag()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
p1, p2 = args
pairsfastq = pairspf((p1, p2)) + ".fastq"
tag = opts.tag
p1fp = must_open(p1)
p2fp = must_open(p2)
pairsfw = must_open(pairsfastq, "w")
nreads = 0
while True:
a = list(islice(p1fp, 4))
if not a:
break
b = list(islice(p2fp, 4))
if tag:
name = a[0].rstrip()
a[0] = name + "/1\n"
b[0] = name + "/2\n"
pairsfw.writelines(a)
pairsfw.writelines(b)
nreads += 2
pairsfw.close()
extra = nreads * 2 if tag else 0
checkShuffleSizes(p1, p2, pairsfastq, extra=extra)
logging.debug("File `{0}` verified after writing {1} reads.".\
format(pairsfastq, nreads))
return pairsfastq |
java | public static Specification<JpaTarget> hasAssignedDistributionSet(final Long distributionSetId) {
return (targetRoot, query, cb) -> cb.equal(
targetRoot.<JpaDistributionSet> get(JpaTarget_.assignedDistributionSet).get(JpaDistributionSet_.id),
distributionSetId);
} |
java | public boolean setHiddenInputValue(String idOrName, String value) {
T element = findElement(By.id(idOrName));
if (element == null) {
element = findElement(By.name(idOrName));
if (element != null) {
executeJavascript("document.getElementsByName('%s')[0].value='%s'", idOrName, value);
}
} else {
executeJavascript("document.getElementById('%s').value='%s'", idOrName, value);
}
return element != null;
} |
java | public static void main(final String[] args) {
Switch about = new Switch("a", "about", "display about message");
Switch help = new Switch("h", "help", "display help message");
FileArgument expectedFile = new FileArgument("e", "expected-file", "expected interpretation file, default stdin; at least one of expected or observed file must be provided", false);
FileArgument observedFile = new FileArgument("b", "observed-file", "observed interpretation file, default stdin; at least one of expected or observed file must be provided", false);
FileArgument outputFile = new FileArgument("o", "output-file", "output file, default stdout", false);
IntegerArgument resolution = new IntegerArgument("r", "resolution", "resolution, must be in the range [1..4], default " + DEFAULT_RESOLUTION, false);
StringListArgument loci = new StringListArgument("l", "loci", "list of loci to validate, default " + DEFAULT_LOCI, false);
Switch printSummary = new Switch("s", "summary", "print summary");
ArgumentList arguments = new ArgumentList(about, help, expectedFile, observedFile, outputFile, resolution, loci, printSummary);
CommandLine commandLine = new CommandLine(args);
ValidateInterpretation validateInterpretation = null;
try
{
CommandLineParser.parse(commandLine, arguments);
if (about.wasFound()) {
About.about(System.out);
System.exit(0);
}
if (help.wasFound()) {
Usage.usage(USAGE, null, commandLine, arguments, System.out);
System.exit(0);
}
// todo: allow for configuration of glclient
validateInterpretation = new ValidateInterpretation(expectedFile.getValue(), observedFile.getValue(), outputFile.getValue(), resolution.getValue(DEFAULT_RESOLUTION), loci.getValue(DEFAULT_LOCI), printSummary.wasFound(), LocalGlClient.create());
}
catch (CommandLineParseException e) {
if (about.wasFound()) {
About.about(System.out);
System.exit(0);
}
if (help.wasFound()) {
Usage.usage(USAGE, null, commandLine, arguments, System.out);
System.exit(0);
}
Usage.usage(USAGE, e, commandLine, arguments, System.err);
System.exit(-1);
}
try {
System.exit(validateInterpretation.call());
}
catch (Exception e) {
e.printStackTrace();
System.exit(1);
}
} |
python | def find_by_type_or_id(type_or_id, prs):
"""
:param type_or_id: Type of the data to process or ID of the processor class
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:return:
A list of processor classes to process files of given data type or
processor 'type_or_id' found by its ID
:raises: UnknownProcessorTypeError
"""
def pred(pcls):
"""Predicate"""
return pcls.cid() == type_or_id or pcls.type() == type_or_id
pclss = findall_with_pred(pred, prs)
if not pclss:
raise UnknownProcessorTypeError(type_or_id)
return pclss |
java | static <EqClassT, LeftProvT, RightProvT> EquivalenceBasedProvenancedAlignment<EqClassT, LeftProvT, RightProvT> fromEquivalenceClassMaps(
final Multimap<? extends EqClassT, ? extends LeftProvT> leftEquivalenceClassesToProvenances,
final Multimap<? extends EqClassT, ? extends RightProvT> rightEquivalenceClassesToProvenances) {
return new EquivalenceBasedProvenancedAlignment<EqClassT, LeftProvT, RightProvT>(
leftEquivalenceClassesToProvenances, rightEquivalenceClassesToProvenances);
} |
java | public Path getPathInHar(Path path) {
Path harPath = new Path(path.toUri().getPath());
if (archivePath.compareTo(harPath) == 0)
return new Path(Path.SEPARATOR);
Path tmp = new Path(harPath.getName());
Path parent = harPath.getParent();
while (!(parent.compareTo(archivePath) == 0)) {
if (parent.toString().equals(Path.SEPARATOR)) {
tmp = null;
break;
}
tmp = new Path(parent.getName(), tmp);
parent = parent.getParent();
}
if (tmp != null)
tmp = new Path(Path.SEPARATOR, tmp);
return tmp;
} |
java | public void setSessionProperty(String name, String value)
{
requireNonNull(name, "name is null");
requireNonNull(value, "value is null");
checkArgument(!name.isEmpty(), "name is empty");
CharsetEncoder charsetEncoder = US_ASCII.newEncoder();
checkArgument(name.indexOf('=') < 0, "Session property name must not contain '=': %s", name);
checkArgument(charsetEncoder.canEncode(name), "Session property name is not US_ASCII: %s", name);
checkArgument(charsetEncoder.canEncode(value), "Session property value is not US_ASCII: %s", value);
sessionProperties.put(name, value);
} |
java | public AdvancedNetworkConfig setClientEndpointConfig(ServerSocketEndpointConfig serverSocketEndpointConfig) {
serverSocketEndpointConfig.setProtocolType(ProtocolType.CLIENT);
endpointConfigs.put(CLIENT, serverSocketEndpointConfig);
return this;
} |
java | public static Class<?> classForNameWithException(final String name, final ClassLoader cl)
throws ClassNotFoundException {
if (cl != null) {
try {
return Class.forName(name, false, cl);
} catch (final ClassNotFoundException | NoClassDefFoundError e) {
// fall through and try with default classloader
}
}
return Class.forName(name);
} |
python | async def disconnect(self, abort=False):
"""Disconnect from the server.
:param abort: If set to ``True``, do not wait for background tasks
associated with the connection to end.
Note: this method is a coroutine.
"""
if self.state == 'connected':
await self._send_packet(packet.Packet(packet.CLOSE))
await self.queue.put(None)
self.state = 'disconnecting'
await self._trigger_event('disconnect', run_async=False)
if self.current_transport == 'websocket':
await self.ws.close()
if not abort:
await self.read_loop_task
self.state = 'disconnected'
try:
client.connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset() |
java | public static String separatorsToSystem(final String path)
{
if (path == null)
{
return null;
}
if (isSystemWindows())
{
return separatorsToWindows(path);
}
else
{
return separatorsToUnix(path);
}
} |
python | def calculeToday(self):
"""Calcule the intervals from the last date."""
self.__logger.debug("Add today")
last = datetime.datetime.strptime(self.__lastDay, "%Y-%m-%d")
today = datetime.datetime.now().date()
self.__validInterval(last, today) |
java | private void removeComponentWithoutException(Type role, String hint)
{
try {
removeComponent(role, hint);
} catch (Exception e) {
this.logger.warn("Instance released but disposal failed. Some resources may not have been released.", e);
}
} |
python | def is_password_valid(password):
"""
Check if a password is valid
"""
pattern = re.compile(r"^.{4,75}$")
return bool(pattern.match(password)) |
python | def user_data(self, access_token, *args, **kwargs):
"""Load user data from OAuth Profile Google App Engine App"""
url = GOOGLE_APPENGINE_PROFILE_V1
auth = self.oauth_auth(access_token)
return self.get_json(url,
auth=auth, params=auth
) |
python | def eig_one_step(current_vector, learning_rate, vector_prod_fn):
"""Function that performs one step of gd (variant) for min eigen value.
Args:
current_vector: current estimate of the eigen vector with minimum eigen
value.
learning_rate: learning rate.
vector_prod_fn: function which returns product H*x, where H is a matrix for
which we computing eigenvector.
Returns:
updated vector after one step
"""
grad = 2*vector_prod_fn(current_vector)
# Current objective = (1/2)*v^T (2*M*v); v = current_vector
# grad = 2*M*v
current_objective = tf.reshape(tf.matmul(tf.transpose(current_vector),
grad) / 2., shape=())
# Project the gradient into the tangent space of the constraint region.
# This way we do not waste time taking steps that try to change the
# norm of current_vector
grad = grad - current_vector*tf.matmul(tf.transpose(current_vector), grad)
grad_norm = tf.norm(grad)
grad_norm_sq = tf.square(grad_norm)
# Computing normalized gradient of unit norm
norm_grad = grad / grad_norm
# Computing directional second derivative (dsd)
# dsd = 2*g^T M g, where g is normalized gradient
directional_second_derivative = (
tf.reshape(2*tf.matmul(tf.transpose(norm_grad),
vector_prod_fn(norm_grad)),
shape=()))
# Computing grad^\top M grad [useful to compute step size later]
# Just a rescaling of the directional_second_derivative (which uses
# normalized gradient
grad_m_grad = directional_second_derivative*grad_norm_sq / 2
# Directional_second_derivative/2 = objective when vector is norm_grad
# If this is smaller than current objective, simply return that
if directional_second_derivative / 2. < current_objective:
return norm_grad
# If curvature is positive, jump to the bottom of the bowl
if directional_second_derivative > 0.:
step = -1. * grad_norm / directional_second_derivative
else:
# If the gradient is very small, do not move
if grad_norm_sq <= 1e-16:
step = 0.0
else:
# Make a heuristic guess of the step size
step = -2. * tf.reduce_sum(current_vector*grad) / grad_norm_sq
# Computing gain using the gradient and second derivative
gain = -(2 * tf.reduce_sum(current_vector*grad) +
(step*step) * grad_m_grad)
# Fall back to pre-determined learning rate if no gain
if gain < 0.:
step = -learning_rate * grad_norm
current_vector = current_vector + step * norm_grad
return tf.nn.l2_normalize(current_vector) |
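A sketch of driving eig_one_step (assumed TF2 eager execution, since the Python-level ``if`` tests on scalar tensors require it); the matrix H and iteration count are illustrative:

import tensorflow as tf

H = tf.constant([[2.0, 1.0], [1.0, 3.0]])  # symmetric; eigenvalues ~1.38 and ~3.62
v = tf.nn.l2_normalize(tf.random.normal([2, 1]))
for _ in range(200):
    v = eig_one_step(v, learning_rate=0.1,
                     vector_prod_fn=lambda x: tf.matmul(H, x))
# The Rayleigh quotient v^T H v now approximates the minimum eigenvalue.
print(tf.matmul(tf.transpose(v), tf.matmul(H, v)))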
java | public static void main(final String[] args) {
Switch about = new Switch("a", "about", "display about message");
Switch help = new Switch("h", "help", "display help message");
FileArgument fastaFile = new FileArgument("i", "fasta-file", "input FASTA file, default stdin", false);
StringArgument outputFilePrefix = new StringArgument("p", "output-file-prefix", "output file prefix, default \"\"", false);
StringArgument outputFileExtension = new StringArgument("x", "output-file-extension", "output file extension, default " + DEFAULT_OUTPUT_FILE_EXTENSION, false);
FileArgument outputDirectory = new FileArgument("d", "output-directory", "output directory, default .", false);
ArgumentList arguments = new ArgumentList(about, help, fastaFile, outputFilePrefix, outputFileExtension, outputDirectory);
CommandLine commandLine = new CommandLine(args);
SplitFasta splitFasta = null;
try
{
CommandLineParser.parse(commandLine, arguments);
if (about.wasFound()) {
About.about(System.out);
System.exit(0);
}
if (help.wasFound()) {
Usage.usage(USAGE, null, commandLine, arguments, System.out);
System.exit(0);
}
splitFasta = new SplitFasta(fastaFile.getValue(), outputFilePrefix.getValue(DEFAULT_OUTPUT_FILE_PREFIX), outputFileExtension.getValue(DEFAULT_OUTPUT_FILE_EXTENSION), outputDirectory.getValue(DEFAULT_OUTPUT_DIRECTORY));
}
catch (CommandLineParseException e) {
Usage.usage(USAGE, e, commandLine, arguments, System.err);
System.exit(-1);
}
try {
System.exit(splitFasta.call());
}
catch (Exception e) {
e.printStackTrace();
System.exit(1);
}
} |
python | def interested_in(self):
"""
A list of strings describing the genders the user is interested in.
"""
genders = []
for gender in self.cache['interested_in']:
genders.append(gender)
return genders |
java | private void removeOldChannels()
{
Iterator<AsteriskChannelImpl> i;
synchronized (channels)
{
i = channels.values().iterator();
while (i.hasNext())
{
final AsteriskChannel channel = i.next();
final Date dateOfRemoval = channel.getDateOfRemoval();
if (channel.getState() == ChannelState.HUNGUP && dateOfRemoval != null)
{
final long diff = DateUtil.getDate().getTime() - dateOfRemoval.getTime();
if (diff >= REMOVAL_THRESHOLD)
{
i.remove();
}
}
}
}
} |
python | def report(mount):
'''
Report on quotas for a specific volume
CLI Example:
.. code-block:: bash
salt '*' quota.report /media/data
'''
ret = {mount: {}}
ret[mount]['User Quotas'] = _parse_quota(mount, '-u')
ret[mount]['Group Quotas'] = _parse_quota(mount, '-g')
return ret |
java | public static void scale(double[][] x, double lo, double hi) {
int n = x.length;
int p = x[0].length;
double[] min = colMin(x);
double[] max = colMax(x);
for (int j = 0; j < p; j++) {
double scale = max[j] - min[j];
if (!Math.isZero(scale)) {
for (int i = 0; i < n; i++) {
x[i][j] = (x[i][j] - min[j]) / scale;
}
} else {
for (int i = 0; i < n; i++) {
x[i][j] = 0.5;
}
}
}
} |
python | def _setup_postprocess_hds_timeseries(hds_file, df, config_file, prefix=None, model=None):
"""Dirty function to post process concentrations in inactive/dry cells"""
warnings.warn(
"Setting up post processing of hds or ucn timeseries obs. "
"Prepending 'pp' to obs name may cause length to exceed 20 chars", PyemuWarning)
if model is not None:
t_str = df.index.map(lambda x: x.strftime("%Y%m%d"))
else:
t_str = df.index.map(lambda x: "{0:08.2f}".format(x))
if prefix is not None:
prefix = "pp{0}".format(prefix)
else:
prefix = "pp"
ins_file = hds_file+"_timeseries.post_processed.ins"
print("writing instruction file to {0}".format(ins_file))
with open(ins_file,'w') as f:
f.write('pif ~\n')
f.write("l1 \n")
for t in t_str:
f.write("l1 w ")
for site in df.columns:
obsnme = "{0}{1}_{2}".format(prefix, site, t)
f.write(" !{0}!".format(obsnme))
f.write('\n')
frun_line = "pyemu.gw_utils._apply_postprocess_hds_timeseries('{0}')\n".format(config_file)
return frun_line |
python | def raw_urlsafe_b64encode(b):
'''Base64 encode using URL-safe encoding with padding removed.
@param b bytes to encode
@return bytes encoded
'''
b = to_bytes(b)
b = base64.urlsafe_b64encode(b)
b = b.rstrip(b'=') # strip padding
return b |
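A round-trip sketch: decoding requires re-adding the '=' padding that was stripped.

import base64
token = raw_urlsafe_b64encode(b'hello world')  # b'aGVsbG8gd29ybGQ'
padded = token + b'=' * (-len(token) % 4)
assert base64.urlsafe_b64decode(padded) == b'hello world'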
python | def set_wake_on_modem(enabled):
'''
Set whether or not the computer will wake from sleep when modem activity is
detected.
:param bool enabled: True to enable, False to disable. "On" and "Off" are
also acceptable values. Additionally you can pass 1 and 0 to represent
True and False respectively
:return: True if successful, False if not
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' power.set_wake_on_modem True
'''
state = salt.utils.mac_utils.validate_enabled(enabled)
cmd = 'systemsetup -setwakeonmodem {0}'.format(state)
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.confirm_updated(
state,
get_wake_on_modem,
) |
java | public static String getIP(final InetAddress inetAddress)
{
String ip = "";
ip = inetAddress.getHostAddress();
if (ip.equals(""))
{
final byte[] ipAddressInBytes = inetAddress.getAddress();
for (int i = 0; i < ipAddressInBytes.length; i++)
{
if (i > 0)
{
ip += ".";
}
ip += ipAddressInBytes[i] & 0xFF;
}
}
return ip;
} |
python | def explain_prediction(estimator, doc, **kwargs):
"""
Return an explanation of an estimator prediction.
:func:`explain_prediction` is not doing any work itself, it dispatches
to a concrete implementation based on estimator type.
Parameters
----------
estimator : object
Estimator instance. This argument must be positional.
doc : object
Example to run estimator on. Estimator makes a prediction for this
example, and :func:`explain_prediction` tries to show information
about this prediction. Pass a single element, not a one-element array:
if you fitted your estimator on ``X``, that would be ``X[i]`` for
most containers, and ``X.iloc[i]`` for ``pandas.DataFrame``.
top : int or (int, int) tuple, optional
Number of features to show. When ``top`` is int, ``top`` features with
a highest absolute values are shown. When it is (pos, neg) tuple,
no more than ``pos`` positive features and no more than ``neg``
negative features is shown. ``None`` value means no limit (default).
This argument may be supported or not, depending on estimator type.
top_targets : int, optional
Number of targets to show. When ``top_targets`` is provided,
only specified number of targets with highest scores are shown.
Negative value means targets with lowest scores are shown.
Must not be given with ``targets`` argument.
``None`` value means no limit: all targets are shown (default).
This argument may be supported or not, depending on estimator type.
target_names : list[str] or {'old_name': 'new_name'} dict, optional
Names of targets or classes. This argument can be used to provide
human-readable class/target names for estimators which don't expose
class names themselves. It can also be used to rename estimator-provided
classes before displaying them.
This argument may be supported or not, depending on estimator type.
targets : list, optional
Order of class/target names to show. This argument can be also used
to show information only for a subset of classes. It should be a list
of class / target names which match either names provided by
an estimator or names defined in ``target_names`` parameter.
Must not be given with ``top_targets`` argument.
In case of binary classification you can use this argument to
set the class which probability or score should be displayed, with
an appropriate explanation. By default a result for predicted class
is shown. For example, you can use ``targets=[True]`` to always show
result for a positive class, even if the predicted label is False.
This argument may be supported or not, depending on estimator type.
feature_names : list, optional
A list of feature names. It allows to specify feature
names when they are not provided by an estimator object.
This argument may be supported or not, depending on estimator type.
feature_re : str, optional
Only feature names which match ``feature_re`` regex are returned
(more precisely, ``re.search(feature_re, x)`` is checked).
feature_filter : Callable[[str, float], bool], optional
Only feature names for which ``feature_filter`` function returns True
are returned. It must accept feature name and feature value.
Missing features always have a NaN value.
**kwargs: dict
Keyword arguments. All keyword arguments are passed to
concrete explain_prediction... implementations.
Returns
-------
Explanation
:class:`~.Explanation` result. Use one of the formatting functions from
:mod:`eli5.formatters` to print it in a human-readable form.
Explanation instances have repr which works well with
IPython notebook, but it can be a better idea to use
:func:`eli5.show_prediction` instead of :func:`eli5.explain_prediction`
if you work with IPython: :func:`eli5.show_prediction` allows to
customize formatting without a need to import :mod:`eli5.formatters`
functions.
"""
return Explanation(
estimator=repr(estimator),
error="estimator %r is not supported" % estimator,
) |
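A hedged usage sketch with a scikit-learn classifier, a type for which eli5 ships a concrete implementation; the dataset and estimator are illustrative:

import eli5
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=1000).fit(X, y)
explanation = eli5.explain_prediction(clf, X[0], top=3)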
python | def write_ndarray(self, result, dst_paths, nodata=None, compress='lzw'):
"""Write results (ndarray) to disc."""
assert len(dst_paths) == result.shape[2]
assert result.shape[0] == self._height
assert result.shape[1] == self._width
assert result.shape[2] == len(dst_paths)
with rasterio.open(self._mrio._get_template_for_given_resolution(self._mrio.dst_res, "path")) as src_layer:
pass # later we need src_layer for src_layer.window_transform(win)
for i, pth in enumerate(dst_paths):
dst_path_chunk = self.get_chunk_path_from_layer_path(pth, self.ji)
result_layer_i = result[:, :, [i]]
assert result_layer_i.shape[2] == 1
kwargs = self._mrio._get_template_for_given_resolution(
res=self._mrio.dst_res, return_="meta").copy()
kwargs.update({"driver": "GTiff",
"compress": compress,
"nodata": nodata,
"height": self._height,
"width": self._width,
"dtype": result_layer_i.dtype,
"transform": src_layer.window_transform(self._window)})
with rasterio.open(dst_path_chunk, "w", **kwargs) as dst:
dst.write(result_layer_i[:, :, 0], 1) |
python | def get_block_operator(self):
"""Determine the immediate parent boolean operator for a filter"""
# Top level operator is `and`
block_stack = ['and']
for f in self.manager.iter_filters(block_end=True):
if f is None:
block_stack.pop()
continue
if f.type in ('and', 'or', 'not'):
block_stack.append(f.type)
if f == self:
break
return block_stack[-1] |
java | public static <T> Collector<T, ?, Optional<Long>> rank(T value, Comparator<? super T> comparator) {
return rankBy(value, t -> t, comparator);
} |
python | def get_object(self, view_name, view_args, view_kwargs):
"""
Return the object corresponding to a matched URL.
Takes the matched URL conf arguments, and should return an
object instance, or raise an `ObjectDoesNotExist` exception.
"""
lookup_value = view_kwargs.get(self.lookup_url_kwarg)
parent_lookup_value = view_kwargs.get(self.parent_lookup_field)
lookup_kwargs = {
self.lookup_field: lookup_value,
}
# Try to lookup parent attr
if parent_lookup_value:
lookup_kwargs.update({self.parent_lookup_field: parent_lookup_value})
return self.get_queryset().get(**lookup_kwargs) |
java | @Override
public boolean getPadding(Rect padding) {
padding.set(mPadding, mPadding, mPadding, mPadding);
return mPadding != 0;
} |
python | def unhandled_keys(self, size, key):
"""
Override this method to intercept keystrokes in subclasses.
Default behavior: Toggle flagged on space, ignore other keys.
"""
if key == " ":
if not self.flagged:
self.display.new_files.append(self.get_node().get_value())
else:
self.display.new_files.remove(self.get_node().get_value())
self.flagged = not self.flagged
self.update_w()
self.display.update_status()
else:
return key |
python | def wikipedia_search(query, lang="en", max_result=1):
"""
https://www.mediawiki.org/wiki/API:Opensearch
"""
query = any2unicode(query)
params = {
"action":"opensearch",
"search": query,
"format":"json",
#"formatversion":2,
#"namespace":0,
"suggest":"true",
"limit": 10
}
urlBase = "https://{}.wikipedia.org/w/api.php?".format(lang)
url = urlBase + urllib.urlencode(any2utf8(params))
#logging.info(url)
r = requests.get(url)
jsonData = json.loads(r.content)
#logging.info(jsonData)
items = []
ret = {"query":query, "itemList":items}
for idx, label in enumerate(jsonData[1][0:max_result]):
description = jsonData[2][idx]
url = jsonData[3][idx]
item = {
"name": label,
"description":description,
"url": url,
}
items.append(item)
return ret |
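An example call (Python 2 style, matching the snippet's urllib.urlencode usage):

result = wikipedia_search(u"Alan Turing", lang="en", max_result=2)
for item in result["itemList"]:
    print(item["name"], item["url"])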
python | def _json_min(src, dest=''):
"""Minify JSON
Args:
src: json string or path-to-file with text to minify (mandatory)
dest: path-to-file to save minified JSON string; (optional)
- if file doesn't exist it is created automatically;
- if this arg is skipped the function returns a string
Returns: 1) minified JSON string if dest is not provided
2) length of saved file if dest is provided
Example:
json.min('path/to/file.json')
json.min('path/to/file.json', 'path/to/save/result.json')
"""
if dest == '':
return _json_min_exec(_text(src)) # returns string
else:
if type(dest) is bool: # dest is skipped; a custom pattern was provided in its place
return _json_min_exec(_text(src), dest)
else:
with open(dest, 'w') as f2:
return f2.write(_json_min_exec(_text(src))) |
java | public Object getPropertyValue(String propertyName) throws BeansException {
if (PropertyAccessorUtils.isIndexedProperty(propertyName)) {
return getIndexedPropertyValue(propertyName);
}
else {
return getSimplePropertyValue(propertyName);
}
} |
java | public void marshall(StageDeclaration stageDeclaration, ProtocolMarshaller protocolMarshaller) {
if (stageDeclaration == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(stageDeclaration.getName(), NAME_BINDING);
protocolMarshaller.marshall(stageDeclaration.getBlockers(), BLOCKERS_BINDING);
protocolMarshaller.marshall(stageDeclaration.getActions(), ACTIONS_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
python | def create_plateaus(data, edges, plateau_size, plateau_vals, plateaus=None):
'''Creates plateaus of constant value in the data.'''
nodes = set(edges.keys())
if plateaus is None:
plateaus = []
for i in range(len(plateau_vals)):
if len(nodes) == 0:
break
node = np.random.choice(list(nodes))
nodes.remove(node)
plateau = [node]
available = set(edges[node]) & nodes
while len(nodes) > 0 and len(available) > 0 and len(plateau) < plateau_size:
node = np.random.choice(list(available))
plateau.append(node)
available |= nodes & set(edges[node])
available.remove(node)
nodes -= set(plateau)
plateaus.append(set(plateau))
for p,v in zip(plateaus, plateau_vals):
data[np.array(list(p), dtype=int)] = v
return plateaus |
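A small sketch of the function above on a six-node chain graph, assuming the module imports numpy as np; the seed and plateau values are arbitrary:

import numpy as np

np.random.seed(0)
data = np.zeros(6)
# Chain 0-1-2-3-4-5 expressed in the adjacency-dict format `edges` expects.
edges = {0: [1], 1: [0, 2], 2: [1, 3], 3: [2, 4], 4: [3, 5], 5: [4]}
plateaus = create_plateaus(data, edges, plateau_size=3, plateau_vals=[1.0, -1.0])
print(data)      # runs of 1.0 and -1.0 over the chosen plateau nodes
print(plateaus)  # the node sets that received each value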
java | public Future<Channel> renegotiate(final Promise<Channel> promise) {
if (promise == null) {
throw new NullPointerException("promise");
}
ChannelHandlerContext ctx = this.ctx;
if (ctx == null) {
throw new IllegalStateException();
}
EventExecutor executor = ctx.executor();
if (!executor.inEventLoop()) {
executor.execute(new Runnable() {
@Override
public void run() {
renegotiateOnEventLoop(promise);
}
});
return promise;
}
renegotiateOnEventLoop(promise);
return promise;
} |
java | public void setChannelSummaries(java.util.Collection<ChannelSummary> channelSummaries) {
if (channelSummaries == null) {
this.channelSummaries = null;
return;
}
this.channelSummaries = new java.util.ArrayList<ChannelSummary>(channelSummaries);
} |
java | @SuppressWarnings("unchecked")
public EList<IfcRelDefinesByProperties> getPropertyDefinitionOf() {
return (EList<IfcRelDefinesByProperties>) eGet(
Ifc2x3tc1Package.Literals.IFC_PROPERTY_SET_DEFINITION__PROPERTY_DEFINITION_OF, true);
} |
python | def tower_layout(graph, height='freeenergy', scale=None, center=None, dim=2):
"""
Position all nodes of graph stacked on top of each other.
Parameters
----------
graph : `networkx.Graph` or `list` of nodes
A position will be assigned to every node in graph.
height : `str` or `None`, optional
The node attribute that holds the numerical value used for the node
height. This defaults to ``'freeenergy'``. If `None`, all node heights
are set to zero.
scale : number, optional
Scale factor for positions.
center : array-like, optional
Coordinate pair around which to center the layout. Default is the
origin.
dim : `int`
Dimension of layout. If `dim` > 2, the remaining dimensions are set to
zero in the returned positions.
Returns
-------
pos : mapping
A mapping of positions keyed by node.
Examples
--------
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.atoms import create_data, read_cclib
>>> from pyrrole.drawing import tower_layout
>>> data = create_data(
... read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
... read_cclib("data/acetate/[email protected]", "AcOH(aq)"))
>>> digraph = (ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
... .to_digraph())
>>> layout = tower_layout(digraph)
>>> layout['AcOH(g)']
array([ 0. , -228.56450866])
Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:
>>> layout = tower_layout(digraph, scale=1)
>>> layout['AcOH(g)'][1] <= 1.
True
"""
    # TODO: private functions of other packages should not be used.
graph, center = _nx.drawing.layout._process_params(graph, center, dim)
num_nodes = len(graph)
if num_nodes == 0:
return {}
elif num_nodes == 1:
return {_nx.utils.arbitrary_element(graph): center}
paddims = max(0, (dim - 2))
if height is None:
y = _np.zeros(len(graph))
else:
y = _np.array([data for node, data in graph.nodes(data=height)])
pos_arr = _np.column_stack([_np.zeros((num_nodes, 1)), y,
_np.zeros((num_nodes, paddims))])
if scale is not None:
pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,
scale=scale) + center
pos = dict(zip(graph, pos_arr))
# TODO: make test
return pos |
python | def read_data_from_bytes(fileContent):
"""
Takes the binary data stored in the binary string provided and extracts the
data for each channel that was saved, along with the sample rate and length
of the data array.
Parameters
----------
fileContent : bytes
bytes object containing the data from a .bin file exported from
the saleae data logger.
Returns
-------
ChannelData : list
List containing a list which contains the data from each channel
LenOf1Channel : int
The length of the data in each channel
NumOfChannels : int
The number of channels saved
    SampleTime : float
        The time between samples (in seconds); the sample rate (in Hz)
        is its reciprocal
"""
    TotalDataLen = struct.unpack('Q', fileContent[:8])[0]     # unsigned long long (8 bytes)
    NumOfChannels = struct.unpack('I', fileContent[8:12])[0]  # unsigned int (4 bytes)
    SampleTime = struct.unpack('d', fileContent[12:20])[0]    # double (8 bytes)
    AllChannelData = struct.unpack("f" * ((len(fileContent) - 20) // 4), fileContent[20:])
    # Skip the 20 header bytes; the rest of the buffer is the body. Dividing the
    # body length by 4 gives the number of floats (4 bytes = 32 bits = sizeof(float)).
LenOf1Channel = int(TotalDataLen/NumOfChannels)
ChannelData = list(get_chunks(AllChannelData, LenOf1Channel))
return ChannelData, LenOf1Channel, NumOfChannels, SampleTime |
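A hedged usage sketch; the filename is a placeholder, and `struct` plus the `get_chunks` helper are assumed to be defined at module level in the original source:

with open("capture.bin", "rb") as f:  # hypothetical Saleae .bin export
    content = f.read()
channels, samples_per_channel, num_channels, sample_time = read_data_from_bytes(content)
print("%d channels, %d samples each, %.1f Hz" %
      (num_channels, samples_per_channel, 1.0 / sample_time))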
python | def decode_page(page):
"""
Return unicode string of geocoder results.
Nearly all services use JSON, so assume UTF8 encoding unless the
response specifies otherwise.
"""
if hasattr(page, 'read'): # urllib
if py3k:
encoding = page.headers.get_param("charset") or "utf-8"
else:
encoding = page.headers.getparam("charset") or "utf-8"
return text_type(page.read(), encoding=encoding)
else: # requests?
encoding = page.headers.get("charset") or "utf-8"
return text_type(page.content, encoding=encoding) |
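A sketch of both call paths, assuming the module-level `py3k` flag and `text_type` alias exist; the URL is a placeholder:

import requests
text = decode_page(requests.get("https://example.com/geocode"))  # requests path
# from urllib.request import urlopen
# text = decode_page(urlopen("https://example.com/geocode"))     # urllib path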
java | @SuppressWarnings("unchecked")
private static void pinStateChangeCallback(int pin, boolean state) {
Vector<GpioInterruptListener> listenersClone;
listenersClone = (Vector<GpioInterruptListener>) listeners.clone();
for (int i = 0; i < listenersClone.size(); i++) {
GpioInterruptListener listener = listenersClone.elementAt(i);
if(listener != null) {
GpioInterruptEvent event = new GpioInterruptEvent(listener, pin, state);
listener.pinStateChange(event);
}
}
//System.out.println("GPIO PIN [" + pin + "] = " + state);
} |
python | def add_watch_callback(self, *args, **kwargs):
"""
Watch a key or range of keys and call a callback on every event.
If timeout was declared during the client initialization and
the watch cannot be created during that time the method raises
a ``WatchTimedOut`` exception.
:param key: key to watch
:param callback: callback function
:returns: watch_id. Later it could be used for cancelling watch.
"""
try:
return self.watcher.add_callback(*args, **kwargs)
except queue.Empty:
raise exceptions.WatchTimedOut() |
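A sketch of typical use, assuming `client` is an already-initialised instance of the class this method belongs to; the key and callback are placeholders:

def on_event(event):
    # Invoked once per watch event on the key.
    print("watch event:", event)

watch_id = client.add_watch_callback("/config/feature-flag", on_event)
# watch_id can later be passed to the client's cancel-watch call, per the docstring.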
python | def run_steps_from_string(self, spec, language_name='en'):
""" Called from within step definitions to run other steps. """
caller = inspect.currentframe().f_back
line = caller.f_lineno - 1
fname = caller.f_code.co_filename
steps = parse_steps(spec, fname, line, load_language(language_name))
for s in steps:
self.run_step(s) |
java | public InputStream newInputStream() {
return new InputStream() {
private int pos = 0;
public int read() throws IOException {
synchronized(ByteBuffer.this) {
if(pos>=size) return -1;
return buf[pos++];
}
}
public int read(byte[] b, int off, int len) throws IOException {
synchronized(ByteBuffer.this) {
if(size==pos)
return -1;
int sz = Math.min(len,size-pos);
System.arraycopy(buf,pos,b,off,sz);
pos+=sz;
return sz;
}
}
public int available() throws IOException {
synchronized(ByteBuffer.this) {
return size-pos;
}
}
public long skip(long n) throws IOException {
synchronized(ByteBuffer.this) {
int diff = (int) Math.min(n,size-pos);
pos+=diff;
return diff;
}
}
};
} |
python | def _getInputImage (input,group=None):
""" Factory function to return appropriate imageObject class instance"""
# extract primary header and SCI,1 header from input image
sci_ext = 'SCI'
    exten = '[sci,1]'  # default; also needed by the fileutil fallback below
    if group in [None, '']:
        phdu = fits.getheader(input, memmap=False)
else:
# change to use fits more directly here?
if group.find(',') > 0:
grp = group.split(',')
if grp[0].isalpha():
grp = (grp[0],int(grp[1]))
else:
grp = int(grp[0])
else:
grp = int(group)
phdu = fits.getheader(input, memmap=False)
phdu.extend(fits.getheader(input, ext=grp, memmap=False))
# Extract the instrument name for the data that is being processed by Multidrizzle
_instrument = phdu['INSTRUME']
    # Determine the instrument detector in use. NICMOS is a special case because it does
    # not use the 'DETECTOR' keyword. It instead uses 'CAMERA' to identify which of its
    # 3 cameras is in use. All other instruments support the 'DETECTOR' keyword.
if _instrument == 'NICMOS':
_detector = phdu['CAMERA']
else:
try:
_detector = phdu['DETECTOR']
except KeyError:
# using the phdu as set above (fits.getheader) is MUCH faster and
# works for the majority of data; but fileutil handles waivered fits
phdu = fileutil.getHeader(input+exten)
_detector = phdu['DETECTOR'] # if this fails, let it throw
del phdu # just to keep clean
# Match up the instrument and detector with the right class
# only importing the instrument modules as needed.
try:
if _instrument == 'ACS':
from . import acsData
if _detector == 'HRC': return acsData.HRCInputImage(input,group=group)
if _detector == 'WFC': return acsData.WFCInputImage(input,group=group)
if _detector == 'SBC': return acsData.SBCInputImage(input,group=group)
if _instrument == 'NICMOS':
from . import nicmosData
if _detector == 1: return nicmosData.NIC1InputImage(input)
if _detector == 2: return nicmosData.NIC2InputImage(input)
if _detector == 3: return nicmosData.NIC3InputImage(input)
if _instrument == 'WFPC2':
from . import wfpc2Data
return wfpc2Data.WFPC2InputImage(input,group=group)
"""
if _detector == 1: return wfpc2Data.PCInputImage(input)
if _detector == 2: return wfpc2Data.WF2InputImage(input)
if _detector == 3: return wfpc2Data.WF3InputImage(input)
if _detector == 4: return wfpc2Data.WF4InputImage(input)
"""
if _instrument == 'STIS':
from . import stisData
if _detector == 'CCD': return stisData.CCDInputImage(input,group=group)
if _detector == 'FUV-MAMA': return stisData.FUVInputImage(input,group=group)
if _detector == 'NUV-MAMA': return stisData.NUVInputImage(input,group=group)
if _instrument == 'WFC3':
from . import wfc3Data
if _detector == 'UVIS': return wfc3Data.WFC3UVISInputImage(input,group=group)
if _detector == 'IR': return wfc3Data.WFC3IRInputImage(input,group=group)
except ImportError:
msg = 'No module implemented for '+str(_instrument)+'!'
raise ValueError(msg)
    # If a supported instrument is not detected, raise an exception
    # with the error message below.
msg = 'Instrument: ' + str(_instrument) + '/' + str(_detector) + ' not yet supported!'
raise ValueError(msg) |
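A hedged call sketch; the filename is hypothetical, and the concrete class returned depends on the INSTRUME/DETECTOR keywords in the FITS header:

# e.g. an ACS/WFC exposure would come back as acsData.WFCInputImage
img = _getInputImage("j8c0d1011_flt.fits", group="1")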
java | public static DenseVector fromCSV(String csv) {
return Vector.fromCSV(csv).to(Vectors.DENSE);
} |
python | def suspend(self):
"""
Suspends the thread execution.
@rtype: int
@return: Suspend count. If zero, the thread is running.
"""
hThread = self.get_handle(win32.THREAD_SUSPEND_RESUME)
if self.is_wow64():
# FIXME this will be horribly slow on XP 64
# since it'll try to resolve a missing API every time
try:
return win32.Wow64SuspendThread(hThread)
except AttributeError:
pass
return win32.SuspendThread(hThread) |
java | @Override
public boolean canServe(URL refUrl) {
if (refUrl == null || !this.getPath().equals(refUrl.getPath())) {
return false;
}
if(!protocol.equals(refUrl.getProtocol())) {
return false;
}
if (!Constants.NODE_TYPE_SERVICE.equals(this.getParameter(URLParamType.nodeType.getName()))) {
return false;
}
String version = getParameter(URLParamType.version.getName(), URLParamType.version.getValue());
String refVersion = refUrl.getParameter(URLParamType.version.getName(), URLParamType.version.getValue());
if (!version.equals(refVersion)) {
return false;
}
// check serialize
String serialize = getParameter(URLParamType.serialize.getName(), URLParamType.serialize.getValue());
String refSerialize = refUrl.getParameter(URLParamType.serialize.getName(), URLParamType.serialize.getValue());
if (!serialize.equals(refSerialize)) {
return false;
}
        // Do not check group, since cross-group calls must be supported
return true;
} |
java | @Override
public final AItemSpecifics<T, ID> process(
final Map<String, Object> pReqVars,
final AItemSpecifics<T, ID> pEntity,
final IRequestData pRequestData) throws Exception {
String fileToUploadName = (String) pRequestData
.getAttribute("fileToUploadName");
OutputStream outs = null;
InputStream ins = null;
try {
String filePath;
if (pEntity.getStringValue1() == null) { //in base language:
String ft = String.valueOf(new Date().getTime());
filePath = this.webAppPath + File.separator
+ this.uploadDirectory + File.separator
+ ft + fileToUploadName;
pEntity.setStringValue2(filePath);
pEntity.setStringValue1(this.uploadDirectory
+ "/" + ft + fileToUploadName);
} else { //I18N files:
String fileLang = pRequestData.getParameter("fileLang");
if (pEntity.getStringValue3() == null
|| !pEntity.getStringValue3().contains(fileLang)) {
throw new ExceptionWithCode(ExceptionWithCode.WRONG_PARAMETER,
"notset_language");
}
int idhHtml = pEntity.getStringValue1().indexOf(".html");
String urlWithoutHtml = pEntity.getStringValue1().substring(0, idhHtml);
filePath = this.webAppPath + File.separator
+ urlWithoutHtml + "_" + fileLang + ".html";
}
ins = (InputStream) pRequestData.getAttribute("fileToUploadInputStream");
outs = new BufferedOutputStream(new FileOutputStream(filePath));
byte[] data = new byte[1024];
int count;
while ((count = ins.read(data)) != -1) {
outs.write(data, 0, count);
}
outs.flush();
} finally {
if (ins != null) {
ins.close();
}
if (outs != null) {
outs.close();
}
}
getSrvOrm().updateEntity(pReqVars, pEntity);
return pEntity;
} |
python | def discoverTokenEndpoints(domain, content=None, look_in={'name': 'link'}, test_urls=True, validateCerts=True):
"""Find the token for the given domain.
Only scan html element matching all criteria in look_in.
optionally the content to be scanned can be given as an argument.
:param domain: the URL of the domain to handle
:param content: the content to be scanned for the endpoint
:param look_in: dictionary with name, id and class_. only element matching all of these will be scanned
:param test_urls: optional flag to test URLs for validation
:param validateCerts: optional flag to enforce HTTPS certificates if present
:rtype: list of endpoints
"""
return discoverEndpoint(domain, ('token_endpoint',), content, look_in, test_urls, validateCerts) |
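A minimal usage sketch; the domain is a placeholder and the result depends on what the page advertises:

endpoints = discoverTokenEndpoints("https://example.com")
if endpoints:
    token_endpoint = endpoints[0]  # hypothetical, e.g. "https://example.com/token"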
python | def send(self_p, dest):
"""
Send message to destination socket, and destroy the message after sending
it successfully. If the message has no frames, sends nothing but destroys
the message anyhow. Nullifies the caller's reference to the message (as
it is a destructor).
"""
return lib.zmsg_send(byref(zmsg_p.from_param(self_p)), dest) |
java | public ClientResponse updateApplicationCatalog(File catalogPath, File deploymentPath)
throws IOException, NoConnectionsException, ProcCallException
{
return Client.updateApplicationCatalog(catalogPath, deploymentPath);
} |