language | func_code_string
---|---
python | async def getHiveKey(self, path):
''' Get the value of a key in the cell default hive '''
perm = ('hive:get',) + path
self.user.allowed(perm)
return await self.cell.hive.get(path) |
python | def get_flat_models_from_fields(fields: Sequence[Field]) -> Set[Type['main.BaseModel']]:
"""
Take a list of Pydantic ``Field``s (from a model) that could have been declared as subclasses of ``BaseModel``
(so, any of them could be a submodel), and generate a set with their models and all the sub-models in the tree.
I.e. if you pass the fields of a model ``Foo`` (subclass of ``BaseModel``) as ``fields``, and one of them has a
field of type ``Bar`` (also subclass of ``BaseModel``) and that model ``Bar`` has a field of type ``Baz`` (also
subclass of ``BaseModel``), the return value will be ``set([Foo, Bar, Baz])``.
:param fields: a list of Pydantic ``Field``s
:return: a set with any model declared in the fields, and all their sub-models
"""
flat_models: Set[Type['main.BaseModel']] = set()
for field in fields:
flat_models |= get_flat_models_from_field(field)
return flat_models |
java | public static <T extends ImageBase<T>>
TrackerObjectQuad<T> meanShiftComaniciu2003(ConfigComaniciu2003 config, ImageType<T> imageType ) {
TrackerMeanShiftComaniciu2003<T> alg = FactoryTrackerObjectAlgs.meanShiftComaniciu2003(config,imageType);
return new Comaniciu2003_to_TrackerObjectQuad<>(alg, imageType);
} |
java | void setGeometryUserIndex(int geom, int index, int value) {
AttributeStreamOfInt32 stream = m_geometry_indices.get(index);
int pindex = getGeometryIndex_(geom);
if (pindex >= stream.size())
stream.resize(Math.max((int) (pindex * 1.25), (int) 16), -1);
stream.write(pindex, value);
} |
java | public void marshall(DeleteSubscriptionFilterRequest deleteSubscriptionFilterRequest, ProtocolMarshaller protocolMarshaller) {
if (deleteSubscriptionFilterRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(deleteSubscriptionFilterRequest.getLogGroupName(), LOGGROUPNAME_BINDING);
protocolMarshaller.marshall(deleteSubscriptionFilterRequest.getFilterName(), FILTERNAME_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | public boolean checkKeyBelongsToNode(byte[] key, int nodeId) {
List<Integer> nodePartitions = cluster.getNodeById(nodeId).getPartitionIds();
List<Integer> replicatingPartitions = getReplicatingPartitionList(key);
// remove all partitions from the list, except those that belong to the
// node
replicatingPartitions.retainAll(nodePartitions);
return replicatingPartitions.size() > 0;
} |
java | static SourceFile extractSourceMap(
SourceFile jsFile, String sourceMapURL, boolean parseInlineSourceMaps) {
// Javascript version of the compiler can only deal with inline sources.
if (sourceMapURL.startsWith(BASE64_URL_PREFIX)) {
byte[] data =
BaseEncoding.base64().decode(sourceMapURL.substring(BASE64_URL_PREFIX.length()));
String source = new String(data, StandardCharsets.UTF_8);
return SourceFile.fromCode(jsFile.getName() + ".inline.map", source);
}
return null;
} |
python | def update_translations(request):
"""
Update translations: deletes orphan translations and creates empty translations for new objects in the database.
"""
FieldTranslation.delete_orphan_translations()
num_translations = FieldTranslation.update_translations()
return render_to_response('modeltranslation/admin/update_translations_ok.html',{"num_translations":num_translations}, RequestContext(request)) |
python | def save_diskspace(fname, reason, config):
"""Overwrite a file in place with a short message to save disk.
This keeps files as a sanity check on processes working, but saves
disk by replacing them with a short message.
"""
if config["algorithm"].get("save_diskspace", False):
for ext in ["", ".bai"]:
if os.path.exists(fname + ext):
with open(fname + ext, "w") as out_handle:
out_handle.write("File removed to save disk space: %s" % reason) |
python | def log(self, msg):
'''Log a message, prefixed with a timestamp.
If a log file was specified in the constructor, it is written there,
otherwise it goes to stdout.
'''
if self.logfile:
fd = self.logfile.fileno()
else:
fd = sys.stdout.fileno()
os.write(fd, ('%.3f %s\n' % (time.time(), msg)).encode('UTF-8')) |
java | @Override
public void setValue(Object value) {
super.setValue(value);
if (parent != null) {
Object parentValue = parent.getValue();
if (parentValue != null) {
writeToObject(parentValue);
parent.setValue(parentValue);
}
}
if (value != null) {
for (Property subProperty : subProperties) {
subProperty.readFromObject(value);
}
}
} |
python | def create_flat_start_model(feature_filename,
state_stay_probabilities,
symbol_list,
output_model_directory,
output_prototype_filename,
htk_trace):
"""
Creates a flat start model by using HCompV to compute the global mean and variance.
Then uses these global mean and variance to create an N-state model for each symbol in the given list.
:param feature_filename: The filename containing the audio and feature file pairs
:param output_model_directory: The directory where to write the created model
:param output_prototype_filename: The prototype model filename
:param htk_trace: Trace level for HTK
:rtype : None
"""
# Create a prototype model
create_prototype_model(feature_filename,
output_prototype_filename,
state_stay_probabilities=state_stay_probabilities)
# Compute the global mean and variance
config.htk_command("HCompV -A -D -T {} -f 0.01 "
"-S {} -m -o {} -M {} {}".format(htk_trace,
feature_filename,
'proto',
output_model_directory,
output_prototype_filename))
# Create an hmmdefs using the global mean and variance for all states and symbols
# Duplicate the model 'proto' -> symbol_list
proto_model_filename = config.path(output_model_directory, 'proto')
model = htk.load_model(proto_model_filename)
model = htk_model_utils.map_hmms(model, {'proto': symbol_list})
# vFloors -> macros
vfloors_filename = config.path(output_model_directory, 'vFloors')
variance_model = htk.load_model(vfloors_filename)
model['macros'] += variance_model['macros']
macros, hmmdefs = htk_model_utils.split_model(model)
htk.save_model(macros, config.path(output_model_directory, 'macros'))
htk.save_model(hmmdefs, config.path(output_model_directory, 'hmmdefs')) |
python | def main():
"""Provide the program's entry point when directly executed."""
if len(sys.argv) != 2:
print("Usage: {} USERNAME".format(sys.argv[0]))
return 1
caching_requestor = prawcore.Requestor(
"prawcore_device_id_auth_example", session=CachingSession()
)
authenticator = prawcore.TrustedAuthenticator(
caching_requestor,
os.environ["PRAWCORE_CLIENT_ID"],
os.environ["PRAWCORE_CLIENT_SECRET"],
)
authorizer = prawcore.ReadOnlyAuthorizer(authenticator)
authorizer.refresh()
user = sys.argv[1]
with prawcore.session(authorizer) as session:
data1 = session.request("GET", "/api/v1/user/{}/trophies".format(user))
with prawcore.session(authorizer) as session:
data2 = session.request("GET", "/api/v1/user/{}/trophies".format(user))
for trophy in data1["data"]["trophies"]:
description = trophy["data"]["description"]
print(
"Original:",
trophy["data"]["name"]
+ (" ({})".format(description) if description else ""),
)
for trophy in data2["data"]["trophies"]:
description = trophy["data"]["description"]
print(
"Cached:",
trophy["data"]["name"]
+ (" ({})".format(description) if description else ""),
)
print(
"----\nCached == Original:",
data2["data"]["trophies"] == data2["data"]["trophies"],
)
return 0 |
java | private void addConnectionRequest() {
if (totalConnection.get() < options.maxPoolSize && poolState.get() == POOL_STATE_OK) {
//ensure at least one worker is available in case the previous one timed out
connectionAppender.prestartCoreThread();
connectionAppenderQueue.offer(() -> {
if ((totalConnection.get() < options.minPoolSize || pendingRequestNumber.get() > 0)
&& totalConnection.get() < options.maxPoolSize) {
try {
addConnection();
} catch (SQLException sqle) {
//eat
}
}
});
}
} |
java | public static <T1, T2, T3, T4, R> Func4<T1, T2, T3, T4, Observable<R>> toAsyncThrowing(final ThrowingFunc4<? super T1, ? super T2, ? super T3, ? super T4, ? extends R> func, final Scheduler scheduler) {
return new Func4<T1, T2, T3, T4, Observable<R>>() {
@Override
public Observable<R> call(T1 t1, T2 t2, T3 t3, T4 t4) {
return startCallable(ThrowingFunctions.toCallable(func, t1, t2, t3, t4), scheduler);
}
};
} |
python | def transfer_learning_tuner(self, additional_parents=None, estimator=None):
"""Creates a new ``HyperparameterTuner`` by copying the request fields from the provided parent to the new
instance of ``HyperparameterTuner``. Followed by addition of warm start configuration with the type as
"TransferLearning" and parents as the union of provided list of ``additional_parents`` and the ``self``.
Also, training image in the new tuner's estimator is updated with the provided ``training_image``.
Args:
additional_parents (set{str}): Set of additional parents along with the self to be used in warm starting
the transfer learning tuner.
estimator (sagemaker.estimator.EstimatorBase): An estimator object that has been initialized with
the desired configuration. There does not need to be a training job associated with this instance.
Returns:
sagemaker.tuner.HyperparameterTuner: ``HyperparameterTuner`` instance which can be used to launch transfer
learning tuning job.
Examples:
>>> parent_tuner = HyperparameterTuner.attach(tuning_job_name="parent-job-1")
>>> transfer_learning_tuner = parent_tuner.transfer_learning_tuner(additional_parents={"parent-job-2"})
Later On:
>>> transfer_learning_tuner.fit(inputs={})
"""
return self._create_warm_start_tuner(additional_parents=additional_parents,
warm_start_type=WarmStartTypes.TRANSFER_LEARNING,
estimator=estimator) |
python | def process(self, item_session: ItemSession, request, response, file_writer_session):
'''Process PhantomJS.
Coroutine.
'''
if response.status_code != 200:
return
if not HTMLReader.is_supported(request=request, response=response):
return
_logger.debug('Starting PhantomJS processing.')
self._file_writer_session = file_writer_session
# FIXME: this is a quick hack for crashes. See #137.
attempts = int(os.environ.get('WPULL_PHANTOMJS_TRIES', 5))
for dummy in range(attempts):
try:
yield from self._run_driver(item_session, request, response)
except asyncio.TimeoutError:
_logger.warning(_('Waiting for page load timed out.'))
break
except PhantomJSCrashed as error:
_logger.exception(__('PhantomJS crashed: {}', error))
else:
break
else:
_logger.warning(__(
_('PhantomJS failed to fetch ‘{url}’. I am sorry.'),
url=request.url_info.url
)) |
python | def saveDirectory(alias):
"""save a directory to a certain alias/nickname"""
if not settings.platformCompatible():
return False
dataFile = open(settings.getDataFile(), "wb")
currentDirectory = os.path.abspath(".")
directory = {alias : currentDirectory}
pickle.dump(directory, dataFile)
dataFile.close()
speech.success(alias + " will now link to " + currentDirectory + ".")
speech.success("Tip: use 'hallie go to " + alias + "' to change to this directory.") |
java | public static String describeExtensionContext(StructureDefinition ext) {
CommaSeparatedStringBuilder b = new CommaSeparatedStringBuilder();
for (StringType t : ext.getContext())
b.append(t.getValue());
if (!ext.hasContextType())
throw new Error("no context type on "+ext.getUrl());
switch (ext.getContextType()) {
case DATATYPE: return "Use on data type: "+b.toString();
case EXTENSION: return "Use on extension: "+b.toString();
case RESOURCE: return "Use on element: "+b.toString();
case MAPPING: return "Use where element has mapping: "+b.toString();
default:
return "??";
}
} |
python | def meth_set_acl(args):
""" Assign an ACL role to a list of users for a workflow. """
acl_updates = [{"user": user, "role": args.role} \
for user in set(expand_fc_groups(args.users)) \
if user != fapi.whoami()]
id = args.snapshot_id
if not id:
# get the latest snapshot_id for this method from the methods repo
r = fapi.list_repository_methods(namespace=args.namespace,
name=args.method)
fapi._check_response_code(r, 200)
versions = r.json()
if len(versions) == 0:
if fcconfig.verbosity:
eprint("method {0}/{1} not found".format(args.namespace,
args.method))
return 1
latest = sorted(versions, key=lambda m: m['snapshotId'])[-1]
id = latest['snapshotId']
r = fapi.update_repository_method_acl(args.namespace, args.method, id,
acl_updates)
fapi._check_response_code(r, 200)
if fcconfig.verbosity:
print("Updated ACL for {0}/{1}:{2}".format(args.namespace, args.method,
id))
return 0 |
python | def _modeldesc_from_dict(self, d):
"""Return a string representation of a patsy ModelDesc object"""
lhs_termlist = [Term([LookupFactor(d['lhs_termlist'][0])])]
rhs_termlist = []
for name in d['rhs_termlist']:
if name == '':
rhs_termlist.append(Term([]))
else:
rhs_termlist.append(Term([LookupFactor(name)]))
md = ModelDesc(lhs_termlist, rhs_termlist)
return md |
python | def CreateMuskingumKfacFile(in_drainage_line,
river_id,
length_id,
slope_id,
celerity,
formula_type,
in_connectivity_file,
out_kfac_file,
length_units="km",
slope_percentage=False,
file_geodatabase=None):
r"""
Creates the Kfac file for calibration.
The improved methods using slope to generate values
for Kfac were used here:
Tavakoly, A. A., A. D. Snow, C. H. David, M. L. Follum, D. R. Maidment,
and Z.-L. Yang, (2016) "Continental-Scale River Flow Modeling of the
Mississippi River Basin Using High-Resolution NHDPlus Dataset",
Journal of the American Water Resources Association (JAWRA) 1-22.
DOI: 10.1111/1752-1688.12456
Formula Type Options:
1. :math:`Kfac_n = \frac{RiverLength_n}{Celerity_n}`
2. :math:`Kfac_n = \eta*\frac{RiverLength_n}{\sqrt{RiverSlope_n}}`
3. :math:`Kfac_n = \eta*\frac{RiverLength_n}{\sqrt{RiverSlope_n}}\left[0.05, 0.95\right]`
Where:
:math:`a = \frac{\sum_{n=1}^{r} \frac{RiverLength_n}{Celerity_n}}{r}`
:math:`b = \frac{\sum_{n=1}^{r} \frac{RiverLength_n}{\sqrt{RiverSlope_n}}}{r}`
:math:`\eta = \frac{a}{b}`
r = Number of river segments.
Parameters
----------
in_drainage_line: str
Path to the stream network (i.e. Drainage Line) shapefile.
river_id: str
The name of the field with the river ID
(Ex. 'HydroID', 'COMID', or 'LINKNO').
length_id: str
The field name containing the length of the river segment
(Ex. 'LENGTHKM' or 'Length').
slope_id: str
The field name containing the slope of the river segment
(Ex. 'Avg_Slope' or 'Slope').
celerity: float
The flow wave celerity for the watershed in meters per second.
1 km/hr or 1000.0/3600.0 m/s is a reasonable value if unknown.
formula_type: int
An integer representing the formula type to use when calculating kfac.
in_connectivity_file: str
The path to the RAPID connectivity file.
out_kfac_file: str
The path to the output kfac file.
length_units: str, optional
The units for the length_id field. Supported types are "m" for meters
and "km" for kilometers.
slope_percentage: bool, optional
If True, it assumes the slope given is in percentage and will
divide by 100. Default is False.
file_geodatabase: str, optional
Path to the file geodatabase. If you use this option, in_drainage_line
is the name of the stream network feature class
(WARNING: Not always stable with GDAL).
Example::
from RAPIDpy.gis.muskingum import CreateMuskingumKfacFile
CreateMuskingumKfacFile(
in_drainage_line='/path/to/drainageline.shp',
river_id='LINKNO',
length_id='Length',
slope_id='Slope',
celerity=1000.0/3600.0,
formula_type=3,
in_connectivity_file='/path/to/rapid_connect.csv',
out_kfac_file='/path/to/kfac.csv',
length_units="m",
)
""" # noqa
ogr_drainage_line_shapefile_lyr, ogr_drainage_line_shapefile = \
open_shapefile(in_drainage_line, file_geodatabase)
number_of_features = ogr_drainage_line_shapefile_lyr.GetFeatureCount()
river_id_list = np.zeros(number_of_features, dtype=np.int32)
length_list = \
np.zeros(number_of_features, dtype=np.float32)
slope_list = np.zeros(number_of_features, dtype=np.float32)
for feature_idx, drainage_line_feature in \
enumerate(ogr_drainage_line_shapefile_lyr):
river_id_list[feature_idx] = drainage_line_feature.GetField(river_id)
length = drainage_line_feature.GetField(length_id)
if length is not None:
length_list[feature_idx] = length
slope = drainage_line_feature.GetField(slope_id)
if slope is not None:
slope_list[feature_idx] = slope
del ogr_drainage_line_shapefile
if slope_percentage:
slope_list /= 100.0
if length_units == "m":
length_list /= 1000.0
elif length_units != "km":
raise Exception("Invalid length units supplied. "
"Supported units are m and km.")
connectivity_table = np.loadtxt(in_connectivity_file,
delimiter=",",
ndmin=2,
dtype=int)
length_slope_array = []
kfac2_array = []
if formula_type == 1:
log("River Length/Celerity")
elif formula_type == 2:
log("Eta*River Length/Sqrt(River Slope)")
elif formula_type == 3:
log("Eta*River Length/Sqrt(River Slope) [0.05, 0.95]")
else:
raise Exception("Invalid formula type. Valid range: 1-3 ...")
with open_csv(out_kfac_file, 'w') as kfacfile:
kfac_writer = csv_writer(kfacfile)
for row in connectivity_table:
stream_id = int(float(row[0]))
stream_id_index = river_id_list == stream_id
# find the length
stream_length = length_list[stream_id_index] * 1000.0
if formula_type >= 2:
# find the slope
stream_slope = slope_list[stream_id_index]
if stream_slope <= 0:
# if no slope, take average of upstream
# and downstream to get it
next_down_id = int(float(row[1]))
next_down_slope = 0
try:
next_down_index = \
np.where(river_id_list == next_down_id)[0][0]
next_down_slope = slope_list[next_down_index]
except IndexError:
pass
next_up_id = int(float(row[3]))
next_up_slope = 0
try:
next_up_index = \
np.where(river_id_list == next_up_id)[0][0]
next_up_slope = slope_list[next_up_index]
except IndexError:
pass
stream_slope = (next_down_slope + next_up_slope) / 2.0
if stream_slope <= 0:
# if still no slope, set to 0.001
stream_slope = 0.001
length_slope_array.append(stream_length / stream_slope**0.5)
kfac2_array.append(stream_length / celerity)
else:
kfac = stream_length / celerity
kfac_writer.writerow(kfac)
if formula_type >= 2:
if formula_type == 3:
log("Filtering Data by 5th and 95th Percentiles ...")
length_slope_array = np.array(length_slope_array)
percentile_5 = np.percentile(length_slope_array, 5)
percentile_95 = np.percentile(length_slope_array, 95)
length_slope_array[length_slope_array < percentile_5] = \
percentile_5
length_slope_array[length_slope_array > percentile_95] = \
percentile_95
eta = np.mean(kfac2_array) / np.mean(length_slope_array)
log("Kfac2_Avg {0}".format(np.mean(kfac2_array)))
log("Length_Slope Avg {0}".format(np.mean(length_slope_array)))
log("Eta {0}".format(eta))
log("Writing Data ...")
for len_slope in length_slope_array:
kfac_writer.writerow(eta*len_slope) |
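As an aside, a minimal NumPy sketch of the η scaling described in the docstring above (formula types 2 and 3); the segment values are invented purely for illustration:

```python
import numpy as np

lengths_m = np.array([1200.0, 800.0, 1500.0])  # hypothetical segment lengths in meters
slopes = np.array([0.002, 0.004, 0.001])       # hypothetical dimensionless slopes
celerity = 1000.0 / 3600.0                     # 1 km/hr expressed in m/s

a = (lengths_m / celerity).mean()              # mean of L_n / c
b = (lengths_m / np.sqrt(slopes)).mean()       # mean of L_n / sqrt(S_n)
eta = a / b
kfac = eta * lengths_m / np.sqrt(slopes)       # formula type 2: Kfac_n = eta * L_n / sqrt(S_n)
```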
python | def start_stop_video(self):
"""Start and stop the video, and change the button.
"""
if self.parent.info.dataset is None:
self.parent.statusBar().showMessage('No Dataset Loaded')
return
# & is added automatically by PyQt, it seems
if 'Start' in self.idx_button.text().replace('&', ''):
try:
self.update_video()
except IndexError as er:
lg.debug(er)
self.idx_button.setText('Not Available / Start')
return
except OSError as er:
lg.debug(er)
self.idx_button.setText('NO VIDEO for this dataset')
return
self.idx_button.setText('Stop')
elif 'Stop' in self.idx_button.text():
self.idx_button.setText('Start')
self.medialistplayer.stop()
self.t.stop() |
python | def api(self, name):
'''return special API by package's name'''
assert name, 'name is none'
api = None
if flow.__name__ == name:
api = flow.FlowApi()
elif sign.__name__ == name:
api = sign.SignApi()
elif sms.__name__ == name:
api = sms.SmsApi()
elif tpl.__name__ == name:
api = tpl.TplApi()
elif user.__name__ == name:
api = user.UserApi()
elif voice.__name__ == name:
api = voice.VoiceApi()
assert api, "not found api-" + name
api._init(self._clnt)
return api |
java | private void computeIv(byte label) {
for (int i = 0; i < 14; i++) {
ivStore[i] = masterSalt[i];
}
ivStore[7] ^= label;
ivStore[14] = ivStore[15] = 0;
} |
java | public BoxRequestsFile.UpdateFile getDisableSharedLinkRequest(String id) {
BoxRequestsFile.UpdateFile request = new BoxRequestsFile.UpdateFile(id, getFileInfoUrl(id), mSession)
.setSharedLink(null);
return request;
} |
python | def author_name_from_json(author_json):
"concatenate an author name from json data"
author_name = None
if author_json.get('type'):
if author_json.get('type') == 'group' and author_json.get('name'):
author_name = author_json.get('name')
elif author_json.get('type') == 'person' and author_json.get('name'):
if author_json.get('name').get('preferred'):
author_name = author_json.get('name').get('preferred')
return author_name |
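A small illustration of the two JSON shapes the helper above handles (the data is made up):

```python
person = {"type": "person", "name": {"preferred": "Jane Doe", "index": "Doe, Jane"}}
group = {"type": "group", "name": "The Example Consortium"}

assert author_name_from_json(person) == "Jane Doe"
assert author_name_from_json(group) == "The Example Consortium"
```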
python | def build(self, recursive=True):
"""
Building an assembly buffers the :meth:`components` and :meth:`constraints`.
Running ``build()`` is optional, it's automatically run when requesting
:meth:`components` or :meth:`constraints`.
Mostly it's used to test that there aren't any critical runtime
issues with its construction, but doing anything like *displaying* or
*exporting* will ultimately run a build anyway.
:param recursive: if set, iterates through child components and builds
those as well.
:type recursive: :class:`bool`
"""
# initialize values
self._components = {}
self._constraints = []
def genwrap(obj, name, iter_type=None):
# Force obj to act like a generator.
# this wrapper will always yield at least once.
if isinstance(obj, GeneratorType):
for i in obj:
if (iter_type is not None) and (not isinstance(i, iter_type)):
raise TypeError("%s must yield a %r" % (name, iter_type))
yield i
else:
if (iter_type is not None) and (not isinstance(obj, iter_type)):
raise TypeError("%s must return a %r" % (name, iter_type))
yield obj
# Make Components
components_iter = genwrap(self.make_components(), "make_components", dict)
new_components = next(components_iter)
self.verify_components(new_components)
self._components.update(new_components)
# Make Constraints
constraints_iter = genwrap(self.make_constraints(), "make_constraints", list)
new_constraints = next(constraints_iter)
self.verify_constraints(new_constraints)
self._constraints += new_constraints
# Run solver : sets components' world coordinates
self.solve()
# Make Alterations
alterations_iter = genwrap(self.make_alterations(), "make_alterations")
next(alterations_iter) # return value is ignored
while True:
(s1, s2, s3) = (True, True, True) # stages
# Make Components
new_components = None
try:
new_components = next(components_iter)
self.verify_components(new_components)
self._components.update(new_components)
except StopIteration:
s1 = False
# Make Constraints
new_constraints = None
try:
new_constraints = next(constraints_iter)
self.verify_constraints(new_constraints)
self._constraints += new_constraints
except StopIteration:
s2 = False
# Run solver : sets components' world coordinates
if new_components or new_constraints:
self.solve()
# Make Alterations
try:
next(alterations_iter) # return value is ignored
except StopIteration:
s3 = False
# end loop when all iters are finished
if not any((s1, s2, s3)):
break
if recursive:
for (name, component) in self._components.items():
component.build(recursive=recursive) |
python | def can_delete_objectives(self):
"""Tests if this user can delete Objectives.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known deleting an Objective
will result in a PermissionDenied. This is intended as a hint
to an application that may opt not to offer delete operations to
an unauthorized user.
return: (boolean) - false if Objective deletion is not
authorized, true otherwise
compliance: mandatory - This method must be implemented.
"""
url_path = construct_url('authorization',
bank_id=self._catalog_idstr)
return self._get_request(url_path)['objectiveHints']['canDelete'] |
java | @Override
public AppEngineGetList<E> filter(Filter<?>... filters) {
if (filters == null) {
throw new IllegalArgumentException("'filters' must not be [" + null + "]");
}
this.filters = Arrays.asList(filters);
return this;
} |
python | def get_members(self, access=None):
"""
returns list of members according to access type
If access equals to None, then returned list will contain all members.
You should not modify the list contents, otherwise various
optimization data will stop working and may give you wrong results.
:param access: describes desired members
:type access: :class:ACCESS_TYPES
:rtype: [ members ]
"""
if access == ACCESS_TYPES.PUBLIC:
return self.public_members
elif access == ACCESS_TYPES.PROTECTED:
return self.protected_members
elif access == ACCESS_TYPES.PRIVATE:
return self.private_members
all_members = []
all_members.extend(self.public_members)
all_members.extend(self.protected_members)
all_members.extend(self.private_members)
return all_members |
python | def check_license(package_info, *args):
"""
Does the package have a license classifier?
:param package_info: package_info dictionary
:return: Tuple (is the condition True or False?, reason if it is False else None, score to be applied)
"""
classifiers = package_info.get('classifiers') or []
reason = "No License"
result = False
if len([c for c in classifiers if c.startswith('License ::')]) > 0:
result = True
return result, reason, HAS_LICENSE |
java | static boolean isFieldRequired(Field field) {
BigQueryDataField bqAnnotation = field.getAnnotation(BigQueryDataField.class);
return (bqAnnotation != null && bqAnnotation.mode().equals(BigQueryFieldMode.REQUIRED))
|| field.getType().isPrimitive();
} |
java | public static List<String> findAll(Pattern pattern, CharSequence content, int group) {
return findAll(pattern, content, group, new ArrayList<String>());
} |
java | private TernaryValue getBooleanValueWithTypes(Node n) {
switch (n.getToken()) {
case ASSIGN:
case COMMA:
return getBooleanValueWithTypes(n.getLastChild());
case NOT:
return getBooleanValueWithTypes(n.getLastChild()).not();
case AND:
// Assume the left-hand side is unknown. If it's not then we'll report it elsewhere. This
// prevents revisiting deeper nodes repeatedly, which would result in O(n^2) performance.
return TernaryValue.UNKNOWN.and(getBooleanValueWithTypes(n.getLastChild()));
case OR:
// Assume the left-hand side is unknown. If it's not then we'll report it elsewhere. This
// prevents revisiting deeper nodes repeatedly, which would result in O(n^2) performance.
return TernaryValue.UNKNOWN.or(getBooleanValueWithTypes(n.getLastChild()));
case HOOK:
{
TernaryValue trueValue = getBooleanValueWithTypes(n.getSecondChild());
TernaryValue falseValue = getBooleanValueWithTypes(n.getLastChild());
return trueValue.equals(falseValue) ? trueValue : TernaryValue.UNKNOWN;
}
case FUNCTION:
case CLASS:
case NEW:
case ARRAYLIT:
case OBJECTLIT:
return TernaryValue.TRUE;
case VOID:
return TernaryValue.FALSE;
case GETPROP:
case GETELEM:
// Assume that type information on getprops and getelems are likely to be wrong. This
// prevents spurious warnings from not including undefined in getelem's return value,
// from existence checks of symbols the externs define as certainly true, or from default
// initialization of globals ({@code x.y = x.y || {}}).
return TernaryValue.UNKNOWN;
default:
}
// If we reach this point then all the composite structures that we can decompose have
// already been handled, leaving only qualified names and type-aware checks to handle below.
// Note that much of the switch above in fact duplicates the logic in getPureBooleanValue,
// though with some subtle differences. Important differences include (1) avoiding recursion
// into the left-hand-side of nested logical operators, instead treating them as unknown since
// they would have already been reported elsewhere in the traversal had they been otherwise
// (this guarantees we visit each node once, rather than quadratically repeating work);
// (2) it allows side effects to still have a boolean value (this is more in line with
// NodeUtil#getBooleanValue); (3) it propagates our unique amalgam of syntax-based and
// type-based checks to work when more deeply nested (i.e. recursively). These differences
// rely on assumptions that are very specific to this use case, so it does not make sense to
// upstream them.
TernaryValue pure = NodeUtil.getPureBooleanValue(n);
if (pure != TernaryValue.UNKNOWN || n.isName()) {
// If the truthiness is deterministic from the syntax then return that immediately.
// Alternatively, NAME nodes also get a pass since we don't trust the type information.
return pure;
}
JSType type = n.getJSType();
if (type != null) {
// Distrust types we think are always truthy, since sometimes the types lie, even for results
// of function calls (e.g. Map.prototype.get), so it's still important to check. But
// always-falsy values are a little more obviously wrong and there should be no reason for
// those type annotations to be lies. ANDing with UNKNOWN ensures we never return TRUE.
return TernaryValue.UNKNOWN.and(type.getPossibleToBooleanOutcomes().toTernaryValue());
}
return TernaryValue.UNKNOWN;
} |
python | def GetStorageMediaImageTypeIndicators(cls, path_spec, resolver_context=None):
"""Determines if a file contains a supported storage media image types.
Args:
path_spec (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built-in context which is not multi process safe.
Returns:
list[str]: supported format type indicators.
"""
if (cls._storage_media_image_remainder_list is None or
cls._storage_media_image_store is None):
specification_store, remainder_list = cls._GetSpecificationStore(
definitions.FORMAT_CATEGORY_STORAGE_MEDIA_IMAGE)
cls._storage_media_image_remainder_list = remainder_list
cls._storage_media_image_store = specification_store
if cls._storage_media_image_scanner is None:
cls._storage_media_image_scanner = cls._GetSignatureScanner(
cls._storage_media_image_store)
return cls._GetTypeIndicators(
cls._storage_media_image_scanner, cls._storage_media_image_store,
cls._storage_media_image_remainder_list, path_spec,
resolver_context=resolver_context) |
java | public List<JvmAnnotationReference> findAnnotations(Set<String> qualifiedNames, JvmAnnotationTarget annotationTarget) {
List<JvmAnnotationReference> result = new ArrayList<>();
if (annotationTarget != null) {
for (JvmAnnotationReference annotation : annotationTarget.getAnnotations()) {
String id = annotation.getAnnotation().getIdentifier();
if (qualifiedNames.contains(id)) {
result.add(annotation);
}
}
}
return result;
} |
python | def send_url(amount, redirect_url, url, api):
'''
return payment gateway url to redirect user
to it for payment.
'''
values = {'api': api, 'amount': amount, 'redirect': redirect_url}
send_request = requests.post(SEND_URL_FINAL, data=values)
id_get = send_request.text
print(id_get)
if int(id_get) > 0:
print("id_get is valid.")
payment_gateway_url = '%s%s' % (GATEWAY_URL_FINAL, id_get)
return payment_gateway_url
elif id_get == "-1":
print(
"The supplied api is not compatible with the api type defined in payline.")
elif id_get == "-2":
print(
"The amount value is not numeric or is less than 1000 rials.")
elif id_get == "-3":
print("The redirect value is a null string.")
elif id_get == "-4":
print(
"No gateway matching the supplied information was found, or it is pending.")
else:
print("some other error(s) occurred.") |
python | def delete_account(self, account):
""" Account was deleted. """
try:
luser = self._get_account(account.username)
groups = luser['groups'].load(database=self._database)
for group in groups:
changes = changeset(group, {})
changes = group.remove_member(changes, luser)
save(changes, database=self._database)
delete(luser, database=self._database)
except ObjectDoesNotExist:
# it doesn't matter if it doesn't exist
pass |
python | def charge(
self,
amount,
currency=None,
application_fee=None,
capture=None,
description=None,
destination=None,
metadata=None,
shipping=None,
source=None,
statement_descriptor=None,
idempotency_key=None,
):
"""
Creates a charge for this customer.
Parameters not implemented:
* **receipt_email** - Since this is a charge on a customer, the customer's email address is used.
:param amount: The amount to charge.
:type amount: Decimal. Precision is 2; anything more will be ignored.
:param currency: 3-letter ISO code for currency
:type currency: string
:param application_fee: A fee that will be applied to the charge and transferred to the platform owner's
account.
:type application_fee: Decimal. Precision is 2; anything more will be ignored.
:param capture: Whether or not to immediately capture the charge. When false, the charge issues an
authorization (or pre-authorization), and will need to be captured later. Uncaptured
charges expire in 7 days. Default is True
:type capture: bool
:param description: An arbitrary string.
:type description: string
:param destination: An account to make the charge on behalf of.
:type destination: Account
:param metadata: A set of key/value pairs useful for storing additional information.
:type metadata: dict
:param shipping: Shipping information for the charge.
:type shipping: dict
:param source: The source to use for this charge. Must be a source attributed to this customer. If None,
the customer's default source is used. Can be either the id of the source or the source object
itself.
:type source: string, Source
:param statement_descriptor: An arbitrary string to be displayed on the customer's credit card statement.
:type statement_descriptor: string
"""
if not isinstance(amount, decimal.Decimal):
raise ValueError("You must supply a decimal value representing dollars.")
# TODO: better default detection (should charge in customer default)
currency = currency or "usd"
# Convert Source to id
if source and isinstance(source, StripeModel):
source = source.id
stripe_charge = Charge._api_create(
amount=int(amount * 100), # Convert dollars into cents
currency=currency,
application_fee=int(application_fee * 100)
if application_fee
else None, # Convert dollars into cents
capture=capture,
description=description,
destination=destination,
metadata=metadata,
shipping=shipping,
customer=self.id,
source=source,
statement_descriptor=statement_descriptor,
idempotency_key=idempotency_key,
)
return Charge.sync_from_stripe_data(stripe_charge) |
python | def getBottomLeft(self):
"""
Retrieves a tuple with the x,y coordinates of the lower left point of the ellipse.
Requires the radius and the coordinates to be numbers
"""
return (float(self.get_cx()) - float(self.get_rx()), float(self.get_cy()) - float(self.get_ry())) |
java | public void setAssociationIds(java.util.Collection<String> associationIds) {
if (associationIds == null) {
this.associationIds = null;
return;
}
this.associationIds = new com.amazonaws.internal.SdkInternalList<String>(associationIds);
} |
java | public void marshall(ThirdPartyJobDetails thirdPartyJobDetails, ProtocolMarshaller protocolMarshaller) {
if (thirdPartyJobDetails == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(thirdPartyJobDetails.getId(), ID_BINDING);
protocolMarshaller.marshall(thirdPartyJobDetails.getData(), DATA_BINDING);
protocolMarshaller.marshall(thirdPartyJobDetails.getNonce(), NONCE_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | @Override
public final Boolean apply(List<? extends TreeNode> path, TreeNode theNode) {
return test(path, theNode);
} |
python | def _send_command_to_servers(self, head, body):
"""Sends a command to all server nodes.
Sending command to a server node will cause that server node to invoke
``KVStoreServer.controller`` to execute the command.
This function returns after the command has been executed on all server
nodes.
Parameters
----------
head : int
the head of the command.
body : str
the body of the command.
"""
check_call(_LIB.MXKVStoreSendCommmandToServers(
self.handle, mx_uint(head), c_str(body))) |
python | def convert_ram_sdp_ar(ADDR_WIDTH=8, DATA_WIDTH=8):
''' Convert RAM: Simple-Dual-Port, Asynchronous Read'''
clk = Signal(bool(0))
we = Signal(bool(0))
addrw = Signal(intbv(0)[ADDR_WIDTH:])
addrr = Signal(intbv(0)[ADDR_WIDTH:])
di = Signal(intbv(0)[DATA_WIDTH:])
do = Signal(intbv(0)[DATA_WIDTH:])
toVerilog(ram_sdp_ar, clk, we, addrw, addrr, di, do) |
python | def glBufferData(target, data, usage):
""" Data can be numpy array or the size of data to allocate.
"""
if isinstance(data, int):
size = data
data = ctypes.c_voidp(0)
else:
if not data.flags['C_CONTIGUOUS'] or not data.flags['ALIGNED']:
data = data.copy('C')
data_ = data
size = data_.nbytes
data = data_.ctypes.data
res = _lib.glBufferData(target, size, data, usage) |
python | async def get_data(self):
"""Retrieve the data."""
url = '{}/{}'.format(self.url, 'all')
try:
with async_timeout.timeout(5, loop=self._loop):
if self.password is None:
response = await self._session.get(url)
else:
auth = aiohttp.BasicAuth(self.username, self.password)
response = await self._session.get(url, auth=auth)
_LOGGER.debug("Response from Glances API: %s", response.status)
print(response.status)
print(response.text)
self.data = await response.json()
_LOGGER.debug(self.data)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Can not load data from Glances API")
raise exceptions.GlancesApiConnectionError() |
python | def dot(self, rhs):
"""
Return the dot product of this vector and *rhs*.
"""
return self.x * rhs.x + self.y * rhs.y + self.z * rhs.z |
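A quick numeric check of the dot-product formula above, using a throwaway vector type (the `Vec` name is illustrative, not from the source):

```python
from collections import namedtuple

Vec = namedtuple("Vec", "x y z")
a, b = Vec(1, 2, 3), Vec(4, 5, 6)
# 1*4 + 2*5 + 3*6 = 32
assert a.x * b.x + a.y * b.y + a.z * b.z == 32
```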
java | public static ResourceField getMpxjField(int value)
{
ResourceField result = null;
if (value >= 0 && value < MPX_MPXJ_ARRAY.length)
{
result = MPX_MPXJ_ARRAY[value];
}
return (result);
} |
java | public final void rollbackRemoteTransaction(GlobalTransaction gtx) {
RpcManager rpcManager = cache.getRpcManager();
CommandsFactory factory = cache.getComponentRegistry().getCommandsFactory();
try {
RollbackCommand rollbackCommand = factory.buildRollbackCommand(gtx);
rollbackCommand.setTopologyId(rpcManager.getTopologyId());
CompletionStage<Void> cs = rpcManager
.invokeCommandOnAll(rollbackCommand, validOnly(), rpcManager.getSyncRpcOptions());
factory.initializeReplicableCommand(rollbackCommand, false);
rollbackCommand.invokeAsync().join();
cs.toCompletableFuture().join();
} catch (Throwable throwable) {
throw Util.rewrapAsCacheException(throwable);
} finally {
forgetTransaction(gtx, rpcManager, factory);
}
} |
python | def set_ssh_creds(config, args):
"""
Set ssh credentials into config.
Note that these values might also be set in ~/.bangrc. If they are
specified both in ~/.bangrc and as command-line arguments to ``bang``, then
the command-line arguments win.
"""
creds = config.get(A.DEPLOYER_CREDS, {})
creds[A.creds.SSH_USER] = args.user if args.user else creds.get(
A.creds.SSH_USER,
DEFAULT_SSH_USER,
)
if args.ask_pass:
creds[A.creds.SSH_PASS] = getpass.getpass('SSH Password: ')
config[A.DEPLOYER_CREDS] = creds |
python | def find_schedules(self, courses=None, return_generator=False):
"""Returns all the possible course combinations. Assumes no duplicate courses.
``return_generator``: If True, returns a generator instead of collection. Generators
are friendlier to your memory and save computation time if not all solutions are
used.
"""
self.p.reset()
self.create_variables(courses)
self.create_constraints(courses)
if return_generator:
return self.p.iter_solutions()
return self.p.get_solutions() |
java | @Override
public Response delete(String CorpNum, String MgtKey)
throws PopbillException {
if (MgtKey == null || MgtKey.isEmpty())
throw new PopbillException(-99999999, "The management number (MgtKey) was not provided.");
return delete(CorpNum, MgtKey, null);
} |
python | def escape(url):
'''
add escape character `|` to `url`
'''
if salt.utils.platform.is_windows():
return url
scheme = urlparse(url).scheme
if not scheme:
if url.startswith('|'):
return url
else:
return '|{0}'.format(url)
elif scheme == 'salt':
path, saltenv = parse(url)
if path.startswith('|'):
return create(path, saltenv)
else:
return create('|{0}'.format(path), saltenv)
else:
return url |
python | def _correct_build_location(self):
# type: () -> None
"""Move self._temp_build_dir to self._ideal_build_dir/self.req.name
For some requirements (e.g. a path to a directory), the name of the
package is not available until we run egg_info, so the build_location
will return a temporary directory and store the _ideal_build_dir.
This is only called by self.run_egg_info to fix the temporary build
directory.
"""
if self.source_dir is not None:
return
assert self.req is not None
assert self._temp_build_dir.path
assert (self._ideal_build_dir is not None and
self._ideal_build_dir.path) # type: ignore
old_location = self._temp_build_dir.path
self._temp_build_dir.path = None
new_location = self.build_location(self._ideal_build_dir)
if os.path.exists(new_location):
raise InstallationError(
'A package already exists in %s; please remove it to continue'
% display_path(new_location))
logger.debug(
'Moving package %s from %s to new location %s',
self, display_path(old_location), display_path(new_location),
)
shutil.move(old_location, new_location)
self._temp_build_dir.path = new_location
self._ideal_build_dir = None
self.source_dir = os.path.normpath(os.path.abspath(new_location))
self._egg_info_path = None
# Correct the metadata directory, if it exists
if self.metadata_directory:
old_meta = self.metadata_directory
rel = os.path.relpath(old_meta, start=old_location)
new_meta = os.path.join(new_location, rel)
new_meta = os.path.normpath(os.path.abspath(new_meta))
self.metadata_directory = new_meta |
java | @SuppressWarnings("squid:S1067")
protected boolean isAtStartOfNumber() {
return input.current().isDigit()
|| input.current().is('-') && input.next().isDigit()
|| input.current().is('-') && input.next().is('.') && input.next(2).isDigit()
|| input.current().is('.') && input.next().isDigit();
} |
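The predicate above accepts a digit, `-` followed by a digit, `-.` followed by a digit, or `.` followed by a digit; a rough Python equivalent, given only to illustrate the accepted prefixes:

```python
import re

_NUMBER_START = re.compile(r"-?\.?\d")

def is_at_start_of_number(s: str) -> bool:
    """Approximate Python rendering of the Java lookahead above (illustrative only)."""
    return bool(_NUMBER_START.match(s))

assert is_at_start_of_number("4") and is_at_start_of_number("-4")
assert is_at_start_of_number("-.5") and is_at_start_of_number(".5")
assert not is_at_start_of_number("-x") and not is_at_start_of_number(".")
```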
java | public alluxio.proto.dataserver.Protocol.CreateUfsBlockOptionsOrBuilder getCreateUfsBlockOptionsOrBuilder() {
return createUfsBlockOptions_ == null ? alluxio.proto.dataserver.Protocol.CreateUfsBlockOptions.getDefaultInstance() : createUfsBlockOptions_;
} |
java | @Setup
public void setup() {
System.setProperty("io.netty.buffer.checkAccessible", checkAccessible);
System.setProperty("io.netty.buffer.checkBounds", checkBounds);
buffer = bufferType.newBuffer();
} |
java | private byte[] filterHTML(HttpServletRequest request,
DataResponseWrapper response)
throws ServletException {
byte[] data = response.getData();
InputStream is = new ByteArrayInputStream(data);
Document doc = tidy.parseDOM(is, null);
XPath xpath;
synchronized(XPATH_FACTORY) {
xpath = XPATH_FACTORY.newXPath();
}
NodeList rows = null;
try {
rows =
(NodeList) xpath
.evaluate("/html/body/center/center/table/tr",
doc,
XPathConstants.NODESET);
} catch (XPathExpressionException xpe) {
throw new ServletException("Error parsing HTML for search results: ",
xpe);
}
// only the header row, no results.
if (rows.getLength() == 1) {
logger.debug("No results to filter.");
return data;
}
NodeList headers = rows.item(0).getChildNodes();
int numHeaders = headers.getLength();
int pidHeader = -1;
// ensure we have 'pid' in the list and also that it exists
for (int x = 0; x < numHeaders; x++) {
String header =
headers.item(x).getFirstChild().getFirstChild()
.getNodeValue();
if ("pid".equals(header)) {
pidHeader = x;
}
}
if (pidHeader == -1) {
throw new ServletException("pid field not in result list!");
}
Map<String, Node> pids = new HashMap<String, Node>();
// start from 1 to skip the header row.
for (int x = 1; x < rows.getLength(); x++) {
NodeList elements = rows.item(x).getChildNodes();
Node pidParentA = elements.item(pidHeader).getFirstChild();
if (pidParentA != null && pidParentA.getNodeName().equals("a")) {
String pid = pidParentA.getFirstChild().getNodeValue();
pids.put(pid, rows.item(x));
}
}
Set<Result> results = evaluatePids(pids.keySet(), request, response);
for (Result r : results) {
String resource = r.getResource();
if (resource == null || resource.isEmpty()) {
logger.warn("This resource has no resource identifier in the xacml response results!");
resource = "";
} else {
logger.debug("Checking: {}", resource);
}
int lastSlash = resource.lastIndexOf('/');
String rid = resource.substring(lastSlash + 1);
if (r.getStatus().getCode().contains(Status.STATUS_OK)
&& r.getDecision() != Result.DECISION_PERMIT) {
Node node = pids.get(rid);
node.getParentNode().removeChild(node.getNextSibling());
node.getParentNode().removeChild(node);
logger.debug("Removing: {} [{}]", resource, rid);
}
}
Source src = new DOMSource(doc);
ByteArrayOutputStream os = new ByteArrayOutputStream();
javax.xml.transform.Result dst = new StreamResult(os);
try {
xFormer.transform(src, dst);
} catch (TransformerException te) {
throw new ServletException("error generating output", te);
}
return os.toByteArray();
} |
java | private void downloadStormCode(Map conf, String topologyId, String masterCodeDir) throws IOException, TException {
String clusterMode = StormConfig.cluster_mode(conf);
if (clusterMode.endsWith("distributed")) {
BlobStoreUtils.downloadDistributeStormCode(conf, topologyId, masterCodeDir);
} else if (clusterMode.endsWith("local")) {
BlobStoreUtils.downloadLocalStormCode(conf, topologyId, masterCodeDir);
}
} |
java | public static String getZone(String[] availZones, InstanceInfo myInfo) {
String instanceZone = ((availZones == null || availZones.length == 0) ? "default"
: availZones[0]);
if (myInfo != null
&& myInfo.getDataCenterInfo().getName() == DataCenterInfo.Name.Amazon) {
String awsInstanceZone = ((AmazonInfo) myInfo.getDataCenterInfo())
.get(AmazonInfo.MetaDataKey.availabilityZone);
if (awsInstanceZone != null) {
instanceZone = awsInstanceZone;
}
}
return instanceZone;
} |
python | def acquire(self, timeout=None):
"""Acquires the lock if in the unlocked state otherwise switch
back to the parent coroutine.
"""
green = getcurrent()
parent = green.parent
if parent is None:
raise MustBeInChildGreenlet('GreenLock.acquire in main greenlet')
if self._local.locked:
future = create_future(self._loop)
self._queue.append(future)
parent.switch(future)
self._local.locked = green
return self.locked() |
java | public static int[] stringsToInts(String[] stringreps) {
int[] nums = new int[stringreps.length];
for (int i = 0; i < stringreps.length; i++)
nums[i] = Integer.parseInt(stringreps[i]);
return nums;
} |
python | def issuperset(self, other):
"""
Check if the contents of `self` are a superset of the contents of
`other`.
Args:
other (:class:`FrameSet`):
Returns:
bool:
:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
"""
other = self._cast_to_frameset(other)
if other is NotImplemented:
return NotImplemented
return self.items >= other.items |
python | def run(self):
"""
Captures remote hosts memory
"""
logger = logging.getLogger(__name__)
try:
# Check repository GPG settings before starting workers
# Handling this here prevents subprocesses from needing stdin access
repo_conf = self.config['repository']
repo = None
if repo_conf['enabled'] and repo_conf['gpg_verify']:
try:
repo = Repository(repo_conf['url'],
repo_conf['gpg_verify'])
repo.init_gpg()
except Exception as ex:
# Do not prompt to install gpg keys unless running interactively
if repo is not None and self.library is False:
if isinstance(ex, RepositoryUntrustedSigningKeyError):
installed = repo.prompt_for_install()
if installed is False:
logger.critical(("repository signature not "
"installed, install the "
"signature manually or use "
"the --gpg-no-verify flag "
"to bypass this check"))
quit(1)
else:
logger.critical(ex)
quit(1)
conf = self.map_config()
workers = Workers(conf, self.config['workers'], name=self.name, library=self.library)
description = 'memory capture action'
results = workers.spawn(description)
self.statistics(results)
if self.library is True:
return dict([('total', self.total),
('completed', self.completed_addresses),
('failed', self.failed_addresses)])
else:
logger.info(("{0} hosts processed. completed: {1} "
"failed {2}".format(self.total, self.completed,
self.failed)))
logger.info("completed_hosts: {0}".format(self.completed_addresses))
logger.info("failed_hosts: {0}".format(self.failed_addresses))
quit()
except KeyboardInterrupt:
workers.cleanup(terminate=True)
if self.library:
raise
else:
quit(1) |
java | @Override
public boolean eIsSet(int featureID) {
switch (featureID) {
case AfplibPackage.GSCOL__COL:
return COL_EDEFAULT == null ? col != null : !COL_EDEFAULT.equals(col);
}
return super.eIsSet(featureID);
} |
python | def get_version(self, is_full: bool = False) -> dict or str:
"""
This interface is used to get the version information of the connected node in current network.
Return:
the version information of the connected node.
"""
payload = self.generate_json_rpc_payload(RpcMethod.GET_VERSION)
response = self.__post(self.__url, payload)
if is_full:
return response
return response['result'] |
python | def _break_reads(self, contig, position, fout, min_read_length=250):
'''Get all reads from contig, but break them all at the given position (0-based) in the reference. Writes to fout. Currently approximate about where it breaks (ignores indels in the alignment)'''
sam_reader = pysam.Samfile(self.bam, "rb")
for read in sam_reader.fetch(contig):
seqs = []
if read.pos < position < read.reference_end - 1:
split_point = position - read.pos
if split_point - 1 >= min_read_length:
sequence = mapping.aligned_read_to_read(read, revcomp=False, ignore_quality=not self.fastq_out).subseq(0, split_point)
sequence.id += '.left'
seqs.append(sequence)
if read.query_length - split_point >= min_read_length:
sequence = mapping.aligned_read_to_read(read, revcomp=False, ignore_quality=not self.fastq_out).subseq(split_point, read.query_length)
sequence.id += '.right'
seqs.append(sequence)
else:
seqs.append(mapping.aligned_read_to_read(read, revcomp=False, ignore_quality=not self.fastq_out))
for seq in seqs:
if read.is_reverse:
seq.revcomp()
print(seq, file=fout) |
java | protected void fireKNNsInserted(DBIDs insertions, DBIDs updates) {
KNNChangeEvent e = new KNNChangeEvent(this, KNNChangeEvent.Type.INSERT, insertions, updates);
Object[] listeners = listenerList.getListenerList();
for(int i = listeners.length - 2; i >= 0; i -= 2) {
if(listeners[i] == KNNListener.class) {
((KNNListener) listeners[i + 1]).kNNsChanged(e);
}
}
} |
java | public List<ServerMonitoringStatistics> getMonitoringStats(GroupFilter groupFilter, ServerMonitoringFilter config) {
return getMonitoringStats(Arrays.asList(getRefsFromFilter(groupFilter)), config);
} |
java | public BaseListener getNextValidListener(int iMoveMode)
{
if (m_listener != null)
{
if ((m_listener.isEnabled()) && (m_listener.respondsToMode(iMoveMode)))
return m_listener;
else
return m_listener.getNextValidListener(iMoveMode);
}
else
return null;
} |
python | def init_package(path=None, name='manage'):
"""Initialize (import) the submodules, and recursively the
subpackages, of a "manage" package at ``path``.
``path`` may be specified as either a system directory path or a
list of these.
If ``path`` is unspecified, it is inferred from the already-imported
"manage" top-level module.
"""
if path is None:
manager = sys.modules[name]
init_package(manager.__path__, name)
return
if isinstance(path, str):
init_package([path], name)
return
for module_info in pkgutil.walk_packages(path, f'{name}.'):
if not module_info.ispkg:
importlib.import_module(module_info.name) |
java | public org.tensorflow.util.BundleHeaderProto.Endianness getEndianness() {
org.tensorflow.util.BundleHeaderProto.Endianness result = org.tensorflow.util.BundleHeaderProto.Endianness.valueOf(endianness_);
return result == null ? org.tensorflow.util.BundleHeaderProto.Endianness.UNRECOGNIZED : result;
} |
python | def update(self, **kwargs) -> "UpdateQuery":
"""
Update all objects in QuerySet with given kwargs.
"""
return UpdateQuery(
db=self._db,
model=self.model,
update_kwargs=kwargs,
q_objects=self._q_objects,
annotations=self._annotations,
custom_filters=self._custom_filters,
) |
java | public static void drop(DB db, String tableName) {
DBSetup.drop(CallInfo.create(), db, tableName);
} |
java | public static int mix(int base, int added) {
float bAlpha = ColorHelper.getAlpha(base) / 255f;
float aAlpha = ColorHelper.getAlpha(added) / 255f;
float alpha = 1 - (1 - bAlpha) * (1 - aAlpha); // alpha
int bR = ColorHelper.getRed(base);
int bG = ColorHelper.getGreen(base);
int bB = ColorHelper.getBlue(base);
int aR = ColorHelper.getRed(added);
int aG = ColorHelper.getGreen(added);
int aB = ColorHelper.getBlue(added);
int r = Math.round((aR * aAlpha / alpha) + (bR * bAlpha * (1 - aAlpha) / alpha)); // red
int g = Math.round((aG * aAlpha / alpha) + (bG * bAlpha * (1 - aAlpha) / alpha)); // green
int b = Math.round((aB * aAlpha / alpha) + (bB * bAlpha * (1 - aAlpha) / alpha)); // blue
return getARGB(r, g, b, (int) clamp(alpha * MAX));
} |
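The `mix` helper above performs standard source-over alpha compositing; a minimal Python sketch of the same math on `0xAARRGGBB` integers (the function and helper names here are illustrative, not part of the library):

```python
def mix_argb(base, added):
    """Source-over compositing of two 0xAARRGGBB colours (sketch only)."""
    def channels(c):
        return ((c >> 24) & 0xFF, (c >> 16) & 0xFF, (c >> 8) & 0xFF, c & 0xFF)

    b_a, b_r, b_g, b_b = channels(base)
    a_a, a_r, a_g, a_b = channels(added)
    b_alpha, a_alpha = b_a / 255.0, a_a / 255.0
    alpha = 1 - (1 - b_alpha) * (1 - a_alpha)
    if alpha == 0:
        return 0

    def blend(ac, bc):
        return round((ac * a_alpha + bc * b_alpha * (1 - a_alpha)) / alpha)

    return (round(alpha * 255) << 24) | (blend(a_r, b_r) << 16) | (blend(a_g, b_g) << 8) | blend(a_b, b_b)

# fully opaque red drawn over fully opaque blue stays fully opaque red
assert mix_argb(0xFF0000FF, 0xFFFF0000) == 0xFFFF0000
```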
java | public DataSource<StringValue> readTextFileWithValue(String filePath, String charsetName, boolean skipInvalidLines) {
Preconditions.checkNotNull(filePath, "The file path may not be null.");
TextValueInputFormat format = new TextValueInputFormat(new Path(filePath));
format.setCharsetName(charsetName);
format.setSkipInvalidLines(skipInvalidLines);
return new DataSource<>(this, format, new ValueTypeInfo<>(StringValue.class), Utils.getCallLocationName());
} |
python | def _parse_error_message(self, message):
"""Parses the eAPI failure response message
This method accepts an eAPI failure message and parses the necessary
parts in order to generate a CommandError.
Args:
message (str): The error message to parse
Returns:
tuple: A tuple that consists of the following:
* code: The error code specified in the failure message
* message: The error text specified in the failure message
* error: The error text from the command that generated the
error (the last command that ran)
* output: A list of all output from all commands
"""
msg = message['error']['message']
code = message['error']['code']
err = None
out = None
if 'data' in message['error']:
err = ' '.join(message['error']['data'][-1]['errors'])
out = message['error']['data']
return code, msg, err, out |
python | def topics_count(self):
""" Returns the number of topics associated with the current node and its descendants. """
return self.obj.direct_topics_count + sum(n.topics_count for n in self.children) |
python | def purge_stream(self, stream_id, remove_definition=False, sandbox=None):
"""
Purge the stream
:param stream_id: The stream identifier
:param remove_definition: Whether to remove the stream definition as well
:param sandbox: The sandbox for this stream
:return: None
:raises: NotImplementedError
"""
# TODO: Add time interval to this
if sandbox is not None:
raise NotImplementedError
if stream_id not in self.streams:
raise StreamNotFoundError("Stream with id '{}' not found".format(stream_id))
stream = self.streams[stream_id]
query = stream_id.as_raw()
with switch_db(StreamInstanceModel, 'hyperstream'):
StreamInstanceModel.objects(__raw__=query).delete()
# Also update the stream status
stream.calculated_intervals = TimeIntervals([])
if remove_definition:
with switch_db(StreamDefinitionModel, 'hyperstream'):
StreamDefinitionModel.objects(__raw__=query).delete()
logging.info("Purged stream {}".format(stream_id)) |
python | def _det_tc(detector_name, ra, dec, tc, ref_frame='geocentric'):
"""Returns the coalescence time of a signal in the given detector.
Parameters
----------
detector_name : string
The name of the detector, e.g., 'H1'.
ra : float
The right ascension of the signal, in radians.
dec : float
The declination of the signal, in radians.
tc : float
The GPS time of the coalescence of the signal in the `ref_frame`.
ref_frame : {'geocentric', string}
The reference frame that the given coalescence time is defined in.
May specify 'geocentric', or a detector name; default is 'geocentric'.
Returns
-------
float :
The GPS time of the coalescence in detector `detector_name`.
"""
if ref_frame == detector_name:
return tc
detector = Detector(detector_name)
if ref_frame == 'geocentric':
return tc + detector.time_delay_from_earth_center(ra, dec, tc)
else:
other = Detector(ref_frame)
return tc + detector.time_delay_from_detector(other, ra, dec, tc) |
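For context, a hedged usage sketch assuming the surrounding module is PyCBC, where `Detector` provides the time-delay helpers used above (the sky location and time are invented):

```python
from pycbc.detector import Detector

tc_geocentric = 1187008882.4          # GPS coalescence time at the geocenter
ra, dec = 3.44, -0.41                 # right ascension / declination in radians
h1_tc = tc_geocentric + Detector("H1").time_delay_from_earth_center(ra, dec, tc_geocentric)
```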
java | public <FT> void registerConverter(Class<FT> clazz, Converter<FT, ?> converter) {
converterMap.put(clazz, converter);
} |
java | void updateCornerLeft() {
if (m_corner != null) {
int popupLeft = popup.getPopupLeft();
int dif = (getAbsoluteLeft() - popupLeft) + OFFSET;
m_corner.getStyle().setLeft(dif, Unit.PX);
}
} |
python | def add(self, val):
"""Add the element *val* to the list."""
_maxes, _lists = self._maxes, self._lists
if _maxes:
pos = bisect_right(_maxes, val)
if pos == len(_maxes):
pos -= 1
_maxes[pos] = val
_lists[pos].append(val)
else:
insort(_lists[pos], val)
self._expand(pos)
else:
_maxes.append(val)
_lists.append([val])
self._len += 1 |
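A standalone sketch of the bisect pattern used above, with plain lists rather than the containing class:

from bisect import bisect_right, insort

sublists = [[1, 3], [5, 9]]
maxes = [3, 9]
val = 4
pos = bisect_right(maxes, val)      # 1: val is larger than 3 but not larger than 9
if pos == len(maxes):
    maxes[-1] = val                 # val is a new overall maximum, append to the last sublist
    sublists[-1].append(val)
else:
    insort(sublists[pos], val)      # keep the chosen sublist sorted -> [[1, 3], [4, 5, 9]]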
python | def define_task(name,
tick_script,
task_type='stream',
database=None,
retention_policy='default',
dbrps=None):
'''
Define a task. Serves as both create/update.
name
Name of the task.
tick_script
Path to the TICK script for the task. Can be a salt:// source.
task_type
Task type. Defaults to 'stream'
dbrps
A list of databases and retention policies in "dbname"."rpname" format
to fetch data from. For backward compatibility, the value of
'database' and 'retention_policy' will be merged as part of dbrps.
.. versionadded:: 2019.2.0
database
Which database to fetch data from.
retention_policy
Which retention policy to fetch data from. Defaults to 'default'.
CLI Example:
.. code-block:: bash
salt '*' kapacitor.define_task cpu salt://kapacitor/cpu.tick database=telegraf
'''
if not database and not dbrps:
log.error("Providing database name or dbrps is mandatory.")
return False
if version() < '0.13':
cmd = 'kapacitor define -name {0}'.format(name)
else:
cmd = 'kapacitor define {0}'.format(name)
if tick_script.startswith('salt://'):
tick_script = __salt__['cp.cache_file'](tick_script, __env__)
cmd += ' -tick {0}'.format(tick_script)
if task_type:
cmd += ' -type {0}'.format(task_type)
if not dbrps:
dbrps = []
if database and retention_policy:
dbrp = '{0}.{1}'.format(database, retention_policy)
dbrps.append(dbrp)
if dbrps:
for dbrp in dbrps:
cmd += ' -dbrp {0}'.format(dbrp)
return _run_cmd(cmd) |
java | public Nfs3AccessResponse wrapped_getAccess(NfsAccessRequest request) throws IOException {
NfsResponseHandler<Nfs3AccessResponse> responseHandler = new NfsResponseHandler<Nfs3AccessResponse>() {
/* (non-Javadoc)
* @see com.emc.ecs.nfsclient.rpc.RpcResponseHandler#makeNewResponse()
*/
protected Nfs3AccessResponse makeNewResponse() {
return new Nfs3AccessResponse();
}
};
_rpcWrapper.callRpcWrapped(request, responseHandler);
return responseHandler.getResponse();
} |
java | @Override
public void endImport() {
if (!m_batch.isEmpty()) {
mergeInTransaction(m_em, m_batch);
}
if (m_em != null && m_em.isOpen()) {
m_em.close();
}
if (m_emf != null) {
m_emf.close();
}
} |
python | def topic_matches_sub(sub, topic):
"""Check whether a topic matches a subscription.
For example:
foo/bar would match the subscription foo/# or +/bar
non/matching would not match the subscription non/+/+
"""
result = True
multilevel_wildcard = False
slen = len(sub)
tlen = len(topic)
if slen > 0 and tlen > 0:
if (sub[0] == '$' and topic[0] != '$') or (topic[0] == '$' and sub[0] != '$'):
return False
spos = 0
tpos = 0
while spos < slen and tpos < tlen:
if sub[spos] == topic[tpos]:
if tpos == tlen-1:
# Check for e.g. foo matching foo/#
if spos == slen-3 and sub[spos+1] == '/' and sub[spos+2] == '#':
result = True
multilevel_wildcard = True
break
spos += 1
tpos += 1
if tpos == tlen and spos == slen-1 and sub[spos] == '+':
spos += 1
result = True
break
else:
if sub[spos] == '+':
spos += 1
while tpos < tlen and topic[tpos] != '/':
tpos += 1
if tpos == tlen and spos == slen:
result = True
break
elif sub[spos] == '#':
multilevel_wildcard = True
if spos+1 != slen:
result = False
break
else:
result = True
break
else:
result = False
break
if not multilevel_wildcard and (tpos < tlen or spos < slen):
result = False
return result |
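A few checks mirroring the docstring's examples, assuming the function is importable as written:

assert topic_matches_sub('foo/#', 'foo/bar')
assert topic_matches_sub('+/bar', 'foo/bar')
assert not topic_matches_sub('non/+/+', 'non/matching')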
java | public static void assertOpenAndActive(@NonNull String ws, @NonNull String errorMsg) throws ND4JWorkspaceException {
if (!Nd4j.getWorkspaceManager().checkIfWorkspaceExistsAndActive(ws)) {
throw new ND4JWorkspaceException(errorMsg);
}
} |
python | def export(gandi, resource, output, force, intermediate):
""" Write the certificate to <output> or <fqdn>.crt.
Resource can be a CN or an ID
"""
ids = []
for res in resource:
ids.extend(gandi.certificate.usable_ids(res))
if output and len(ids) > 1:
gandi.echo('Too many certs found, you must specify which cert you '
'want to export')
return
for id_ in set(ids):
cert = gandi.certificate.info(id_)
if 'cert' not in cert:
continue
if cert['status'] != 'valid':
gandi.echo('The certificate must be in valid status to be '
'exported (%s).' % id_)
continue
cert_filename = cert['cn'].replace('*.', 'wildcard.', 1)
crt_filename = output or cert_filename + '.crt'
if not force and os.path.isfile(crt_filename):
gandi.echo('The file %s already exists.' % crt_filename)
continue
crt = gandi.certificate.pretty_format_cert(cert)
if crt:
with open(crt_filename, 'w') as crt_file:
crt_file.write(crt)
gandi.echo('wrote %s' % crt_filename)
package = cert['package']
if 'bus' in package and intermediate:
gandi.echo('Business certs do not need intermediates.')
elif intermediate:
crtf = 'pem'
sha_version = cert['sha_version']
type_ = package.split('_')[1]
extra = ('sgc' if 'SGC' in package
and 'pro' in package
and sha_version == 1 else 'default')
if extra == 'sgc':
crtf = 'pem'
inters = gandi.certificate.urls[sha_version][type_][extra][crtf]
if isinstance(inters, basestring):
inters = [inters]
fhandle = open(cert_filename + '.inter.crt', 'w+b')
for inter in inters:
if inter.startswith('http'):
data = requests.get(inter).text
else:
data = inter
fhandle.write(data.encode('latin1'))
gandi.echo('wrote %s' % cert_filename + '.inter.crt')
fhandle.close()
return crt |
java | @Deprecated
public static JsonObject readFrom(Reader reader) throws IOException {
return JsonValue.readFrom(reader).asObject();
} |
java | @BetaApi
public final Instance getInstance(String instance) {
GetInstanceHttpRequest request =
GetInstanceHttpRequest.newBuilder().setInstance(instance).build();
return getInstance(request);
} |
python | def _do_subst(node, subs):
"""
Fetch the node contents and replace all instances of the keys with
their values. For example, if subs is
{'%VERSION%': '1.2345', '%BASE%': 'MyProg', '%prefix%': '/bin'},
then all instances of %VERSION% in the file will be replaced with
1.2345 and so forth.
"""
contents = node.get_text_contents()
if subs:
for (k, val) in subs:
contents = re.sub(k, val, contents)
if 'b' in TEXTFILE_FILE_WRITE_MODE:
try:
contents = bytearray(contents, 'utf-8')
except UnicodeDecodeError:
# contents is already utf-8 encoded python 2 str i.e. a byte array
contents = bytearray(contents)
return contents |
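A standalone sketch of the substitution loop above, with made-up contents and keys:

import re

contents = '#define VERSION "%VERSION%"\nprefix = %prefix%'
subs = [('%VERSION%', '1.2345'), ('%prefix%', '/bin')]
for key, val in subs:
    contents = re.sub(key, val, contents)
# contents is now '#define VERSION "1.2345"\nprefix = /bin'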
java | public DomainInner beginCreateOrUpdate(String resourceGroupName, String domainName, DomainInner domain) {
return beginCreateOrUpdateWithServiceResponseAsync(resourceGroupName, domainName, domain).toBlocking().single().body();
} |
java | public static <T> List<T> loadAll(
Class<T> klass,
Iterable<Class<?>> hardcoded,
ClassLoader cl,
final PriorityAccessor<T> priorityAccessor) {
Iterable<T> candidates;
if (isAndroid(cl)) {
candidates = getCandidatesViaHardCoded(klass, hardcoded);
} else {
candidates = getCandidatesViaServiceLoader(klass, cl);
}
List<T> list = new ArrayList<>();
for (T current: candidates) {
if (!priorityAccessor.isAvailable(current)) {
continue;
}
list.add(current);
}
// Sort descending based on priority. If priorities are equal, compare the class names to
// get a reliable result.
Collections.sort(list, Collections.reverseOrder(new Comparator<T>() {
@Override
public int compare(T f1, T f2) {
int pd = priorityAccessor.getPriority(f1) - priorityAccessor.getPriority(f2);
if (pd != 0) {
return pd;
}
return f1.getClass().getName().compareTo(f2.getClass().getName());
}
}));
return Collections.unmodifiableList(list);
} |
java | public void sendInteger4(int val) throws IOException {
int4Buf[0] = (byte) (val >>> 24);
int4Buf[1] = (byte) (val >>> 16);
int4Buf[2] = (byte) (val >>> 8);
int4Buf[3] = (byte) (val);
pgOutput.write(int4Buf);
} |
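The same big-endian byte layout, sketched in Python for clarity; the value is arbitrary:

val = 0x12345678
buf = bytes([(val >> 24) & 0xFF, (val >> 16) & 0xFF, (val >> 8) & 0xFF, val & 0xFF])
assert buf == (0x12345678).to_bytes(4, 'big')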
python | def get_entries(path):
"""Return sorted lists of directories and files in the given path."""
dirs, files = [], []
for entry in os.listdir(path):
# Categorize entry as directory or file.
if os.path.isdir(os.path.join(path, entry)):
dirs.append(entry)
else:
files.append(entry)
dirs.sort()
files.sort()
return dirs, files |
python | def record_to_objects(self):
"""Create config records to match the file metadata"""
from ambry.orm.exc import NotFoundError
fr = self.record
contents = fr.unpacked_contents
if not contents:
return
    # drop_empty zips the list of lists so each row starts with its heading followed by
    # that heading's values, drops rows whose values are all empty, then zips back to
    # restore the original orientation.
non_empty_rows = drop_empty(contents)
s = self._dataset._database.session
for i, row in enumerate(non_empty_rows):
if i == 0:
header = row
else:
d = dict(six.moves.zip(header, row))
if 'widths' in d:
del d['widths'] # Obsolete column in old spreadsheets.
if 'table' in d:
d['dest_table_name'] = d['table']
del d['table']
if 'order' in d:
d['stage'] = d['order']
del d['order']
if 'dest_table' in d:
d['dest_table_name'] = d['dest_table']
del d['dest_table']
if 'source_table' in d:
d['source_table_name'] = d['source_table']
del d['source_table']
d['d_vid'] = self._dataset.vid
d['state'] = 'synced'
try:
ds = self._dataset.source_file(str(d['name']))
ds.update(**d)
except NotFoundError:
name = d['name']
del d['name']
try:
ds = self._dataset.new_source(name, **d)
except:
print(name, d)
import pprint
pprint.pprint(d)
raise
except: # Odd error with 'none' in keys for d
print('!!!', header)
print('!!!', row)
raise
s.merge(ds)
self._dataset._database.commit() |
java | public static <T> Set<T> wrap(final Set<T> source) {
val list = new LinkedHashSet<T>();
if (source != null && !source.isEmpty()) {
list.addAll(source);
}
return list;
} |