language (stringclasses: 2 values) | func_code_string (stringlengths: 63–466k) |
---|---|
java | @Override
public void eUnset(int featureID) {
switch (featureID) {
case AfplibPackage.GSMC__CELLWI:
setCELLWI(CELLWI_EDEFAULT);
return;
case AfplibPackage.GSMC__CELLHI:
setCELLHI(CELLHI_EDEFAULT);
return;
}
super.eUnset(featureID);
} |
java | public final boolean isRecurring() {
return recurrenceRule != null && recurrenceRule.get() != null && !recurrenceRule.get().trim().equals(""); //$NON-NLS-1$
} |
java | public void execute() throws MongobeeException {
if (!isEnabled()) {
logger.info("Mongobee is disabled. Exiting.");
return;
}
validateConfig();
if (this.mongoClient != null) {
dao.connectMongoDb(this.mongoClient, dbName);
} else {
dao.connectMongoDb(this.mongoClientURI, dbName);
}
if (!dao.acquireProcessLock()) {
logger.info("Mongobee did not acquire process lock. Exiting.");
return;
}
logger.info("Mongobee acquired process lock, starting the data migration sequence..");
try {
executeMigration();
} finally {
logger.info("Mongobee is releasing process lock.");
dao.releaseProcessLock();
}
logger.info("Mongobee has finished his job.");
} |
java | public ServiceFuture<ExpressRouteCircuitAuthorizationInner> createOrUpdateAsync(String resourceGroupName, String circuitName, String authorizationName, ExpressRouteCircuitAuthorizationInner authorizationParameters, final ServiceCallback<ExpressRouteCircuitAuthorizationInner> serviceCallback) {
return ServiceFuture.fromResponse(createOrUpdateWithServiceResponseAsync(resourceGroupName, circuitName, authorizationName, authorizationParameters), serviceCallback);
} |
python | def printlet(flatten=False, **kwargs):
"""
Print chunks of data from a chain
:param flatten: whether to flatten data chunks
:param kwargs: keyword arguments as for :py:func:`print`
If ``flatten`` is :py:const:`True`, every chunk received is unpacked.
This is useful when passing around connected data, e.g. from :py:func:`~.enumeratelet`.
Keyword arguments via ``kwargs`` are equivalent to those of :py:func:`print`.
For example, passing ``file=sys.stderr`` is a simple way of creating a debugging element in a chain:
.. code::
debug_chain = chain[:i] >> printlet(file=sys.stderr) >> chain[i:]
"""
chunk = yield
if flatten:
while True:
print(*chunk, **kwargs)
chunk = yield chunk
else:
while True:
print(chunk, **kwargs)
chunk = yield chunk |
python | def _imgdata(self, width, height,
state_size=None, start='', dataset=''):
"""Generate image pixels.
Parameters
----------
width : `int`
Image width.
height : `int`
Image height.
state_size : `int` or `None`, optional
State size to use for generation (default: `None`).
start : `str`, optional
Initial state (default: '').
dataset : `str`, optional
Dataset key prefix (default: '').
Raises
------
RuntimeError
If generator is empty.
Returns
-------
`generator` of `int`
Pixel generator.
"""
size = width * height
if size > 0 and start:
yield state_to_pixel(start)
size -= 1
while size > 0:
prev_size = size
pixels = self.generate(state_size, start, dataset)
pixels = islice(pixels, 0, size)
for pixel in pixels:
yield state_to_pixel(pixel)
size -= 1
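# If the generator produced nothing this pass, pad with the start pixel or give up.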
if prev_size == size:
if start:
yield from repeat(state_to_pixel(start), size)
else:
raise RuntimeError('empty generator') |
java | public MultiPolygonMarkers addMultiPolygonToMapAsMarkers(
GoogleMapShapeMarkers shapeMarkers, GoogleMap map,
MultiPolygonOptions multiPolygon,
MarkerOptions polygonMarkerOptions,
MarkerOptions polygonMarkerHoleOptions,
PolygonOptions globalPolygonOptions) {
MultiPolygonMarkers multiPolygonMarkers = new MultiPolygonMarkers();
for (PolygonOptions polygon : multiPolygon.getPolygonOptions()) {
PolygonMarkers polygonMarker = addPolygonToMapAsMarkers(
shapeMarkers, map, polygon, polygonMarkerOptions,
polygonMarkerHoleOptions, globalPolygonOptions);
shapeMarkers.add(polygonMarker);
multiPolygonMarkers.add(polygonMarker);
}
return multiPolygonMarkers;
} |
java | public List<KbPredicate> getMethodPredicates() {
Set<KbPredicate> set = new HashSet<KbPredicate>();
for (MethodObj method : getMethods()) {
set.add(method.getPredicate());
}
List<KbPredicate> list = new ArrayList<>(set);
Collections.sort(list, new Comparator<KbPredicate>() {
@Override
public int compare(KbPredicate o1, KbPredicate o2) {
if (o2 == null) {
return 1;
}
return o1.toString().compareTo(o2.toString());
}
});
return list;
} |
java | public byte[] getByteArray(final String key, final byte[] def) {
try {
return systemRoot.getByteArray(fixKey(key), def);
} catch (final Exception e) {
// just eat the exception to avoid any system crash on system issues
return def;
}
} |
java | public String getLabel()
{
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
{
SibTr.entry(tc, "getLabel");
SibTr.exit(tc, "getLabel", label);
}
return label;
} |
python | def load_sequences_to_reference(self, sc=None, force_rerun=False):
"""Wrapper for _load_sequences_to_reference_gene"""
log.info('Loading sequences to reference GEM-PRO...')
from random import shuffle
g_ids = [g.id for g in self.reference_gempro.functional_genes]
shuffle(g_ids)
def _load_sequences_to_reference_gene_sc(g_id, outdir=self.sequences_by_gene_dir,
g_to_pickle=self.gene_protein_pickles,
strain_infodict=self.strain_infodict,
orth_matrix=self.df_orthology_matrix, force_rerun=force_rerun):
"""Load orthologous strain sequences to reference Protein object, save as new pickle"""
import ssbio.utils
import ssbio.io
from Bio import SeqIO
import os.path as op
protein_seqs_pickle_path = op.join(outdir, '{}_protein_withseqs.pckl'.format(g_id))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=protein_seqs_pickle_path):
protein_pickle_path = g_to_pickle[g_id]
protein_pickle = ssbio.io.load_pickle(protein_pickle_path)
for strain, info in strain_infodict.items():
strain_sequences = SeqIO.index(info['genome_path'], 'fasta')
strain_gene_functional = info['functional_genes'][g_id]
if strain_gene_functional:
# Pull the gene ID of the strain from the orthology matrix
strain_gene_key = orth_matrix.at[g_id, strain]
new_id = '{}_{}'.format(g_id, strain)
if protein_pickle.sequences.has_id(new_id):
continue
protein_pickle.load_manual_sequence(seq=strain_sequences[strain_gene_key],
ident=new_id,
set_as_representative=False)
protein_pickle.save_pickle(outfile=protein_seqs_pickle_path)
return g_id, protein_seqs_pickle_path
if sc:
genes_rdd = sc.parallelize(g_ids)
result = genes_rdd.map(_load_sequences_to_reference_gene_sc).collect()
else:
result = []
for g in tqdm(g_ids):
result.append(self._load_sequences_to_reference_gene(g, force_rerun))
log.info('Storing paths to new Protein objects in self.gene_protein_pickles...')
updated = []
for g_id, protein_pickle in result:
self.gene_protein_pickles[g_id] = protein_pickle
updated.append(g_id)
not_updated = set(self.gene_protein_pickles).difference(updated)
log.info('No change to {} genes, removing from gene_protein_pickles'.format(len(not_updated)))
log.debug(not_updated)
for rem in not_updated:
del self.gene_protein_pickles[rem] |
java | protected long checksum_impl() {
long xs = 0x600DL;
int count = 0;
Field[] fields = Weaver.getWovenFields(this.getClass());
Arrays.sort(fields,
new Comparator<Field>() {
public int compare(Field field1, Field field2) {
return field1.getName().compareTo(field2.getName());
}
});
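// Fold each field's hash into xs, cycling through a fixed table of primes.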
for (Field f : fields) {
final long P = MathUtils.PRIMES[count % MathUtils.PRIMES.length];
Class<?> c = f.getType();
if (c.isArray()) {
try {
f.setAccessible(true);
if (f.get(this) != null) {
if (c.getComponentType() == Integer.TYPE){
int[] arr = (int[]) f.get(this);
xs = xs * P + (long) Arrays.hashCode(arr);
} else if (c.getComponentType() == Float.TYPE) {
float[] arr = (float[]) f.get(this);
xs = xs * P + (long) Arrays.hashCode(arr);
} else if (c.getComponentType() == Double.TYPE) {
double[] arr = (double[]) f.get(this);
xs = xs * P + (long) Arrays.hashCode(arr);
} else if (c.getComponentType() == Long.TYPE){
long[] arr = (long[]) f.get(this);
xs = xs * P + (long) Arrays.hashCode(arr);
} else {
Object[] arr = (Object[]) f.get(this);
xs = xs * P + (long) Arrays.deepHashCode(arr);
} //else lead to ClassCastException
} else {
xs = xs * P;
}
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
} catch (ClassCastException t) {
throw H2O.fail(); //no support yet for int[][] etc.
}
} else {
try {
f.setAccessible(true);
Object value = f.get(this);
if (value != null) {
xs = xs * P + (long)(value.hashCode());
} else {
xs = xs * P + P;
}
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
count++;
}
xs ^= (train() == null ? 43 : train().checksum()) * (valid() == null ? 17 : valid().checksum());
return xs;
} |
python | def p_insertx(self,t):
"expression : kw_insert kw_into NAME opt_paren_namelist kw_values '(' commalist ')' opt_returnx"
t[0] = InsertX(t[3],t[4],t[7].children,t[9]) |
python | def standard_exc_info(self):
"""Standard python exc_info for re-raising"""
tb = self.frames[0]
# the frame will be an actual traceback (or transparent proxy) if
# we are on pypy or a python implementation with support for tproxy
if type(tb) is not TracebackType:
tb = tb.tb
return self.exc_type, self.exc_value, tb |
java | private boolean connected(NodeCursor start, long end, int[][] typedDirections) {
try (RelationshipTraversalCursor relationship = ktx.cursors().allocateRelationshipTraversalCursor()) {
start.allRelationships(relationship);
while (relationship.next()) {
if (relationship.neighbourNodeReference() == end) {
if (typedDirections == null) {
return true;
} else {
int direction = relationship.targetNodeReference() == end ? 0 : 1;
int[] types = typedDirections[direction];
if (arrayContains(types, relationship.type())) return true;
}
}
}
}
return false;
} |
python | def stop_notifications(self):
"""Stop the notifications thread.
:returns: the result of waiting for the notifications thread to stop
"""
with self._notifications_lock:
if not self.has_active_notification_thread:
return
thread = self._notifications_thread
self._notifications_thread = None
stopping = thread.stop()
api = self._get_api(mds.NotificationsApi)
api.delete_long_poll_channel()
return stopping.wait() |
java | public static void assertXpathsEqual(String controlXpath, String testXpath,
Document document)
throws XpathException {
assertXpathsEqual(controlXpath, document, testXpath, document);
} |
python | def getItemLocals(item):
'''
Iterate the locals of an item and yield (name,valu) pairs.
Example:
for name,valu in getItemLocals(item):
dostuff()
'''
for name in dir(item):
try:
valu = getattr(item, name, None)
yield name, valu
except Exception:
pass |
java | private MultiValueMap<String, String> union(MultiValueMap<String, String> map1, MultiValueMap<String, String> map2) {
MultiValueMap<String, String> union = new LinkedMultiValueMap<String, String>(map1);
for (Entry<String, List<String>> entry : map2.entrySet()) {
String key = entry.getKey();
List<String> values = entry.getValue();
for (String value : values) {
union.add(key, value);
}
}
return union;
} |
python | def handle_read(repo, **kwargs):
"""handles reading repo information"""
log.info('read: %s %s', repo, kwargs)
if isinstance(repo, (unicode, str)):
return {'name': 'Repo', 'desc': 'Welcome to Grit', 'comment': ''}
else:
return repo.serialize() |
python | def _first_of_quarter(self, day_of_week=None):
"""
Modify to the first occurrence of a given day of the week
in the current quarter. If no day_of_week is provided,
modify to the first day of the quarter. Use the supplied consts
to indicate the desired day_of_week, ex. pendulum.MONDAY.
:type day_of_week: int or None
:rtype: Date
"""
return self.set(self.year, self.quarter * 3 - 2, 1).first_of(
"month", day_of_week
) |
java | public EEnum getPGPRGPGorient() {
if (pgprgpGorientEEnum == null) {
pgprgpGorientEEnum = (EEnum)EPackage.Registry.INSTANCE.getEPackage(AfplibPackage.eNS_URI).getEClassifiers().get(125);
}
return pgprgpGorientEEnum;
} |
java | public Observable<LabAccountInner> createOrUpdateAsync(String resourceGroupName, String labAccountName, LabAccountInner labAccount) {
return createOrUpdateWithServiceResponseAsync(resourceGroupName, labAccountName, labAccount).map(new Func1<ServiceResponse<LabAccountInner>, LabAccountInner>() {
@Override
public LabAccountInner call(ServiceResponse<LabAccountInner> response) {
return response.body();
}
});
} |
python | def __method_descriptor(self, service, method_info,
protorpc_method_info):
"""Describes a method.
Args:
service: endpoints.Service, Implementation of the API as a service.
method_info: _MethodInfo, Configuration for the method.
protorpc_method_info: protorpc.remote._RemoteMethodInfo, ProtoRPC
description of the method.
Returns:
Dictionary describing the method.
"""
descriptor = {}
request_message_type = (resource_container.ResourceContainer.
get_request_message(protorpc_method_info.remote))
request_kind = self.__get_request_kind(method_info)
remote_method = protorpc_method_info.remote
method_id = method_info.method_id(service.api_info)
path = method_info.get_path(service.api_info)
description = protorpc_method_info.remote.method.__doc__
descriptor['id'] = method_id
descriptor['path'] = path
descriptor['httpMethod'] = method_info.http_method
if description:
descriptor['description'] = description
descriptor['scopes'] = [
'https://www.googleapis.com/auth/userinfo.email'
]
parameters = self.__params_descriptor(
request_message_type, request_kind, path, method_id,
method_info.request_params_class)
if parameters:
descriptor['parameters'] = parameters
if method_info.request_params_class:
parameter_order = self.__params_order_descriptor(
method_info.request_params_class, path, is_params_class=True)
else:
parameter_order = self.__params_order_descriptor(
request_message_type, path, is_params_class=False)
if parameter_order:
descriptor['parameterOrder'] = parameter_order
request_descriptor = self.__request_message_descriptor(
request_kind, request_message_type, method_id,
method_info.request_body_class)
if request_descriptor is not None:
descriptor['request'] = request_descriptor
response_descriptor = self.__response_message_descriptor(
remote_method.response_type(), method_info.method_id(service.api_info))
if response_descriptor is not None:
descriptor['response'] = response_descriptor
return descriptor |
python | def _get_gc2_coordinates_for_rupture(self, edge_sets):
"""
Calculates the GC2 coordinates for the nodes of the upper edge of the
fault
"""
# Establish GC2 length - for use with Ry0
rup_gc2t, rup_gc2u = self.get_generalised_coordinates(
edge_sets[:, 0], edge_sets[:, 1])
# GC2 length should be the largest positive GC2 value of the edges
self.gc_length = numpy.max(rup_gc2u) |
java | public final void setDialogHeaderIcon(@DrawableRes final int resourceId) {
this.dialogHeaderIcon = AppCompatResources.getDrawable(getContext(), resourceId);
this.dialogHeaderIconBitmap = null;
this.dialogHeaderIconId = resourceId;
} |
java | public static Matrix reduce(Matrix source) {
Matrix response = Matrix.Factory.zeros(source.getRowCount(), 1);
for (int row = 0; row < source.getRowCount(); ++row) {
response.setAsDouble(row, row, 0);
}
return source.getRowCount() == source.getColumnCount() ? Ginv.reduce(source, response)
: response;
} |
python | def _normalize(image):
"""Normalize the image to zero mean and unit variance."""
offset = tf.constant(MEAN_RGB, shape=[1, 1, 3])
image -= offset
scale = tf.constant(STDDEV_RGB, shape=[1, 1, 3])
image /= scale
return image |
python | def parse_skewer_log(self, f):
""" Go through log file looking for skewer output """
fh = f['f']
regexes = {
'fq1': "Input file:\s+(.+)",
'fq2': "Paired file:\s+(.+)",
'r_processed': "(\d+) read|reads pairs? processed",
'r_short_filtered': "(\d+) \(\s*\d+.\d+%\) short read",
'r_empty_filtered': "(\d+) \(\s*\d+.\d+%\) empty read",
'r_avail': "(\d+) \(\s*\d+.\d+%\) read",
'r_trimmed': "(\d+) \(\s*\d+.\d+%\) trimmed read",
'r_untrimmed': "(\d+) \(\s*\d+.\d+%\) untrimmed read"
}
regex_hist = "\s?(\d+)\s+(\d+)\s+(\d+.\d+)%"
data = dict()
for k, v in regexes.items():
data[k] = 0
data['fq1'] = None
data['fq2'] = None
readlen_dist = OrderedDict()
for l in fh:
for k, r in regexes.items():
match = re.search(r, l)
if match:
data[k] = match.group(1).replace(',', '')
match = re.search(regex_hist, l)
if match:
read_length = int(match.group(1))
pct_at_rl = float(match.group(3))
readlen_dist[read_length] = pct_at_rl
if data['fq1'] is not None:
s_name = self.clean_s_name(data['fq1'], f['root'])
if s_name in self.skewer_readlen_dist:
log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
self.add_data_source(f, s_name)
self.add_skewer_data(s_name, data, f)
self.skewer_readlen_dist[s_name] = readlen_dist
if data['fq2'] is not None:
s_name = self.clean_s_name(data['fq2'], f['root'])
if s_name in self.skewer_readlen_dist:
log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
self.add_data_source(f, s_name)
self.add_skewer_data(s_name, data, f)
self.skewer_readlen_dist[s_name] = readlen_dist |
java | @Nonnull
public final Launcher decorateByEnv(@Nonnull EnvVars _env) {
final EnvVars env = new EnvVars(_env);
final Launcher outer = this;
return new Launcher(outer) {
@Override
public boolean isUnix() {
return outer.isUnix();
}
@Override
public Proc launch(ProcStarter starter) throws IOException {
EnvVars e = new EnvVars(env);
if (starter.envs!=null) {
for (String line : starter.envs) {
e.addLine(line);
}
}
starter.envs = Util.mapToEnv(e);
return outer.launch(starter);
}
@Override
public Channel launchChannel(String[] cmd, OutputStream out, FilePath workDir, Map<String, String> envVars) throws IOException, InterruptedException {
EnvVars e = new EnvVars(env);
e.putAll(envVars);
return outer.launchChannel(cmd,out,workDir,e);
}
@Override
public void kill(Map<String, String> modelEnvVars) throws IOException, InterruptedException {
outer.kill(modelEnvVars);
}
};
} |
java | public static Observable<byte[]> encode(Observable<String> src, final CharsetEncoder charsetEncoder) {
return src.map(new Func1<String, byte[]>() {
@Override
public byte[] call(String str) {
CharBuffer cb = CharBuffer.wrap(str);
ByteBuffer bb;
try {
bb = charsetEncoder.encode(cb);
} catch (CharacterCodingException e) {
throw new RuntimeException(e);
}
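// Copy only the valid encoded region (position..limit); the backing array may be larger.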
return Arrays.copyOfRange(bb.array(), bb.position(), bb.limit());
}
});
} |
java | @Override
public Object getData() {
Object data = super.getData();
// Treat "empty" the same as null (ie no selection)
boolean empty = false;
if (data == null) {
empty = true;
} else if (data instanceof List<?>) {
empty = ((List<?>) data).isEmpty();
} else if (data instanceof Object[]) {
empty = ((Object[]) data).length == 0;
}
// Check if we need to default to the first option
if (empty && !isAllowNoSelection()) {
List<?> options = getOptions();
if (options != null && !options.isEmpty()) {
// Check if NULL is an option
if (SelectListUtil.containsOption(options, null)) {
return Arrays.asList(new Object[]{null});
} else {
// Use the first option
Object firstOption = SelectListUtil.getFirstOption(options);
return Arrays.asList(new Object[]{firstOption});
}
}
}
return data;
} |
python | def clean(bundle, before, after, keep_last):
"""Clean up data downloaded with the ingest command.
"""
bundles_module.clean(
bundle,
before,
after,
keep_last,
) |
python | def q_prior(q, m=1, gamma=0.3, qmin=0.1):
"""Default prior on mass ratio q ~ q^gamma
"""
if q < qmin or q > 1:
return 0
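# Normalize so the prior integrates to 1 over [qmin, 1]: C = (gamma+1) / (1 - qmin**(gamma+1))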
C = 1/(1/(gamma+1)*(1 - qmin**(gamma+1)))
return C*q**gamma |
java | public SqlBuilder having(String having) {
if (StrUtil.isNotBlank(having)) {
sql.append(" HAVING ").append(having);
}
return this;
} |
java | public static LongDeserializer getDeserializer(int longSize, ByteBuffer fromBuffer, int bufferOffset)
{
// The buffer needs to be duplicated since the byte order is changed
ByteBuffer buffer = fromBuffer.duplicate().order(ByteOrder.BIG_ENDIAN);
switch (longSize) {
case 1:
return new Size1Des(buffer, bufferOffset);
case 2:
return new Size2Des(buffer, bufferOffset);
case 4:
return new Size4Des(buffer, bufferOffset);
case 8:
return new Size8Des(buffer, bufferOffset);
case 12:
return new Size12Des(buffer, bufferOffset);
case 16:
return new Size16Des(buffer, bufferOffset);
case 20:
return new Size20Des(buffer, bufferOffset);
case 24:
return new Size24Des(buffer, bufferOffset);
case 32:
return new Size32Des(buffer, bufferOffset);
case 40:
return new Size40Des(buffer, bufferOffset);
case 48:
return new Size48Des(buffer, bufferOffset);
case 56:
return new Size56Des(buffer, bufferOffset);
case 64:
return new Size64Des(buffer, bufferOffset);
default:
throw new IAE("Unsupported size %s", longSize);
}
} |
java | public boolean isSecuredByPermission(final String permission,
UserIdentityContext userIdentityContext) {
try {
checkPermission(permission,userIdentityContext);
return true;
} catch (AuthorizationException e) {
return false;
}
} |
python | def get_saved_rules(conf_file=None):
'''
Return a data structure of the rules in the conf file
CLI Example:
.. code-block:: bash
salt '*' nftables.get_saved_rules
'''
if _conf() and not conf_file:
conf_file = _conf()
with salt.utils.files.fopen(conf_file) as fp_:
lines = salt.utils.data.decode(fp_.readlines())
rules = []
for line in lines:
tmpline = line.strip()
if not tmpline:
continue
if tmpline.startswith('#'):
continue
rules.append(line)
return rules |
python | def _convert_sky_coords(self):
"""
Convert to sky coordinates
"""
parsed_angles = [(x, y)
for x, y in zip(self.coord[:-1:2], self.coord[1::2])
if (isinstance(x, coordinates.Angle) and isinstance(y, coordinates.Angle))
]
frame = coordinates.frame_transform_graph.lookup_name(self.coordsys)
lon, lat = zip(*parsed_angles)
if hasattr(lon, '__len__') and hasattr(lat, '__len__') and len(lon) == 1 and len(lat) == 1:
# force entries to be scalar if they are length-1
lon, lat = u.Quantity(lon[0]), u.Quantity(lat[0])
else:
# otherwise, they are vector quantities
lon, lat = u.Quantity(lon), u.Quantity(lat)
sphcoords = coordinates.UnitSphericalRepresentation(lon, lat)
coords = [SkyCoord(frame(sphcoords))]
if self.region_type != 'polygon':
coords += self.coord[len(coords) * 2:]
return coords |
python | def _convert_to_bytes(type_name, value):
"""Convert a typed value to a binary array"""
int_types = {'uint8_t': 'B', 'int8_t': 'b', 'uint16_t': 'H', 'int16_t': 'h', 'uint32_t': 'L', 'int32_t': 'l'}
type_name = type_name.lower()
if type_name not in int_types and type_name not in ['string', 'binary']:
raise ArgumentError('Type must be a known integer type, integer type array, string', known_integers=int_types.keys(), actual_type=type_name)
if type_name == 'string':
# value should be passed as a string
bytevalue = bytes(value)
elif type_name == 'binary':
bytevalue = bytes(value)
else:
bytevalue = struct.pack("<%s" % int_types[type_name], value)
return bytevalue |
java | private void throwExStrParam(MethodVisitor mv, Class<?> exCls) {
String exSig = Type.getInternalName(exCls);
mv.visitTypeInsn(NEW, exSig);
mv.visitInsn(DUP);
mv.visitLdcInsn("mapping " + this.className + " failed to map field:");
mv.visitVarInsn(ALOAD, 2);
mv.visitMethodInsn(INVOKEVIRTUAL, "java/lang/String", "concat", "(Ljava/lang/String;)Ljava/lang/String;");
mv.visitMethodInsn(INVOKESPECIAL, exSig, "<init>", "(Ljava/lang/String;)V");
mv.visitInsn(ATHROW);
} |
python | def attrget(self, groupname, attrname, rownr):
"""Get the value of an attribute in the given row in a group."""
return self._attrget(groupname, attrname, rownr) |
python | def harmonize_ocean(ocean, elevation, ocean_level):
"""
The goal of this function is to make the ocean floor less noisy.
The underwater erosion should cause the ocean floor to be more uniform
"""
shallow_sea = ocean_level * 0.85
midpoint = shallow_sea / 2.0
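# Compress deviations from the midpoint by a factor of 5 on both sides.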
ocean_points = numpy.logical_and(elevation < shallow_sea, ocean)
shallow_ocean = numpy.logical_and(elevation < midpoint, ocean_points)
elevation[shallow_ocean] = midpoint - ((midpoint - elevation[shallow_ocean]) / 5.0)
deep_ocean = numpy.logical_and(elevation > midpoint, ocean_points)
elevation[deep_ocean] = midpoint + ((elevation[deep_ocean] - midpoint) / 5.0) |
java | private Kam lookupKam(KamNode kamNode, Dialect dialect,
final String errorMsg)
throws KamCacheServiceException, InvalidIdException,
RequestException {
KamStoreObjectRef kamNodeRef = Converter.decodeNode(kamNode);
KamInfo kamInfo = null;
try {
kamInfo = kamCatalogDao.getKamInfoById(kamNodeRef.getKamInfoId());
} catch (SQLException e) {
throw new RequestException(errorMsg, e);
}
if (kamInfo == null) {
throw new InvalidIdException(kamNodeRef.getEncodedString());
}
final Kam kam = kamCacheService.getKam(kamInfo.getName());
if (kam == null) {
throw new InvalidIdException(kamNodeRef.getEncodedString());
}
return kam;
} |
java | public static <K, V> CacheEvent<K, V> creation(K newKey, V newValue, Cache<K, V> source) {
return new CreationEvent<>(newKey, newValue, source);
} |
python | def disconnect(self, message=""):
"""Hang up the connection.
Arguments:
message -- Quit message.
"""
try:
del self.connected
except AttributeError:
return
self.quit(message)
try:
self.socket.shutdown(socket.SHUT_WR)
self.socket.close()
except socket.error:
pass
del self.socket
self._handle_event(Event("disconnect", self.server, "", [message])) |
python | def source_headers(self):
""""Returns the headers for the resource source. Specifically, does not include any header that is
the EMPTY_SOURCE_HEADER value of _NONE_"""
t = self.schema_term
if t:
return [self._name_for_col_term(c, i)
for i, c in enumerate(t.children, 1) if c.term_is("Table.Column")
and c.get_value('name') != EMPTY_SOURCE_HEADER
]
else:
return None |
python | def sigma2(self,R,log=False):
"""
NAME:
sigma2
PURPOSE:
return the radial velocity variance at this R
INPUT:
R - Galactocentric radius (/ro)
log - if True, return the log (default: False)
OUTPUT:
sigma^2(R)
HISTORY:
2010-03-26 - Written - Bovy (NYU)
"""
if log:
return 2.*sc.log(self._params[2])-2.*(R-1.)/self._params[1]
else:
return self._params[2]**2.*sc.exp(-2.*(R-1.)/self._params[1]) |
java | @SuppressWarnings({"unchecked"})
public static <T> Iterator<T> cast(Iterator<? extends T> itr) {
return (Iterator)itr;
} |
java | public static base_response add(nitro_service client, iptunnel resource) throws Exception {
iptunnel addresource = new iptunnel();
addresource.name = resource.name;
addresource.remote = resource.remote;
addresource.remotesubnetmask = resource.remotesubnetmask;
addresource.local = resource.local;
addresource.protocol = resource.protocol;
addresource.ipsecprofilename = resource.ipsecprofilename;
return addresource.add_resource(client);
} |
python | def _agent_notification(self, context, method, hosting_devices, operation):
"""Notify individual Cisco cfg agents."""
admin_context = context if context.is_admin else context.elevated()
for hosting_device in hosting_devices:
agents = self._dmplugin.get_cfg_agents_for_hosting_devices(
admin_context, hosting_device['id'], admin_state_up=True,
schedule=True)
for agent in agents:
LOG.debug('Notify %(agent_type)s at %(topic)s.%(host)s the '
'message %(method)s',
{'agent_type': agent.agent_type,
'topic': agent.topic,
'host': agent.host,
'method': method})
cctxt = self.client.prepare(server=agent.host)
cctxt.cast(context, method) |
java | private void internalWrite(final byte[] data) throws IOException {
if (!canBeContinued(data, policies)) {
writer.close();
String fileName = path.resolve();
deleteBackups(path.getAllFiles(), backups);
writer = createByteArrayWriter(fileName, false, buffered, false, false);
for (Policy policy : policies) {
policy.reset();
}
}
writer.write(data, data.length);
} |
python | def _update_count(self, index):
"""Updates num_update.
Parameters
----------
index : int or list of int
The index to be updated.
"""
if not isinstance(index, (list, tuple)):
index = [index]
for idx in index:
if idx not in self._index_update_count:
self._index_update_count[idx] = self.begin_num_update
self._index_update_count[idx] += 1
self.num_update = max(self._index_update_count[idx], self.num_update) |
java | @Override
public long next(){
long l;
do{
l = super.next();
} while (l >= loopSpot);
return offset + l % range;
} |
java | @SuppressWarnings("rawtypes")
@Override
protected Object convertToType(Class type, Object value) throws Throwable {
String[] strings = value.toString().split(",");
if (strings.length != 2) {
throw new ConversionException(
"GeoPt 'value' must be able to be split into 2 float values "
+ "by ',' (latitude,longitude)");
}
try {
float latitude = new BigDecimal(strings[0].trim()).floatValue();
float longitude = new BigDecimal(strings[1].trim()).floatValue();
return new GeoPt(latitude, longitude);
} catch (Exception e) {
throw new ConversionException(
"Cannot parse GeoPt value into 2 float values: "
+ "latitude [" + strings[0].trim()
+ "], longitude [" + strings[1].trim() + "]");
}
} |
java | protected final List<String> getSessionPermissions() {
if (sessionTracker != null) {
Session currentSession = sessionTracker.getSession();
return (currentSession != null) ? currentSession.getPermissions() : null;
}
return null;
} |
python | def download(self, obj, path=None, show_progress=True, resume=True,
auto_retry=True, proapi=False):
"""
Download a file
:param obj: :class:`.File` object
:param str path: local path
:param bool show_progress: whether to show download progress
:param bool resume: whether to resume on unfinished downloads
identified by filename
:param bool auto_retry: whether to retry automatically upon closed
transfer until the file's download is finished
:param bool proapi: whether to use pro API
"""
url = obj.get_download_url(proapi)
download(url, path=path, session=self.http.session,
show_progress=show_progress, resume=resume,
auto_retry=auto_retry) |
java | public static PortalRequest unwrapPortalRequest(HttpServletRequest request) {
do {
if (request instanceof PortalRequest) {
return (PortalRequest) request;
} else if (request instanceof HttpServletRequestWrapper) {
request = (HttpServletRequest) ((HttpServletRequestWrapper) request).getRequest();
} else {
return null;
}
} while (true);
} |
python | def romanized(locale: str = '') -> Callable:
"""Romanize the Cyrillic text.
Transliterate the Cyrillic language from the Cyrillic
script into the Latin alphabet.
.. note:: At this moment it works only for `ru`, `uk`, `kk`.
:param locale: Locale code.
:return: Latinized text.
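Example (illustrative)::
@romanized('ru')
def get_name():
return 'Ламар'  # the wrapper returns 'Lamar'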
"""
def romanized_deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
# String can contain ascii symbols, digits and
# punctuation symbols.
alphabet = {s: s for s in
letters + digits + punctuation}
alphabet.update(data.ROMANIZATION_DICT[locale])
# Add common cyrillic letters
alphabet.update(data.COMMON_LETTERS)
except KeyError:
raise UnsupportedLocale(locale)
result = func(*args, **kwargs)
txt = ''.join([alphabet[i] for i in result if i in alphabet])
return txt
return wrapper
return romanized_deco |
python | def filter(self, func):
"""Create a Catalog of a subset of entries based on a condition
Note that, whatever specific class this is performed on, the return
instance is a Catalog. The entries are passed unmodified, so they
will still reference the original catalog instance and include its
details such as the directory.
Parameters
----------
func : function
This should take a CatalogEntry and return True or False. Those
items returning True will be included in the new Catalog, with the
same entry names
Returns
-------
New Catalog
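Examples
--------
>>> small_cat = cat.filter(lambda e: 'weather' in str(e))  # illustrative condition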
"""
return Catalog.from_dict({key: entry for key, entry in self.items()
if func(entry)}) |
python | def parse_date_period(date):
"""
Parse the --date value and return a pair of datetime objects.
The format is [YYYY]MMDD[,[YYYY]MMDD].
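For example, "0401,0430" yields (April 1 00:00:00, April 30 23:59:59)
of the current year.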
"""
import datetime
now = datetime.datetime.today()
date_len = len(date)
if date_len == 4:
date1 = str(now.year) + date
date2 = str(now.year) + date + "235959"
elif date_len == 8:
date1 = date
date2 = date + "235959"
elif date_len == 9:
if date[4] != ',':
raise TypeError
date1 = str(now.year) + date[0:4]
date2 = str(now.year) + date[5:9] + "235959"
elif date_len == 17:
if date[8] != ',':
raise TypeError
date1 = date[0:8]
date2 = date[9:17] + "235959"
else:
raise TypeError
try:
date1 = datetime.datetime.strptime(date1, "%Y%m%d")
except ValueError:
if date_len < 9:
raise ValueError("Error of date value in --date parameter, use --date=[YYYY]MMDD")
else:
raise ValueError("Error in the first date value in --date parameter, "
"use --date=[YYYY]MMDD,[YYYY]MMDD")
try:
date2 = datetime.datetime.strptime(date2, "%Y%m%d%H%M%S")
except ValueError:
raise ValueError("Error in the second date value in --date parameter, "
"use --date=[YYYY]MMDD,[YYYY]MMDD")
if date1 > date2:
raise ValueError("Wrong parameter --date: the first date is after the second!")
return date1, date2 |
java | public EClass getIfcLayeredItem() {
if (ifcLayeredItemEClass == null) {
ifcLayeredItemEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc2x3tc1Package.eNS_URI)
.getEClassifiers().get(958);
}
return ifcLayeredItemEClass;
} |
python | def _set_signed_in(self):
"""Populate the signed_in list with the names of currently
signed in users.
"""
names = [
controller.get_user_name(user, full_name=CONFIG['FULL_USER_NAMES'])
for user in controller.signed_in_users()
]
self.lbl_signedin_list.setText('\n'.join(sorted(names))) |
python | def __compare_ips(first, second):
"""Compare IPs
Compares two IPs and returns a status based on which is greater
If first is less than second: -1
If first is equal to second: 0
If first is greater than second: 1
Arguments:
first {str} -- A string representing an IP address
second {str} -- A string representing an IP address
Returns:
int
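Example:
__compare_ips('10.0.0.2', '10.0.0.10')  # -> -1 (numeric, not lexicographic)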
"""
# If the two IPs are the same, return 0
if first == second:
return 0
# Create lists from the split of each IP, store them as ints
lFirst = [int(i) for i in first.split('.')]
lSecond = [int(i) for i in second.split('.')]
# Go through each part from left to right until we find the
# difference
for i in [0, 1, 2, 3]:
# If the part of x is greater than the part of y
if lFirst[i] > lSecond[i]:
return 1
# Else if the part of x is less than the part of y
elif lFirst[i] < lSecond[i]:
return -1 |
python | def load_data(self, table_name, obj, database=None, **kwargs):
"""
Wraps the LOAD DATA DDL statement. Loads data into an MapD table by
physically moving data files.
Parameters
----------
table_name : string
obj: pandas.DataFrame or pyarrow.Table
database : string, default None (optional)
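Examples
--------
>>> con.load_data('mytable', df)  # df: an existing pandas.DataFrame (illustrative)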
"""
_database = self.db_name
self.set_database(database)
self.con.load_table(table_name, obj, **kwargs)
self.set_database(_database) |
python | def add_tarball(self, tarball, package):
"""Add a tarball, possibly creating the directory if needed."""
if tarball is None:
logger.error(
"No tarball found for %s: probably a renamed project?",
package)
return
target_dir = os.path.join(self.root_directory, package)
if not os.path.exists(target_dir):
os.mkdir(target_dir)
logger.info("Created %s", target_dir)
logger.info("Copying tarball to %s", target_dir)
shutil.copy(tarball, target_dir) |
python | def wait_until_element_clickable(self, element, timeout=None):
"""Search element and wait until it is clickable
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:param timeout: max time to wait
:returns: the web element if it is clickable
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
:raises TimeoutException: If the element is not clickable after the timeout
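Example (locator is illustrative)::
utils.wait_until_element_clickable((By.ID, 'submit'), timeout=10)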
"""
return self._wait_until(self._expected_condition_find_element_clickable, element, timeout) |
python | def slice_slice(old_slice, applied_slice, size):
"""Given a slice and the size of the dimension to which it will be applied,
index it with another slice to return a new slice equivalent to applying
the slices sequentially
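For example, slice_slice(slice(2, 10, 2), slice(1, 3), 12) == slice(4, 7, 2).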
"""
step = (old_slice.step or 1) * (applied_slice.step or 1)
# For now, use the hack of turning old_slice into an ndarray to reconstruct
# the slice start and stop. This is not entirely ideal, but it is still
# definitely better than leaving the indexer as an array.
items = _expand_slice(old_slice, size)[applied_slice]
if len(items) > 0:
start = items[0]
stop = items[-1] + int(np.sign(step))
if stop < 0:
stop = None
else:
start = 0
stop = 0
return slice(start, stop, step) |
python | def table_format(self, columns, content):
"""
Enumerate each table column.
"""
result = []
textw = TextWidget()
for row in content:
result_row = []
for i, cell in enumerate(row):
result_row.append(textw.render(cell, **columns[i]))
result.append(result_row)
return (columns, result) |
python | def sequence(self, other, exclude_list_fields=None):
"""Return a copy of this object which combines all the fields common to both `self` and `other`.
List fields will be concatenated.
The return type of this method is the type of `self` (or whatever `.copy()` returns), but the
`other` argument can be any `_ExtensibleAlgebraic` instance.
"""
exclude_list_fields = frozenset(exclude_list_fields or [])
overwrite_kwargs = {}
nonexistent_excluded_fields = exclude_list_fields - self._list_fields
if nonexistent_excluded_fields:
raise self.AlgebraicDataError(
"Fields {} to exclude from a sequence() were not found in this object's list fields: {}. "
"This object is {}, the other object is {}."
.format(nonexistent_excluded_fields, self._list_fields, self, other))
shared_list_fields = (self._list_fields
& other._list_fields
- exclude_list_fields)
if not shared_list_fields:
raise self.AlgebraicDataError(
"Objects to sequence have no shared fields after excluding {}. "
"This object is {}, with list fields: {}. "
"The other object is {}, with list fields: {}."
.format(exclude_list_fields, self, self._list_fields, other, other._list_fields))
for list_field_name in shared_list_fields:
lhs_value = getattr(self, list_field_name)
rhs_value = getattr(other, list_field_name)
overwrite_kwargs[list_field_name] = lhs_value + rhs_value
return self.copy(**overwrite_kwargs) |
java | public static CmsResource initResource(
CmsObject cms,
String resourceName,
HttpServletRequest req,
HttpServletResponse res)
throws CmsException {
return OpenCmsCore.getInstance().initResource(cms, resourceName, req, res);
} |
java | public static <G extends Relatable<?>> Context downset(final CompleteLattice lattice) {
List<Object> elements = Arrays.asList(lattice.toArray());
return new Context(elements, elements, new NotGreaterOrEqual());
} |
java | public static Object invoke(ApplicationContext context, String beanName,
MethodInfo methodInfo, final Object[] params) throws IllegalArgumentException,
IllegalAccessException, InvocationTargetException {
Object bean = context.getBean(beanName);
Method handlerMethod = methodInfo.getMethod();
ReflectionUtils.makeAccessible(handlerMethod);
Object result = handlerMethod.invoke(bean, params);
if (result instanceof Optional) {
return ((Optional<?>) result).orElse(null);
}
return result;
} |
java | private FileSystem getFileSystemSafe() throws IOException
{
try {
fs.getFileStatus(new Path("/"));
return fs;
}
catch (NullPointerException e) {
throw new IOException("file system not initialized");
}
} |
python | def parse_nni_variable(code):
"""Parse `nni.variable` expression.
Return the name argument and AST node of annotated expression.
code: annotation string
"""
name, call = parse_annotation_function(code, 'variable')
assert len(call.args) == 1, 'nni.variable contains more than one argument'
arg = call.args[0]
assert type(arg) is ast.Call, 'Value of nni.variable is not a function call'
assert type(arg.func) is ast.Attribute, 'nni.variable value is not a NNI function'
assert type(arg.func.value) is ast.Name, 'nni.variable value is not a NNI function'
assert arg.func.value.id == 'nni', 'nni.variable value is not a NNI function'
name_str = astor.to_source(name).strip()
keyword_arg = ast.keyword(arg='name', value=ast.Str(s=name_str))
arg.keywords.append(keyword_arg)
if arg.func.attr == 'choice':
convert_args_to_dict(arg)
return name, arg |
python | def resolve_job_references(io_hash, job_outputs, should_resolve=True):
'''
:param io_hash: an input or output hash in which to resolve any job-based object references possible
:type io_hash: dict
:param job_outputs: a mapping of finished local jobs to their output hashes
:type job_outputs: dict
:param should_resolve: whether it is an error if a job-based object reference in *io_hash* cannot be resolved yet
:type should_resolve: boolean
Modifies *io_hash* in-place.
'''
q = []
for field in io_hash:
if is_job_ref(io_hash[field]):
io_hash[field] = resolve_job_ref(io_hash[field], job_outputs, should_resolve)
elif isinstance(io_hash[field], list) or isinstance(io_hash[field], dict):
q.append(io_hash[field])
while len(q) > 0:
thing = q.pop()
if isinstance(thing, list):
for i in range(len(thing)):
if is_job_ref(thing[i]):
thing[i] = resolve_job_ref(thing[i], job_outputs, should_resolve)
elif isinstance(thing[i], list) or isinstance(thing[i], dict):
q.append(thing[i])
else:
for field in thing:
if is_job_ref(thing[field]):
thing[field] = resolve_job_ref(thing[field], job_outputs, should_resolve)
elif isinstance(thing[field], list) or isinstance(thing[field], dict):
q.append(thing[field]) |
python | def set_session(s):
"""
Configures the default connection with a preexisting :class:`cassandra.cluster.Session`
Note: the mapper presently requires a Session :attr:`~.row_factory` set to ``dict_factory``.
This may be relaxed in the future
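Minimal sketch::
from cassandra.cluster import Cluster
from cassandra.query import dict_factory
session = Cluster().connect()
session.row_factory = dict_factory
set_session(session)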
"""
try:
conn = get_connection()
except CQLEngineException:
# no default connection set; initialize one
register_connection('default', session=s, default=True)
conn = get_connection()
if conn.session:
log.warning("configuring new default connection for cqlengine when one was already set")
if s.row_factory is not dict_factory:
raise CQLEngineException("Failed to initialize: 'Session.row_factory' must be 'dict_factory'.")
conn.session = s
conn.cluster = s.cluster
# Set default keyspace from given session's keyspace
if conn.session.keyspace:
from cassandra.cqlengine import models
models.DEFAULT_KEYSPACE = conn.session.keyspace
conn.setup_session()
log.debug("cqlengine default connection initialized with %s", s) |
python | def get_repository(name):
'''
Get the details of a local PSGet repository
:param name: Name of the repository
:type name: ``str``
CLI Example:
.. code-block:: bash
salt 'win01' psget.get_repository MyRepo
'''
# Putting quotes around the parameter protects against command injection
cmd = 'Get-PSRepository "{0}"'.format(name)
return _pshell(cmd)
python | def get_data_port_m(self, data_port_id):
"""Searches and returns the model of a data port of a given state
The method searches a port with the given id in the data ports of the given state model. If the state model
is a container state, not only the input and output data ports are looked at, but also the scoped variables.
:param data_port_id: The data port id to be searched
:return: The model of the data port or None if it is not found
"""
from itertools import chain
data_ports_m = chain(self.input_data_ports, self.output_data_ports)
for data_port_m in data_ports_m:
if data_port_m.data_port.data_port_id == data_port_id:
return data_port_m
return None |
java | private void evaluateDeferredAfterMaskUpdate() {
final Iterator<DeferredAction<M>> it = this.deferred.iterator();
while (it.hasNext()) {
final DeferredAction<M> d = it.next();
/* is the current action still masked? */
if (masks.stream().anyMatch(p -> p.test(d.metadata))) {
continue;
}
it.remove();
d.runnable.run();
}
} |
python | def wait_for_task(upid, timeout=300):
'''
Wait until a the task has been finished successfully
'''
start_time = time.time()
info = _lookup_proxmox_task(upid)
if not info:
log.error('wait_for_task: No task information '
'retrieved based on given criteria.')
raise SaltCloudExecutionFailure
while True:
if 'status' in info and info['status'] == 'OK':
log.debug('Task has been finished!')
return True
time.sleep(3) # Little more patience, we're not in a hurry
if time.time() - start_time > timeout:
log.debug('Timeout reached while waiting for task to be finished')
return False
info = _lookup_proxmox_task(upid) |
python | def has_same_unique_benchmark(self):
"True if all suites have one benchmark with the same name"
if any(len(suite) > 1 for suite in self.suites):
return False
names = self.suites[0].get_benchmark_names()
return all(suite.get_benchmark_names() == names
for suite in self.suites[1:]) |
java | public static Map<String, Object> getCommonParams(JsonNode paramsNode)
{
Map<String, Object> parameters = new HashMap<>();
for (JsonNode child : paramsNode)
{
// If there isn't a name then the response from GS must be erroneous.
if (!child.hasNonNull("name"))
{
logger.error("Common Parameter JsonNode encountered with "
+ "no parameter name!");
continue;
}
// Look up the parameter based on the "name" attribute of the node.
String paramName = child.path("name").asText();
// What type of value is it and what's the value?
if (!child.hasNonNull("value"))
{
logger.debug("No value found for Common Parameter {}",
child.path("name").asText());
continue;
}
if (STRING_PARAMS.contains(paramName.toUpperCase()))
{
parameters.put(paramName, child.path("value").asText());
}
else if (INT_PARAMS.contains(paramName.toUpperCase()))
{
parameters.put(paramName, child.path("value").asInt());
}
else if (BOOLEAN_PARAMS.contains(paramName.toUpperCase()))
{
parameters.put(paramName, child.path("value").asBoolean());
}
else
{
logger.debug("Unknown Common Parameter: {}", paramName);
}
logger.debug("Parameter {}: {}",
paramName, child.path("value").asText());
}
return parameters;
} |
python | def lit_count(self):
"""
The number of LEDs on the bar graph actually lit up. Note that just
like :attr:`value`, this can be negative if the LEDs are lit from last
to first.
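For example, a 10-LED graph with value 0.5 reports 5 lit LEDs;
with value -0.5 it reports -5.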
"""
lit_value = self.value * len(self)
if not isinstance(self[0], PWMLED):
lit_value = int(lit_value)
return lit_value |
python | def imread(img, color=None, dtype=None):
'''
dtype = 'noUint', uint8, float, 'float', ...
'''
COLOR2CV = {'gray': cv2.IMREAD_GRAYSCALE,
'all': cv2.IMREAD_COLOR,
None: cv2.IMREAD_ANYCOLOR
}
c = COLOR2CV[color]
if callable(img):
img = img()
elif isinstance(img, string_types):
# open with openCV
# grey - 8 bit
if dtype in (None, "noUint") or np.dtype(dtype) != np.uint8:
c |= cv2.IMREAD_ANYDEPTH
img2 = cv2.imread(img, c)
if img2 is None:
raise IOError("image '%s' is not existing" % img)
img = img2
elif color == 'gray' and img.ndim == 3: # multi channel img like rgb
# cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #cannot handle float64
img = toGray(img)
# transform array to uint8 array due to openCV restriction
if dtype is not None:
if isinstance(img, np.ndarray):
img = _changeArrayDType(img, dtype, cutHigh=False)
return img |
java | public OAuthHmacCredential load10aCredential(String userId) throws IOException {
if (getCredentialStore() == null) {
return null;
}
OAuthHmacCredential credential = new10aCredential(userId);
if (!getCredentialStore().load(userId, credential)) {
return null;
}
return credential;
} |
java | public static com.liferay.commerce.model.CommerceOrderItem createCommerceOrderItem(
long commerceOrderItemId) {
return getService().createCommerceOrderItem(commerceOrderItemId);
} |
java | public void marshall(User user, ProtocolMarshaller protocolMarshaller) {
if (user == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(user.getConsoleAccess(), CONSOLEACCESS_BINDING);
protocolMarshaller.marshall(user.getGroups(), GROUPS_BINDING);
protocolMarshaller.marshall(user.getPassword(), PASSWORD_BINDING);
protocolMarshaller.marshall(user.getUsername(), USERNAME_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
python | def log_reject( self, block_id, vtxindex, op, op_data ):
"""
Log a rejected operation
"""
debug_op = self.sanitize_op( op_data )
if 'history' in debug_op:
del debug_op['history']
log.debug("REJECT %s at (%s, %s) data: %s", op_get_opcode_name( op ), block_id, vtxindex,
", ".join( ["%s='%s'" % (k, debug_op[k]) for k in sorted(debug_op.keys())] ))
return |
python | def get_risk_models(oqparam, kind='vulnerability vulnerability_retrofitted '
'fragility consequence'):
"""
:param oqparam:
an OqParam instance
:param kind:
a space-separated string with the kinds of risk models to read
:returns:
a dictionary riskid -> loss_type, kind -> function
"""
kinds = kind.split()
rmodels = AccumDict()
for kind in kinds:
for key in sorted(oqparam.inputs):
mo = re.match('(occupants|%s)_%s$' % (COST_TYPE_REGEX, kind), key)
if mo:
loss_type = mo.group(1) # the cost_type in the key
# can be occupants, structural, nonstructural, ...
rmodel = nrml.to_python(oqparam.inputs[key])
if len(rmodel) == 0:
raise InvalidFile('%s is empty!' % oqparam.inputs[key])
rmodels[loss_type, kind] = rmodel
if rmodel.lossCategory is None: # NRML 0.4
continue
cost_type = str(rmodel.lossCategory)
rmodel_kind = rmodel.__class__.__name__
kind_ = kind.replace('_retrofitted', '') # strip retrofitted
if not rmodel_kind.lower().startswith(kind_):
raise ValueError(
'Error in the file "%s_file=%s": is '
'of kind %s, expected %s' % (
key, oqparam.inputs[key], rmodel_kind,
kind.capitalize() + 'Model'))
if cost_type != loss_type:
raise ValueError(
'Error in the file "%s_file=%s": lossCategory is of '
'type "%s", expected "%s"' %
(key, oqparam.inputs[key],
rmodel.lossCategory, loss_type))
rdict = AccumDict(accum={})
rdict.limit_states = []
for (loss_type, kind), rm in sorted(rmodels.items()):
if kind == 'fragility':
# build a copy of the FragilityModel with different IM levels
newfm = rm.build(oqparam.continuous_fragility_discretization,
oqparam.steps_per_interval)
for (imt, riskid), ffl in newfm.items():
if not rdict.limit_states:
rdict.limit_states.extend(rm.limitStates)
# we are rejecting the case of loss types with different
# limit states; this may change in the future
assert rdict.limit_states == rm.limitStates, (
rdict.limit_states, rm.limitStates)
rdict[riskid][loss_type, kind] = ffl
# TODO: see if it is possible to remove the attribute
# below, used in classical_damage
ffl.steps_per_interval = oqparam.steps_per_interval
elif kind == 'consequence':
for riskid, cf in rm.items():
rdict[riskid][loss_type, kind] = cf
else: # vulnerability
cl_risk = oqparam.calculation_mode in (
'classical', 'classical_risk')
# only for classical_risk reduce the loss_ratios
# to make sure they are strictly increasing
for (imt, riskid), rf in rm.items():
rdict[riskid][loss_type, kind] = (
rf.strictly_increasing() if cl_risk else rf)
return rdict |
java | public void marshall(UpdateGlobalSecondaryIndexAction updateGlobalSecondaryIndexAction, ProtocolMarshaller protocolMarshaller) {
if (updateGlobalSecondaryIndexAction == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(updateGlobalSecondaryIndexAction.getIndexName(), INDEXNAME_BINDING);
protocolMarshaller.marshall(updateGlobalSecondaryIndexAction.getProvisionedThroughput(), PROVISIONEDTHROUGHPUT_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
python | def _get_value_from_json(self, dict_path, metrics_json):
"""
Get a value from a dictionary under N keys, represented as str("key1.key2...key{n}")
"""
for key in dict_path.split('.'):
if key in metrics_json:
metrics_json = metrics_json.get(key)
else:
return None
return metrics_json |
java | @Override
protected void executeImpl() throws TaskExecutionFailedException {
try {
String account = task.getAccount();
String storeId = task.getStoreId();
String spaceId = task.getSpaceId();
String contentId = task.getContentId();
String action = task.getAction();
Map<String, String> props = task.getContentProperties();
String acls = task.getSpaceACLs();
Date timestamp = new Date(Long.valueOf(task.getDateTime()));
auditLogStore.write(account,
storeId,
spaceId,
contentId,
task.getContentChecksum(),
task.getContentMimetype(),
task.getContentSize(),
task.getUserId(),
action,
props != null ? AuditLogStoreUtil.serialize(props) : null,
acls,
task.getSourceSpaceId(),
task.getSourceContentId(),
timestamp);
log.debug("audit task successfully processed: {}", task);
} catch (TransactionSystemException e) {
log.error("failed to write item ( account={} storeId={} spaceId={} contentId={} timestamp={} ) " +
"to the database due to a transactional error. Likely cause: duplicate entry. Details: {}. " +
"Ignoring...",
task.getAccount(),
task.getStoreId(),
task.getSpaceId(),
task.getContentId(),
new Date(Long.valueOf(task.getDateTime())),
e.getMessage());
} catch (Exception e) {
String message = "Failed to execute " + task + ": " + e.getMessage();
log.debug(message, e);
throw new TaskExecutionFailedException(message, e);
}
} |
python | def registerContextEngineId(self, contextEngineId, pduTypes, processPdu):
"""Register application with dispatcher"""
# 4.3.2 -> no-op
# 4.3.3
for pduType in pduTypes:
k = contextEngineId, pduType
if k in self._appsRegistration:
raise error.ProtocolError(
'Duplicate registration %r/%s' % (contextEngineId, pduType))
# 4.3.4
self._appsRegistration[k] = processPdu
debug.logger & debug.FLAG_DSP and debug.logger(
'registerContextEngineId: contextEngineId %r pduTypes '
'%s' % (contextEngineId, pduTypes)) |
python | def sample_gtf(data, D, k, likelihood='gaussian', prior='laplace',
lambda_hyperparams=None, lam_walk_stdev=0.01, lam0=1.,
dp_hyperparameter=None, w_hyperparameters=None,
iterations=7000, burn=2000, thin=10,
robust=False, empirical=False,
verbose=False):
'''Generate samples from the generalized graph trend filtering distribution via a modified Swendsen-Wang slice sampling algorithm.
Options for likelihood: gaussian, binomial, poisson. Options for prior: laplace, doublepareto.'''
Dk = get_delta(D, k)
dk_rows, dk_rowbreaks, dk_cols, dk_vals = decompose_delta(Dk)
if likelihood == 'gaussian':
y, w = data
elif likelihood == 'binomial':
trials, successes = data
elif likelihood == 'poisson':
obs = data
else:
raise Exception('Unknown likelihood type: {0}'.format(likelihood))
if prior == 'laplace':
if lambda_hyperparams is None:
lambda_hyperparams = (1., 1.)
elif prior == 'laplacegamma':
if lambda_hyperparams is None:
lambda_hyperparams = (1., 1.)
if dp_hyperparameter is None:
dp_hyperparameter = 1.
elif prior == 'doublepareto' or prior == 'doublepareto2':
if lambda_hyperparams is None:
lambda_hyperparams = (1.0, 1.0)
if dp_hyperparameter is None:
dp_hyperparameter = 0.1
elif prior == 'cauchy':
if lambda_hyperparams is None:
lambda_hyperparams = (1.0, 1.0)
else:
raise Exception('Unknown prior type: {0}.'.format(prior))
if robust and w_hyperparameters is None:
w_hyperparameters = (1., 1.)
# Run the Gibbs sampler
sample_size = (iterations - burn) // thin
beta_samples = np.zeros((sample_size, D.shape[1]), dtype='double')
lam_samples = np.zeros(sample_size, dtype='double')
if likelihood == 'gaussian':
if prior == 'laplace':
gflbayes_gaussian_laplace(len(y), y, w,
dk_rows, dk_rowbreaks, dk_cols, dk_vals,
lambda_hyperparams[0], lambda_hyperparams[1],
iterations, burn, thin,
double_matrix_to_c_pointer(beta_samples), lam_samples)
elif prior == 'laplacegamma':
if robust:
gflbayes_gaussian_laplace_gamma_robust(len(y), y, w,
dk_rows, dk_rowbreaks, dk_cols, dk_vals,
lambda_hyperparams[0], lambda_hyperparams[1],
dp_hyperparameter,
w_hyperparameters[0], w_hyperparameters[1],
iterations, burn, thin,
double_matrix_to_c_pointer(beta_samples), lam_samples)
else:
gflbayes_gaussian_laplace_gamma(len(y), y, w,
dk_rows, dk_rowbreaks, dk_cols, dk_vals,
lambda_hyperparams[0], lambda_hyperparams[1],
dp_hyperparameter,
iterations, burn, thin,
double_matrix_to_c_pointer(beta_samples), lam_samples)
elif prior == 'doublepareto':
gflbayes_gaussian_doublepareto(len(y), y, w,
dk_rows, dk_rowbreaks, dk_cols, dk_vals,
lambda_hyperparams[0], lambda_hyperparams[1],
lam_walk_stdev, lam0, dp_hyperparameter,
iterations, burn, thin,
double_matrix_to_c_pointer(beta_samples), lam_samples)
elif prior == 'doublepareto2':
gflbayes_gaussian_doublepareto2(len(y), y, w,
dk_rows, dk_rowbreaks, dk_cols, dk_vals,
lambda_hyperparams[0], lambda_hyperparams[1],
dp_hyperparameter,
iterations, burn, thin,
double_matrix_to_c_pointer(beta_samples), lam_samples)
elif prior == 'cauchy':
gflbayes_gaussian_cauchy(len(y), y, w,
dk_rows, dk_rowbreaks, dk_cols, dk_vals,
lambda_hyperparams[0], lambda_hyperparams[1],
lam_walk_stdev, lam0,
iterations, burn, thin,
double_matrix_to_c_pointer(beta_samples), lam_samples)
elif likelihood == 'binomial':
if prior == 'laplace':
gflbayes_binomial_laplace(len(trials), trials, successes,
dk_rows, dk_rowbreaks, dk_cols, dk_vals,
lambda_hyperparams[0], lambda_hyperparams[1],
iterations, burn, thin,
double_matrix_to_c_pointer(beta_samples), lam_samples)
elif prior == 'doublepareto':
gflbayes_binomial_doublepareto(len(trials), trials, successes,
dk_rows, dk_rowbreaks, dk_cols, dk_vals,
lambda_hyperparams[0], lambda_hyperparams[1],
lam_walk_stdev, lam0, dp_hyperparameter,
iterations, burn, thin,
double_matrix_to_c_pointer(beta_samples), lam_samples)
elif prior == 'laplacegamma':
if empirical:
gflbayes_empirical_binomial_laplace_gamma(len(trials), trials, successes,
dk_rows, dk_rowbreaks, dk_cols, dk_vals,
lam0,
iterations, burn, thin,
double_matrix_to_c_pointer(beta_samples), lam_samples)
else:
gflbayes_binomial_laplace_gamma(len(trials), trials, successes,
dk_rows, dk_rowbreaks, dk_cols, dk_vals,
lambda_hyperparams[0], lambda_hyperparams[1],
dp_hyperparameter,
iterations, burn, thin,
double_matrix_to_c_pointer(beta_samples), lam_samples)
elif likelihood == 'poisson':
if prior == 'laplace':
gflbayes_poisson_laplace(len(obs), obs,
dk_rows, dk_rowbreaks, dk_cols, dk_vals,
lambda_hyperparams[0], lambda_hyperparams[1],
iterations, burn, thin,
double_matrix_to_c_pointer(beta_samples), lam_samples)
elif prior == 'doublepareto':
gflbayes_poisson_doublepareto(len(obs), obs,
dk_rows, dk_rowbreaks, dk_cols, dk_vals,
lambda_hyperparams[0], lambda_hyperparams[1],
lam_walk_stdev, lam0, dp_hyperparameter,
iterations, burn, thin,
double_matrix_to_c_pointer(beta_samples), lam_samples)
else:
raise Exception('Unknown likelihood type: {0}'.format(likelihood))
return (beta_samples,lam_samples) |
python | def option(self, *args, **kwargs):
"""
Registers a click.option which falls back to a configmanager Item
if user hasn't provided a value in the command line.
Item must be the last of ``args``.
Examples::
config = Config({'greeting': 'Hello'})
@click.command()
@config.click.option('--greeting', config.greeting)
def say_hello(greeting):
click.echo(greeting)
"""
args, kwargs = _config_parameter(args, kwargs)
return self._click.option(*args, **kwargs) |
python | def close(self):
""" Close the object nicely and release all the data
arrays from memory. YOU CAN'T GET IT BACK; the pointers
and data are gone so use the getData method to get
the data array returned for future use. You can use
putData to reattach a new data array to the imageObject.
"""
if self._image is None:
return
# mcara: I think the code below is not necessary but in order to
# preserve the same functionality as the code removed below,
# I make an empty copy of the image object:
empty_image = fits.HDUList()
for u in self._image:
empty_image.append(u.__class__(data=None, header=None))
# mcara: END unnecessary code
self._image.close() #calls fits.close()
self._image = empty_image |
java | private Runnable getTask() {
boolean timedOut = false; // Did the last poll() time out?
for (;;) {
int c = ctl.get();
int rs = runStateOf(c);
// Check if queue empty only if necessary.
if (rs >= SHUTDOWN && (rs >= STOP || workQueue.isEmpty())) {
decrementWorkerCount();
return null;
}
int wc = workerCountOf(c);
// Are workers subject to culling?
boolean timed = allowCoreThreadTimeOut || wc > corePoolSize;
if ((wc > maximumPoolSize || (timed && timedOut))
&& (wc > 1 || workQueue.isEmpty())) {
if (compareAndDecrementWorkerCount(c))
return null;
continue;
}
try {
Runnable r = timed ?
workQueue.poll(keepAliveTime, TimeUnit.NANOSECONDS) :
workQueue.take();
if (r != null)
return r;
timedOut = true;
} catch (InterruptedException retry) {
timedOut = false;
}
}
} |
java | private static Map<TypeVariable<?>, Type> invert(Map<TypeVariable<?>, Type> m) {
final TypeVariableMap result = new TypeVariableMap();
for (Map.Entry<TypeVariable<?>, Type> e : m.entrySet()) {
if (e.getValue() instanceof TypeVariable<?>) {
result.put((TypeVariable<?>) e.getValue(), e.getKey());
}
}
return result;
} |
python | def inheritanceTree(self):
"""
Returns the inheritance tree for this schema, traversing up the hierarchy for the inherited schema instances.
:return: <generator>
"""
inherits = self.inherits()
while inherits:
ischema = orb.system.schema(inherits)
if not ischema:
raise orb.errors.ModelNotFound(schema=inherits)
yield ischema
inherits = ischema.inherits() |