language (string, 2 classes) | func_code_string (string, lengths 63 to 466k) |
---|---|
java | public void setInput(final byte[] input)
{
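// A leading -128 byte flags compressed input: inflate it (skipping the marker) before wrapping in a BitReader.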
if (input[0] == -128) {
r = new BitReader(inflateInput(input, 1));
}
else {
r = new BitReader(input);
}
} |
java | public static ICommentProcessor wrap(final ICommentProcessor processor, final IProcessorDialect dialect) {
Validate.notNull(dialect, "Dialect cannot be null");
if (processor == null) {
return null;
}
return new CommentProcessorWrapper(processor, dialect);
} |
python | def write(self, session, data):
"""Writes data to device or interface synchronously.
Corresponds to viWrite function of the VISA library.
:param session: Unique logical identifier to a session.
:param data: data to be written.
:type data: str
:return: Number of bytes actually transferred, return value of the library call.
:rtype: int, VISAStatus
"""
# from the session handle, dispatch to the write method of the session object.
try:
ret = self.sessions[session].write(data)
except KeyError:
return 0, StatusCode.error_invalid_object
if ret[1] < 0:
raise errors.VisaIOError(ret[1])
return ret |
python | def get_type_of_fields(fields, table):
"""
Return data types of `fields` that are in `table`. If `fields` is
empty, the table's primary key columns are used instead.
:param fields: list - list of fields that need to be returned
:param table: sa.Table - the current table
:return: dict - mapping of `field_name` to `field_type`
"""
if not fields:
fields = table.primary_key
actual_fields = [
field for field in table.c.items() if field[0] in fields
]
data_type_fields = {
name: FIELD_TYPES.get(type(field_type.type), rc.TEXT_FIELD.value)
for name, field_type in actual_fields
}
return data_type_fields |
java | public Hessian2Output createHessian2Output(OutputStream os)
{
Hessian2Output out = createHessian2Output();
out.init(os);
return out;
} |
python | def dim(self, dim):
"""Adjusts contrast to dim the display if dim is True, otherwise sets the
contrast to normal brightness if dim is False.
"""
# Assume dim display.
contrast = 0
# Adjust contrast based on VCC if not dimming.
if not dim:
if self._vccstate == SSD1306_EXTERNALVCC:
contrast = 0x9F
else:
contrast = 0xCF |
java | public void marshall(RemoveTagsFromVaultRequest removeTagsFromVaultRequest, ProtocolMarshaller protocolMarshaller) {
if (removeTagsFromVaultRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(removeTagsFromVaultRequest.getAccountId(), ACCOUNTID_BINDING);
protocolMarshaller.marshall(removeTagsFromVaultRequest.getVaultName(), VAULTNAME_BINDING);
protocolMarshaller.marshall(removeTagsFromVaultRequest.getTagKeys(), TAGKEYS_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | public WellRestedRequest build() {
return new WellRestedRequest(this.uri, this.credentials, this.proxy, this.dateSerializer, this.dateDeserializer,
this.dateFormat, this.exclusionStrategy, this.excludedFieldNames, this.excludedClassNames,
this.globalHeaders, this.gsonCustomiser, this.disableCookiesForAuthRequests,
this.connectionTimeout, this.socketTimeout);
} |
java | public char[] readAsCharArray()
{
int currentSize = size();
if (currentSize == 0)
{
return new char[0];
}
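// Write the buffered content into a fixed-size writer and return its backing char array.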
FixedCharArrayWriter target = new FixedCharArrayWriter(currentSize);
try
{
writeTo(target);
}
catch (IOException e)
{
throw new RuntimeException("Unexpected IOException", e);
}
return target.getCharArray();
} |
java | @XmlElementDecl(namespace = "http://www.citygml.org/ade/sub/0.9.0", name = "HollowSpace", substitutionHeadNamespace = "http://www.opengis.net/citygml/1.0", substitutionHeadName = "_CityObject")
public JAXBElement<HollowSpaceType> createHollowSpace(HollowSpaceType value) {
return new JAXBElement<HollowSpaceType>(_HollowSpace_QNAME, HollowSpaceType.class, null, value);
} |
python | def p_program_tokenstring(p):
""" program : defs NEWLINE
"""
try:
tmp = [str(x()) if isinstance(x, MacroCall) else x for x in p[1]]
except PreprocError as v:
error(v.lineno, v.message)
tmp.append(p[2])
p[0] = tmp |
python | def find_contig_distribution(contig_lengths_dict):
"""
Determine the frequency of different contig size ranges for each strain
:param contig_lengths_dict:
:return: contig_len_dist_dict: dictionary of strain name: tuple of contig size range frequencies
"""
# Initialise the dictionary
contig_len_dist_dict = dict()
for file_name, contig_lengths in contig_lengths_dict.items():
# Initialise integers to store the number of contigs that fall into the different bin sizes
over_1000000 = 0
over_500000 = 0
over_100000 = 0
over_50000 = 0
over_10000 = 0
over_5000 = 0
other = 0
for contig_length in contig_lengths:
# Depending on the size of the contig, increment the appropriate integer
if contig_length > 1000000:
over_1000000 += 1
elif contig_length > 500000:
over_500000 += 1
elif contig_length > 100000:
over_100000 += 1
elif contig_length > 50000:
over_50000 += 1
elif contig_length > 10000:
over_10000 += 1
elif contig_length > 5000:
over_5000 += 1
else:
other += 1
# Populate the dictionary with a tuple of each of the size range frequencies
contig_len_dist_dict[file_name] = (over_1000000,
over_500000,
over_100000,
over_50000,
over_10000,
over_5000,
other)
return contig_len_dist_dict |
java | private void unprotectedMkdir(long inodeId, INode[] inodes, int pos,
byte[] name, PermissionStatus permission, boolean inheritPermission,
long timestamp) throws QuotaExceededException {
inodes[pos] = addChild(inodes, pos,
new INodeDirectory(inodeId, name, permission, timestamp),
-1, inheritPermission );
} |
python | def set_text(self, text):
"""Sets the text.
arg: text (string): the new text
raise: InvalidArgument - ``text`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``text`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetForm.set_title_template
self._my_map['text'] = self._get_display_text(text, self.get_text_metadata()) |
python | def task_absent(name):
'''
Ensure that a task is absent from Kapacitor.
name
Name of the task.
'''
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
task = __salt__['kapacitor.get_task'](name)
if task:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Task would have been deleted'
else:
result = __salt__['kapacitor.delete_task'](name)
ret['result'] = result['success']
if not ret['result']:
ret['comment'] = 'Could not delete task'
if result.get('stderr'):
ret['comment'] += '\n' + result['stderr']
return ret
ret['comment'] = 'Task was deleted'
ret['changes'][name] = 'deleted'
else:
ret['comment'] = 'Task does not exist'
return ret |
java | void formatEra(StringBuilder b, ZonedDateTime d, int width, FieldVariants eras) {
int year = d.getYear();
int index = year < 0 ? 0 : 1;
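// index 0 = era for negative years (BC), 1 = the current era (AD); the pattern width picks the name variant.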
switch (width) {
case 5:
b.append(eras.narrow[index]);
break;
case 4:
b.append(eras.wide[index]);
break;
case 3:
case 2:
case 1:
b.append(eras.abbreviated[index]);
break;
}
} |
java | @SuppressWarnings("unchecked")
public EList<IfcReinforcementBarProperties> getCrossSectionReinforcementDefinitions() {
return (EList<IfcReinforcementBarProperties>) eGet(
Ifc2x3tc1Package.Literals.IFC_SECTION_REINFORCEMENT_PROPERTIES__CROSS_SECTION_REINFORCEMENT_DEFINITIONS,
true);
} |
java | protected void init(Class<T> cls, Key ... keys) {
// get the columns
List<String> targetColumns = new ArrayList<String>();
Class<?> clazz = cls;
while( clazz != null && !clazz.equals(Object.class) ) {
Field[] fields = clazz.getDeclaredFields();
for( Field f : fields ) {
int m = f.getModifiers();
if( Modifier.isTransient(m) || Modifier.isStatic(m) ) {
continue;
}
if (f.getType().getName().equals(Collection.class.getName())) {
continue; // this is handled by dependency manager
}
if( !f.getType().getName().equals(Translator.class.getName()) ) {
targetColumns.add(f.getName());
types.put(f.getName(), f.getType());
}
}
clazz = clazz.getSuperclass();
}
columns = new String[targetColumns.size()];
for (int i = 0; i < targetColumns.size(); i++) {
columns[i] = targetColumns.get(i);
}
} |
java | public String getProjectName( MavenProject project )
{
File dotProject = new File( project.getBasedir(), ".project" );
try
{
Xpp3Dom dom = Xpp3DomBuilder.build( ReaderFactory.newXmlReader( dotProject ) );
return dom.getChild( "name" ).getValue();
}
catch ( Exception e )
{
getLogger().warn( "Failed to read the .project file" );
return project.getArtifactId();
}
} |
python | def replace_query_parameters(request, replacements):
"""
Replace query parameters in request according to replacements. The
replacements should be a list of (key, value) pairs where the value can be
any of:
1. A simple replacement string value.
2. None to remove the given header.
3. A callable which accepts (key, value, request) and returns a string
value or None.
"""
query = request.query
new_query = []
replacements = dict(replacements)
for k, ov in query:
if k not in replacements:
new_query.append((k, ov))
else:
rv = replacements[k]
if callable(rv):
rv = rv(key=k, value=ov, request=request)
if rv is not None:
new_query.append((k, rv))
uri_parts = list(urlparse(request.uri))
uri_parts[4] = urlencode(new_query)
request.uri = urlunparse(uri_parts)
return request |
python | def diskstats():
'''
.. versionchanged:: 2016.3.2
Return the disk stats for this minion
.. versionchanged:: 2016.11.4
Added support for AIX
CLI Example:
.. code-block:: bash
salt '*' status.diskstats
'''
def linux_diskstats():
'''
linux specific implementation of diskstats
'''
ret = {}
try:
with salt.utils.files.fopen('/proc/diskstats', 'r') as fp_:
stats = salt.utils.stringutils.to_unicode(fp_.read())
except IOError:
pass
else:
for line in stats.splitlines():
if not line:
continue
comps = line.split()
ret[comps[2]] = {
'major': _number(comps[0]),
'minor': _number(comps[1]),
'device': _number(comps[2]),
'reads_issued': _number(comps[3]),
'reads_merged': _number(comps[4]),
'sectors_read': _number(comps[5]),
'ms_spent_reading': _number(comps[6]),
'writes_completed': _number(comps[7]),
'writes_merged': _number(comps[8]),
'sectors_written': _number(comps[9]),
'ms_spent_writing': _number(comps[10]),
'io_in_progress': _number(comps[11]),
'ms_spent_in_io': _number(comps[12]),
'weighted_ms_spent_in_io': _number(comps[13])
}
return ret
def generic_diskstats():
'''
generic implementation of diskstats
note: freebsd and sunos
'''
ret = {}
iostat = __salt__['cmd.run']('iostat -xzd').splitlines()
header = iostat[1]
for line in iostat[2:]:
comps = line.split()
ret[comps[0]] = {}
for metric, value in zip(header.split()[1:], comps[1:]):
ret[comps[0]][metric] = _number(value)
return ret
def aix_diskstats():
'''
AIX specific implementation of diskstats
'''
ret = {}
procn = None
fields = []
disk_name = ''
disk_mode = ''
for line in __salt__['cmd.run']('iostat -dDV').splitlines():
# Note: iostat -dDV is per-system
#
#System configuration: lcpu=8 drives=1 paths=2 vdisks=2
#
#hdisk0 xfer: %tm_act bps tps bread bwrtn
# 0.0 0.8 0.0 0.0 0.8
# read: rps avgserv minserv maxserv timeouts fails
# 0.0 2.5 0.3 12.4 0 0
# write: wps avgserv minserv maxserv timeouts fails
# 0.0 0.3 0.2 0.7 0 0
# queue: avgtime mintime maxtime avgwqsz avgsqsz sqfull
# 0.3 0.0 5.3 0.0 0.0 0.0
#--------------------------------------------------------------------------------
if not line or line.startswith('System') or line.startswith('-----------'):
continue
if not re.match(r'\s', line):
#have new disk
dsk_comps = line.split(':')
dsk_firsts = dsk_comps[0].split()
disk_name = dsk_firsts[0]
disk_mode = dsk_firsts[1]
fields = dsk_comps[1].split()
ret[disk_name] = []
procn = len(ret[disk_name])
ret[disk_name].append({})
ret[disk_name][procn][disk_mode] = {}
continue
if ':' in line:
comps = line.split(':')
fields = comps[1].split()
disk_mode = comps[0].lstrip()
procn = len(ret[disk_name])
ret[disk_name].append({})
ret[disk_name][procn][disk_mode] = {}
else:
comps = line.split()
for i in range(0, len(fields)):
if len(comps) > i:
ret[disk_name][procn][disk_mode][fields[i]] = comps[i]
return ret
# dict that return a function that does the right thing per platform
get_version = {
'Linux': linux_diskstats,
'FreeBSD': generic_diskstats,
'SunOS': generic_diskstats,
'AIX': aix_diskstats,
}
errmsg = 'This method is unsupported on the current operating system!'
return get_version.get(__grains__['kernel'], lambda: errmsg)() |
python | def _compress(self):
"""Prunes the cataloged observations."""
rank = 0.0
current = self._head
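# Walk the list and absorb each successor into the current node whenever their combined rank and delta stay within the error invariant.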
while current and current._successor:
if current._rank + current._successor._rank + current._successor._delta <= self._invariant(rank, self._observations):
removed = current._successor
current._value = removed._value
current._rank += removed._rank
current._delta = removed._delta
current._successor = removed._successor
rank += current._rank
current = current._successor |
java | @Override
public void eUnset(int featureID) {
switch (featureID) {
case AfplibPackage.GCMRK__RG:
getRg().clear();
return;
}
super.eUnset(featureID);
} |
java | public void render(Template template, Map<String, Object> parameters, Writer out, GlobalScope... extraGlobalScopes) throws IOException {
newInterpreter(extraGlobalScopes).declare(parameters).process((TemplateImpl) template, out);
} |
python | def update(self):
"""Updates the dictionary of the device"""
if "tag_groups" not in self._device_dict:
return
for group in self.tag_groups:
group.update()
for i in range(len(self._device_dict["tag_groups"])):
tag_group_dict = self._device_dict["tag_groups"][i]
for group in self.tag_groups:
if group.name == tag_group_dict["common.ALLTYPES_NAME"]:
self._device_dict["tag_groups"][i] = group.as_dict() |
python | def visit_ListComp(self, node: AST, dfltChaining: bool = True) -> str:
"""Return `node`s representation as list comprehension."""
return f"[{self.visit(node.elt)} " \
f"{' '.join(self.visit(gen) for gen in node.generators)}]" |
java | public static String link(Key k, String content) {
return link(k, content, null, null, null);
} |
python | async def disable(self, reason=None):
"""Enters maintenance mode
Parameters:
reason (str): Reason for disabling
Returns:
bool: ``True`` on success
"""
params = {"enable": True, "reason": reason}
response = await self._api.put("/v1/agent/maintenance", params=params)
return response.status == 200 |
java | public Map<String, DeleteOperation> getDeleteOperations() {
Map<String, DeleteOperation> delOperations = new TreeMap<String, DeleteOperation>();
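// Keep only the DELETE-typed operations from the pending collection updates.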
for (Entry<String, IOperation> entry : collectionUpdateData.entrySet()) {
String key = entry.getKey();
IOperation op = entry.getValue();
if (op.getOperationType().equals(Type.DELETE)) {
delOperations.put(key, (DeleteOperation)op);
}
}
return delOperations;
} |
java | public URLConnection connectWithRedirect(URL theURL) throws IOException {
URLConnection conn = null;
String accept_header = buildAcceptHeader();
int redirect_count = 0;
boolean done = false;
while (!done) {
if (theURL.getProtocol().equals("file")) {
return null;
}
Boolean isHttp = (theURL.getProtocol().equals("http") || theURL
.getProtocol().equals("https"));
logger.debug("Requesting: " + theURL.toString());
conn = theURL.openConnection();
if (isHttp) {
logger.debug("Accept: " + accept_header);
conn.setRequestProperty("Accept", accept_header);
}
conn.setConnectTimeout(60000);
conn.setReadTimeout(60000);
conn.connect();
done = true; // by default quit after one request
if (isHttp) {
logger.debug("Response: " + conn.getHeaderField(0));
int rc = ((HttpURLConnection) conn).getResponseCode();
if ((rc == HttpURLConnection.HTTP_MOVED_PERM)
|| (rc == HttpURLConnection.HTTP_MOVED_TEMP)
|| (rc == HttpURLConnection.HTTP_SEE_OTHER)
|| (rc == 307)) {
if (redirect_count > 10) {
return null; // Error: too many redirects
}
redirect_count++;
String loc = conn.getHeaderField("Location");
if (loc != null) {
theURL = new URL(loc);
done = false;
} else {
return null; // Bad redirect
}
} else if ((rc < 200) || (rc >= 300)) {
return null; // Unsuccessful
}
}
}
return conn;
} |
java | public Observable<CertificateBundle> updateCertificateAsync(String vaultBaseUrl, String certificateName, String certificateVersion, CertificatePolicy certificatePolicy, CertificateAttributes certificateAttributes, Map<String, String> tags) {
return updateCertificateWithServiceResponseAsync(vaultBaseUrl, certificateName, certificateVersion, certificatePolicy, certificateAttributes, tags).map(new Func1<ServiceResponse<CertificateBundle>, CertificateBundle>() {
@Override
public CertificateBundle call(ServiceResponse<CertificateBundle> response) {
return response.body();
}
});
} |
python | def get_prefix(self, key_prefix, sort_order=None, sort_target=None):
"""Get a range of keys with a prefix.
:param sort_order: 'ascend' or 'descend' or None
:param key_prefix: first key in range
:returns: sequence of (value, metadata) tuples
"""
return self.get(key_prefix,
metadata=True,
range_end=_encode(_increment_last_byte(key_prefix)),
sort_order=sort_order,
sort_target=sort_target) |
python | def truncated_exponential_backoff(
slot_delay, collision=0, max_collisions=5,
op=operator.mul, in_range=True):
"""Truncated Exponential Backoff
see https://en.wikipedia.org/wiki/Exponential_backoff
"""
truncated_collision = collision % max_collisions
if in_range:
slots = random.randint(0, truncated_collision)
else:
slots = truncated_collision
return op(slot_delay, slots) |
java | public void setStreamInfo(Stream stream, final KickflipCallback cb) {
if (!assertActiveUserAvailable(cb)) return;
GenericData data = new GenericData();
data.put("stream_id", stream.getStreamId());
data.put("uuid", getActiveUser().getUUID());
if (stream.getTitle() != null) {
data.put("title", stream.getTitle());
}
if (stream.getDescription() != null) {
data.put("description", stream.getDescription());
}
if (stream.getExtraInfo() != null) {
data.put("extra_info", new Gson().toJson(stream.getExtraInfo()));
}
if (stream.getLatitude() != 0) {
data.put("lat", stream.getLatitude());
}
if (stream.getLongitude() != 0) {
data.put("lon", stream.getLongitude());
}
if (stream.getCity() != null) {
data.put("city", stream.getCity());
}
if (stream.getState() != null) {
data.put("state", stream.getState());
}
if (stream.getCountry() != null) {
data.put("country", stream.getCountry());
}
if (stream.getThumbnailUrl() != null) {
data.put("thumbnail_url", stream.getThumbnailUrl());
}
data.put("private", stream.isPrivate());
data.put("deleted", stream.isDeleted());
post(SET_META, new UrlEncodedContent(data), Stream.class, cb);
} |
python | def align_pipe(fastq_file, pair_file, ref_file, names, align_dir, data):
"""Perform piped alignment of fastq input files, generating sorted output BAM.
"""
pair_file = pair_file if pair_file else ""
# back compatible -- older files were named with lane information, use sample name now
if names["lane"] != dd.get_sample_name(data):
out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
else:
out_file = None
if not out_file or not utils.file_exists(out_file):
umi_ext = "-cumi" if "umi_bam" in data else ""
out_file = os.path.join(align_dir, "{0}-sort{1}.bam".format(dd.get_sample_name(data), umi_ext))
qual_format = data["config"]["algorithm"].get("quality_format", "").lower()
min_size = None
if data.get("align_split") or fastq_file.endswith(".sdf"):
if fastq_file.endswith(".sdf"):
min_size = rtg.min_read_size(fastq_file)
final_file = out_file
out_file, data = alignprep.setup_combine(final_file, data)
fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data)
else:
final_file = None
if qual_format == "illumina":
fastq_file = alignprep.fastq_convert_pipe_cl(fastq_file, data)
if pair_file:
pair_file = alignprep.fastq_convert_pipe_cl(pair_file, data)
rg_info = novoalign.get_rg_info(names)
if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)):
# If we cannot do piping, use older bwa aln approach
if ("bwa-mem" not in dd.get_tools_on(data) and
("bwa-mem" in dd.get_tools_off(data) or not _can_use_mem(fastq_file, data, min_size))):
out_file = _align_backtrack(fastq_file, pair_file, ref_file, out_file,
names, rg_info, data)
else:
out_file = _align_mem(fastq_file, pair_file, ref_file, out_file,
names, rg_info, data)
data["work_bam"] = out_file
return data |
python | def lorentz_deriv(state, t0, sigma=10., beta=8./3, rho=28.0):
"""Compute the time-derivative of a Lorenz system at the point ``state`` = (x, y, z)."""
x, y, z = state
return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z] |
python | def expand(conf, output_requirements_filename, input_requirements_filename):
"""Expand given requirements file by extending it using pip freeze
args:
input_requirements_filename: the requirements filename to expand
output_requirements_filename: the output filename for the expanded
requirements file
"""
exit_if_file_not_exists(input_requirements_filename, conf)
cireqs.expand_requirements(
requirements_filename=input_requirements_filename,
expanded_requirements_filename=output_requirements_filename,
**conf._asdict()
)
click.echo(click.style('✓', fg='green') + " {} has been expanded into {}".format(
input_requirements_filename, output_requirements_filename
)) |
python | def parenthesize(self, expr, level, *args, strict=False, **kwargs):
"""Render `expr` and wrap the result in parentheses if the precedence
of `expr` is below the given `level` (or at the given `level` if
`strict` is True). Extra `args` and `kwargs` are passed to the internal
`doit` renderer."""
needs_parenths = (
(precedence(expr) < level) or
(strict and precedence(expr) == level))
if needs_parenths:
return (
self._parenth_left + self.doprint(expr, *args, **kwargs) +
self._parenth_right)
else:
return self.doprint(expr, *args, **kwargs) |
python | def serialize_list(self, tag):
"""Return the literal representation of a list tag."""
separator, fmt = self.comma, '[{}]'
with self.depth():
if self.should_expand(tag):
separator, fmt = self.expand(separator, fmt)
return fmt.format(separator.join(map(self.serialize, tag))) |
python | def image_embedding_column(key, module_spec):
"""Uses a Module to get a dense 1-D representation from the pixels of images.
This feature column can be used on images, represented as float32 tensors of
RGB pixel data in the range [0,1]. This can be read from a numeric_column()
if the tf.Example input data happens to have decoded images, all with the
same shape [height, width, 3]. More commonly, the input_fn will have code to
explicitly decode images, resize them (possibly after performing data
augmentation such as random crops etc.), and provide a batch of shape
[batch_size, height, width, 3].
The result of this feature column is the result of passing its `input`
through the module `m` instantiated from `module_spec`, as per
`result = m({"images": input})`. The `result` must have dtype float32 and
shape `[batch_size, num_features]` with a known value of num_features.
Example:
```python
image_column = hub.image_embedding_column("embeddings", "/tmp/image-module")
feature_columns = [image_column, ...]
estimator = tf.estimator.LinearClassifier(feature_columns, ...)
height, width = hub.get_expected_image_size(image_column.module_spec)
input_fn = ... # Provides "embeddings" with shape [None, height, width, 3].
estimator.train(input_fn, ...)
```
Args:
key: A string or `_FeatureColumn` identifying the input image data.
module_spec: A string handle or a `ModuleSpec` identifying the module.
Returns:
`_DenseColumn` that converts from pixel data.
Raises:
ValueError: if module_spec is not suitable for use in this feature column.
"""
module_spec = module.as_module_spec(module_spec)
_check_module_is_image_embedding(module_spec)
return _ImageEmbeddingColumn(key=key, module_spec=module_spec) |
java | public void delete(RecordId nextDeletedSlot) {
Constant flag = EMPTY_CONST;
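// Mark the current slot as empty and link it into the chain of deleted slots.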
setVal(currentPos(), flag);
setNextDeletedSlotId(nextDeletedSlot);
} |
java | public static void unregisterIdlingResource(@NonNull String name) {
throwIfAbsent(name);
CappuccinoIdlingResource idlingResource = mIdlingResourceRegistry.get(name);
Espresso.unregisterIdlingResources(idlingResource);
mIdlingResourceRegistry.remove(name);
} |
python | def get_file(hash):
"""Return the contents of the file as a ``memoryview``."""
stmt = _get_sql('get-file.sql')
args = dict(hash=hash)
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(stmt, args)
try:
file, _ = cursor.fetchone()
except TypeError:
raise FileNotFound(hash)
return memoryview(file[:]) |
python | def run_gerrit_command(self, command):
""" Run the given command.
Make sure we're connected to the remote server, and run `command`.
Return the results as a `GerritSSHCommandResult`.
Raise `ValueError` if `command` is not a string, or `GerritError` if
command execution fails.
"""
if not isinstance(command, basestring):
raise ValueError("command must be a string")
gerrit_command = "gerrit " + command
# are we sending non-ascii data?
try:
gerrit_command.encode('ascii')
except UnicodeEncodeError:
gerrit_command = gerrit_command.encode('utf-8')
self._connect()
try:
stdin, stdout, stderr = self.exec_command(gerrit_command,
bufsize=1,
timeout=None,
get_pty=False)
except SSHException as err:
raise GerritError("Command execution error: %s" % err)
return GerritSSHCommandResult(command, stdin, stdout, stderr) |
python | def _remove_none_values(dictionary):
""" Remove dictionary keys whose value is None """
return list(map(dictionary.pop,
[i for i in dictionary if dictionary[i] is None])) |
python | def translate_query_params(cls, **kwargs):
"""
Translate an arbitrary keyword argument to the expected query.
TODO: refactor this into something less insane.
XXX: Clean this up. It's *too* flexible.
In the v2 API, many endpoints expect a particular query argument to be
in the form of `query=xxx` where `xxx` would be the name of perhaps
the name, ID or otherwise. This function ought to take a more aptly
named parameter specified in `TRANSLATE_QUERY_PARAM`, and substitute it
into the `query` keyword argument. The purpose is so that some models
(optionally) have nicer named keyword arguments than `query` for easier
to read python.
If a query argument is given then the output should be that value. If a
substitute value is given as a keyword specified in
`TRANSLATE_QUERY_PARAM`(and query is not) then the `query` argument
will be that keyword argument.
Eg. No query param
TRANSLATE_QUERY_PARAM = ('name',)
kwargs = {'name': 'PagerDuty',}
...
output = {'query': 'PagerDuty'}
or, query param explicitly
TRANSLATE_QUERY_PARAM = ('name',)
kwargs = {'name': 'PagerDuty', 'query': 'XXXXPlopperDuty'}
...
output = {'query': 'XXXXPlopperDuty'}
or, TRANSLATE_QUERY_PARAM is None
TRANSLATE_QUERY_PARAM = None
kwargs = {'name': 'PagerDuty', 'query': 'XXXXPlopperDuty'}
...
output = {'query': 'XXXXPlopperDuty', 'name': 'PagerDuty'}
"""
values = []
output = kwargs.copy()
query = kwargs.pop('query', None)
# remove any of the TRANSLATE_QUERY_PARAMs in output
for param in (cls.TRANSLATE_QUERY_PARAM or []):
popped = output.pop(param, None)
if popped is not None:
values.append(popped)
# if query is provided, just use it
if query is not None:
output['query'] = query
return output
# if query is not provided, use the first parameter we removed from
# the kwargs
try:
output['query'] = next(iter(values))
except StopIteration:
pass
return output |
java | public int getUnique() {
TIntHashSet inUse = new TIntHashSet();
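// Collect every value into a primitive int set; its size is the number of distinct values.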
for (int i = 0; i < length; i++) {
inUse.add(get(i));
}
return inUse.size();
} |
python | def volume(self, value=None):
"""Get/set the volume occupied by actor."""
mass = vtk.vtkMassProperties()
mass.SetGlobalWarningDisplay(0)
mass.SetInputData(self.polydata())
mass.Update()
v = mass.GetVolume()
if value is not None:
if not v:
colors.printc("~bomb Volume is zero: cannot rescale.", c=1, end="")
colors.printc(" Consider adding actor.triangle()", c=1)
return self
self.scale(value / v)
return self
else:
return v |
java | public OvhVoicemailMessages billingAccount_voicemail_serviceName_directories_id_GET(String billingAccount, String serviceName, Long id) throws IOException {
String qPath = "/telephony/{billingAccount}/voicemail/{serviceName}/directories/{id}";
StringBuilder sb = path(qPath, billingAccount, serviceName, id);
String resp = exec(qPath, "GET", sb.toString(), null);
return convertTo(resp, OvhVoicemailMessages.class);
} |
java | public String [] availableFlags(){
Iterator<String> it = flags.keySet().iterator();
String [] out = new String[(flags.keySet().size())];
int i = 0;
while(it.hasNext()){
out[i] = it.next();
i++;
}
return out;
} |
python | def to_dict(self):
"""
Returns:
dict: Concise represented as a dictionary.
"""
final_res = {
"param": self._param,
"unused_param": self.unused_param,
"execution_time": self._exec_time,
"output": {"accuracy": self.get_accuracy(),
"weights": self.get_weights(),
"splines": self._splines
}
}
return final_res |
python | def check_gcdt_update():
"""Check whether a newer gcdt is available and output a warning.
"""
try:
inst_version, latest_version = get_package_versions('gcdt')
if inst_version < latest_version:
log.warn('Please consider an update to gcdt version: %s' %
latest_version)
except GracefulExit:
raise
except Exception:
log.warn('PyPi appears to be down - we currently can\'t check for newer gcdt versions') |
python | def _deployment_menu_entry(deployment):
"""Build a string to display in the 'select deployment' menu."""
paths = ", ".join([_module_name_for_display(module) for module in deployment['modules']])
regions = ", ".join(deployment.get('regions', []))
return "%s - %s (%s)" % (deployment.get('name'), paths, regions) |
java | public static <T> List<T> minList (Iterable<T> iterable, Comparator<? super T> comp)
{
return maxList(iterable, java.util.Collections.reverseOrder(comp));
} |
java | public void setTimestampProperty(String pstrSection, String pstrProp,
Timestamp ptsVal, String pstrComments)
{
INISection objSec = null;
objSec = (INISection) this.mhmapSections.get(pstrSection);
if (objSec == null)
{
objSec = new INISection(pstrSection);
this.mhmapSections.put(pstrSection, objSec);
}
objSec.setProperty(pstrProp, timeToStr(ptsVal, this.mstrTimeStampFmt),
pstrComments);
} |
python | def decrypt_result(self, *args, **kwargs):
"""
Decrypts ProcessData result with comm keys
:param args:
:param kwargs:
:return:
"""
if self.response is None:
raise ValueError('Empty response')
if self.response.response is None \
or 'result' not in self.response.response \
or self.response.response['result'] is None:
raise ValueError('No result data')
res_hex = self.response.response['result']
# Strip out the plaintext part
plain_length = bytes_to_long(from_hex(res_hex[0:4]))
if plain_length > 0:
res_hex = res_hex[4 + plain_length:]
else:
res_hex = res_hex[4:]
# Optionally strip trailing _... string
idx_trail = res_hex.find('_')
if idx_trail != -1:
res_hex = res_hex[0:idx_trail]
# Decode hex coding
res_bytes = from_hex(res_hex)
# Crypto stuff - check the length & padding
if len(res_bytes) < 16:
raise InvalidResponse('Result too short')
mac_given = res_bytes[-16:]
res_bytes = res_bytes[:-16]
# Check the MAC
mac_computed = cbc_mac(self.uo.mac_key, res_bytes)
if not str_equals(mac_given, mac_computed):
raise CryptoError('MAC invalid')
# Decrypt
decrypted = aes_dec(self.uo.enc_key, res_bytes)
if len(decrypted) < 1 + 4 + 8 or decrypted[0:1] != bchr(0xf1):
raise InvalidResponse('Invalid format')
self.resp_object_id = bytes_to_long(decrypted[1:5])
self.resp_nonce = EBUtils.demangle_nonce(decrypted[5:5 + EBConsts.FRESHNESS_NONCE_LEN])
self.decrypted = decrypted[5 + EBConsts.FRESHNESS_NONCE_LEN:]
self.decrypted = PKCS7.unpad(self.decrypted)
return self.response |
python | def _calculate_ms_from_base(self, size):
"""Calculates the rotated minimum size from the given base minimum
size."""
hw = size.x * 0.5
hh = size.y * 0.5
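# Rotate the four corners of the base box and take the axis-aligned extent of the result.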
a = datatypes.Point(hw, hh).get_rotated(self.angle)
b = datatypes.Point(-hw, hh).get_rotated(self.angle)
c = datatypes.Point(hw, -hh).get_rotated(self.angle)
d = datatypes.Point(-hw, -hh).get_rotated(self.angle)
minp = a.get_minimum(b).get_minimum(c).get_minimum(d)
maxp = a.get_maximum(b).get_maximum(c).get_maximum(d)
return maxp - minp |
python | def validate_metadata_sign(xml, cert=None, fingerprint=None, fingerprintalg='sha1', validatecert=False, debug=False):
"""
Validates the signature of an EntityDescriptor.
:param xml: The element we should validate
:type: string | Document
:param cert: The public cert
:type: string
:param fingerprint: The fingerprint of the public cert
:type: string
:param fingerprintalg: The algorithm used to build the fingerprint
:type: string
:param validatecert: If true, will verify the signature and if the cert is valid.
:type: bool
:param debug: Activate the xmlsec debug
:type: bool
:param raise_exceptions: Whether to return false on failure or raise an exception
:type raise_exceptions: Boolean
"""
if xml is None or xml == '':
raise Exception('Empty string supplied as input')
elif isinstance(xml, etree._Element):
elem = xml
elif isinstance(xml, Document):
xml = xml.toxml()
elem = fromstring(str(xml), forbid_dtd=True)
elif isinstance(xml, Element):
xml.setAttributeNS(
unicode(OneLogin_Saml2_Constants.NS_MD),
'xmlns:md',
unicode(OneLogin_Saml2_Constants.NS_MD)
)
xml = xml.toxml()
elem = fromstring(str(xml), forbid_dtd=True)
elif isinstance(xml, basestring):
elem = fromstring(str(xml), forbid_dtd=True)
else:
raise Exception('Error parsing xml string')
error_callback_method = None
if debug:
error_callback_method = print_xmlsec_errors
xmlsec.set_error_callback(error_callback_method)
xmlsec.addIDs(elem, ["ID"])
signature_nodes = OneLogin_Saml2_Utils.query(elem, '/md:EntitiesDescriptor/ds:Signature')
if len(signature_nodes) == 0:
signature_nodes += OneLogin_Saml2_Utils.query(elem, '/md:EntityDescriptor/ds:Signature')
if len(signature_nodes) == 0:
signature_nodes += OneLogin_Saml2_Utils.query(elem, '/md:EntityDescriptor/md:SPSSODescriptor/ds:Signature')
signature_nodes += OneLogin_Saml2_Utils.query(elem, '/md:EntityDescriptor/md:IDPSSODescriptor/ds:Signature')
if len(signature_nodes) > 0:
for signature_node in signature_nodes:
OneLogin_Saml2_Utils.validate_node_sign(signature_node, elem, cert, fingerprint, fingerprintalg, validatecert, debug, raise_exceptions=True)
return True
else:
raise Exception('Could not validate metadata signature: No signature nodes found.') |
java | public Image getImage(GraphicsConfiguration config, int w, int h, Object... args) {
lock.readLock().lock();
try {
PixelCountSoftReference ref = map.get(hash(config, w, h, args));
// check reference has not been lost and the key truly matches, in
// case of false positive hash match
if (ref != null && ref.equals(config, w, h, args)) {
return ref.get();
} else {
return null;
}
} finally {
lock.readLock().unlock();
}
} |
python | def calculate_generalized_advantage_estimator(
reward, value, done, gae_gamma, gae_lambda):
# pylint: disable=g-doc-args
"""Generalized advantage estimator.
Returns:
GAE estimator. It will be one element shorter than the input; this is
because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N].
"""
# pylint: enable=g-doc-args
next_value = value[1:, :]
next_not_done = 1 - tf.cast(done[1:, :], tf.float32)
delta = (reward[:-1, :] + gae_gamma * next_value * next_not_done
- value[:-1, :])
return_ = tf.reverse(tf.scan(
lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg,
[tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])],
tf.zeros_like(delta[0, :]),
parallel_iterations=1), [0])
return tf.check_numerics(return_, "return") |
python | def Extract_Checkpoints(self):
'''
Extract the checkpoints and store in self.tracking_data
'''
# Make sure page is available
if self.page is None:
raise Exception("The HTML data was not fetched due to some reasons")
soup = BeautifulSoup(self.page,'html.parser')
# Check for invalid tracking number by checking if table element is present
if soup.find('thead') == None:
raise ValueError('Invalid tracking number')
# Assign the current status of the shipment - self.status
if 'Returned' in self.page:
self.status = 'R'
elif 'Signed for by:' in self.page:
self.status = 'C'
else: # The shipment is in Transit
self.status = 'T'
# The full checkpoints table div.
table = soup.find('table',{'class':'result-checkpoints'}).contents
cur_date = None # The date of the next few checkpoints, initially None
checkpoint = None
for element in table:
if element.name == 'thead':
# This has the date for the next few checkpoints
cur_date = element.find('th',{'colspan':'2'}).string.strip() + ' '
elif element.name == 'tbody':
# A checkpoint whose date = cur_date
checkpoint = {'status':'','date':cur_date,'location':''}
tds = element.findAll('td')
checkpoint['status'] = tds[1].string.strip()
checkpoint['location'] = tds[2].string.strip()
checkpoint['date'] += tds[3].string.strip()
date_time_format = "%d-%b-%Y %H:%M"
checkpoint['date'] = parse(checkpoint['date'])
self.tracking_data.append(checkpoint)
self.tracking_data = sorted(self.tracking_data, key=lambda k: k['date']) |
python | def security_iter(nodearr):
""" provide a security data iterator by returning a tuple of (Element, SecurityError) which are mutually exclusive """
assert nodearr.Name == 'securityData' and nodearr.IsArray
for i in range(nodearr.NumValues):
node = nodearr.GetValue(i)
err = XmlHelper.get_security_error(node)
result = (None, err) if err else (node, None)
yield result |
java | private int writeUTF8Slow(final CharSequence chars, int off, int len)
{
int octets = 0;
while (len > 0)
{
final char ch = chars.charAt(off);
if (ch >= LOW_SURROGATE_FIRST && ch <= LOW_SURROGATE_LAST)
{
throw new IllegalArgumentException("Unpaired low surrogate: " + (int) ch);
}
if ((ch >= HIGH_SURROGATE_FIRST && ch <= HIGH_SURROGATE_LAST))
{
// we need to look ahead in this case
off++;
len--;
if (len == 0)
{
throw new IllegalArgumentException("Unpaired low surrogate at end of character sequence: " + ch);
}
final int ch2 = chars.charAt(off);
if (ch2 < LOW_SURROGATE_FIRST || ch2 > LOW_SURROGATE_LAST)
{
throw new IllegalArgumentException("Low surrogate with unpaired high surrogate: " + ch + " + " + ch2);
}
// at this point we have a high and low surrogate
final int codepoint = (((ch - HIGH_SURROGATE_FIRST) << BITS_PER_SURROGATE) | (ch2 - LOW_SURROGATE_FIRST)) + SURROGATE_BASE;
writeByte((byte) (UTF8_4_OCTET_PREFIX_MASK | ( codepoint >> UTF8_4_OCTET_SHIFT) ));
writeByte((byte) (UTF8_FOLLOW_PREFIX_MASK | ((codepoint >> UTF8_3_OCTET_SHIFT) & UTF8_FOLLOW_MASK)));
writeByte((byte) (UTF8_FOLLOW_PREFIX_MASK | ((codepoint >> UTF8_2_OCTET_SHIFT) & UTF8_FOLLOW_MASK)));
writeByte((byte) (UTF8_FOLLOW_PREFIX_MASK | ( codepoint & UTF8_FOLLOW_MASK)));
octets += 4;
}
else if (ch < UTF8_2_OCTET_MIN_VALUE)
{
writeByte((byte) ch);
octets++;
}
else if (ch < UTF8_3_OCTET_MIN_VALUE)
{
writeByte((byte) (UTF8_2_OCTET_PREFIX_MASK | (ch >> UTF8_2_OCTET_SHIFT) ));
writeByte((byte) (UTF8_FOLLOW_PREFIX_MASK | (ch & UTF8_FOLLOW_MASK)));
octets += 2;
}
else
{
writeByte((byte) (UTF8_3_OCTET_PREFIX_MASK | ( ch >> UTF8_3_OCTET_SHIFT) ));
writeByte((byte) (UTF8_FOLLOW_PREFIX_MASK | ((ch >> UTF8_2_OCTET_SHIFT) & UTF8_FOLLOW_MASK)));
writeByte((byte) (UTF8_FOLLOW_PREFIX_MASK | ( ch & UTF8_FOLLOW_MASK)));
octets += 3;
}
off++;
len--;
}
return octets;
} |
java | private void removeDuplicateRow(DoubleMatrix1D c,
DoubleMatrix2D A, DoubleMatrix1D b,
DoubleMatrix1D lb, DoubleMatrix1D ub,
DoubleMatrix1D ylb, DoubleMatrix1D yub,
DoubleMatrix1D zlb, DoubleMatrix1D zub) {
//the position 0 is for empty rows, 1 is for row singleton and 2 for row doubleton
int startingLength = 3;
for(int i=startingLength; i<vRowLengthMap.length; i++){
int[] vRowLengthMapI = vRowLengthMap[i];
if(vRowLengthMapI == null || vRowLengthMapI.length < 1){
//no rows has this number of nz
continue;
}
boolean stop = false;
for(int j=0; !stop && j<vRowLengthMapI.length; j++){
short prow = (short)vRowLengthMapI[j];//the row of A that has this number of nz
if(vRowPositions[prow].length==0 || prow < nOfSlackVariables){
//the upper left part of A is diagonal if there are the slack variables:
//there is no sparsity superset possible
continue;
}
short[] vRowPositionsProw = vRowPositions[prow];
if(vRowPositionsProw.length != i){
log.debug("Row "+prow+" has an unexpected number of nz: expected " + i + " but is " + vRowPositionsProw.length);
throw new IllegalStateException();
}
for(int si=i; !stop && si<vRowLengthMap.length; si++){
//look into rows with superset sparsity pattern
int[] vRowLengthMapSI = vRowLengthMap[si];
if(vRowLengthMapSI == null || vRowLengthMapSI.length < 1){
continue;
}
for(int sj=0; sj<vRowLengthMapSI.length; sj++){
if(si==i && sj <= j){
continue;//look forward, not behind
}
short srow = (short)vRowLengthMapSI[sj];
if(vRowPositions[srow].length==0){
continue;
}
short[] vRowPositionsSrow = vRowPositions[srow];
//same sparsity pattern?
if(isSubsetSparsityPattern(vRowPositionsProw, vRowPositionsSrow)){
log.debug("found superset sparsity pattern: row " + prow + " contained in row " + srow);
//look for the higher number of coefficients that can be deleted
Map<Double, List<Integer>> coeffRatiosMap = new HashMap<Double, List<Integer>>();
for(short k=0; k<vRowPositionsProw.length; k++){
short col = vRowPositionsProw[k];
double APRL = A.getQuick(prow, col);
double ASRL = A.getQuick(srow, col);
double ratio = -ASRL/APRL;
//put the ratio and the column index in the map
boolean added = false;
for(Double keyRatio : coeffRatiosMap.keySet()){
if(isZero(ratio - keyRatio)){
coeffRatiosMap.get(keyRatio).add((int)col);
added = true;
break;
}
}
if(!added){
List<Integer> newList = new ArrayList<Integer>();
newList.add((int)col);
coeffRatiosMap.put(ratio, newList);
}
}
//take the ratio(s) with the higher number of column indexes
int maxNumberOfColumn = -1;
List<Integer> candidatedColumns = null;
for(Double keyRatio : coeffRatiosMap.keySet()){
int size = coeffRatiosMap.get(keyRatio).size();
if(size > maxNumberOfColumn){
maxNumberOfColumn = size;
candidatedColumns = coeffRatiosMap.get(keyRatio);
}else if(size == maxNumberOfColumn){
candidatedColumns.addAll(coeffRatiosMap.get(keyRatio));
}
}
//look for the position with less column fill in
short lessFilledColumn = -1;//cannot be greater
int lessFilledColumnLength = this.originalMeq + 1;//cannot be greater
for(short k=0; k<candidatedColumns.size(); k++){
short col = candidatedColumns.get(k).shortValue();
if(vColPositions[col].length>1 && vColPositions[col].length<lessFilledColumnLength ){
lessFilledColumn = col;
lessFilledColumnLength = vColPositions[col].length;
}
}
log.debug("less filled column (" + lessFilledColumn +"): length=" + lessFilledColumnLength);
double APRL = A.getQuick(prow, lessFilledColumn);
double ASRL = A.getQuick(srow, lessFilledColumn);
double alpha = -ASRL/APRL;
b.setQuick(srow, b.getQuick(srow) + alpha*b.getQuick(prow));
//substitute A[prow] with A[prow] * alpha*A[row] for every nz entry of A[row]
for(short t=0; t<vRowPositionsProw.length; t++){
short cc = vRowPositionsProw[t];
double nv = 0.;
if(cc!=lessFilledColumn){
nv = A.getQuick(srow, cc) + alpha*A.getQuick(prow, cc);
}
A.setQuick(srow, cc, nv);
if(isZero(nv)){
vRowPositions[srow] = removeElementFromSortedArray(vRowPositions[srow], cc);
changeColumnsLengthPosition(cc, vColPositions[cc].length, vColPositions[cc].length-1);
vColPositions[cc] = removeElementFromSortedArray(vColPositions[cc], srow);
changeRowsLengthPosition(srow, vRowPositions[srow].length+1, vRowPositions[srow].length);
A.setQuick(srow, cc, 0.);
}
}
this.someReductionDone = true;
stop = true;
i=startingLength-1;//restart, ++ comes from the for loop
break;
}
}
}
}
}
} |
python | def pretty_format_args(*args, **kwargs):
"""
Take the args and kwargs that are passed in and format them in a
prototype style.
"""
args = list([repr(a) for a in args])
for key, value in kwargs.items():
args.append("%s=%s" % (key, repr(value)))
return "(%s)" % ", ".join([a for a in args]) |
java | public static xen_brvpx_image delete(nitro_service client, xen_brvpx_image resource) throws Exception
{
resource.validate("delete");
return ((xen_brvpx_image[]) resource.delete_resource(client))[0];
} |
java | public void inlinePrintNoQuotes(Object o) {
if(inline.length() > 0) {
inline.append(SEPARATOR);
}
// remove newlines
String str = o.toString().replace(NEWLINE, " ");
// escaping
str = str.replace("\\", "\\\\").replace("\"", "\\\"");
inline.append(str);
} |
python | def main():
"""Main function of the converter command-line tool,
``convert2h5features --help`` for a more complete doc."""
args = parse_args()
converter = h5f.Converter(args.output, args.group, args.chunk)
for infile in args.file:
converter.convert(infile) |
java | public Curve25519KeyPair generateKeyPair() {
byte[] privateKey = provider.generatePrivateKey();
byte[] publicKey = provider.generatePublicKey(privateKey);
return new Curve25519KeyPair(publicKey, privateKey);
} |
java | public List<CharacterOrdersHistoryResponse> getCharactersCharacterIdOrdersHistory(Integer characterId,
String datasource, String ifNoneMatch, Integer page, String token) throws ApiException {
ApiResponse<List<CharacterOrdersHistoryResponse>> resp = getCharactersCharacterIdOrdersHistoryWithHttpInfo(
characterId, datasource, ifNoneMatch, page, token);
return resp.getData();
} |
java | @Override
public Collection<Metric> deserialize(JsonElement element, Type type, JsonDeserializationContext context)
throws JsonParseException
{
JsonObject obj = element.getAsJsonObject();
JsonArray metrics = obj.getAsJsonArray("metrics");
List<Metric> values = new ArrayList<Metric>();
if(metrics != null && metrics.isJsonArray())
{
for(JsonElement metric : metrics)
values.add(gson.fromJson(metric, Metric.class));
}
return values;
} |
python | def _histplot_op(values, values2, rotated, ax, hist_kwargs):
"""Add a histogram for the data to the axes."""
if values2 is not None:
raise NotImplementedError("Insert hexbin plot here")
bins = hist_kwargs.pop("bins")
if bins is None:
bins = get_bins(values)
ax.hist(values, bins=bins, **hist_kwargs)
if rotated:
ax.set_yticks(bins[:-1])
else:
ax.set_xticks(bins[:-1])
if hist_kwargs["label"] is not None:
ax.legend()
return ax |
python | def nested_assign(self, key_list, value):
""" Set the value of nested LIVVDicts given a list """
if len(key_list) == 1:
self[key_list[0]] = value
elif len(key_list) > 1:
if key_list[0] not in self:
self[key_list[0]] = LIVVDict()
self[key_list[0]].nested_assign(key_list[1:], value) |
python | def template_tag(self,
arg: Optional[Callable] = None,
*,
name: Optional[str] = None,
pass_context: bool = False,
inject: Optional[Union[bool, Iterable[str]]] = None,
safe: bool = False,
) -> Callable:
"""
Alias for :meth:`template_global`.
:param name: The name of the tag, if different from the function name.
:param pass_context: Whether or not to pass the template context into the tag.
If ``True``, the first argument must be the context.
:param inject: Whether or not this tag needs any dependencies injected.
:param safe: Whether or not to mark the output of this tag as html-safe.
"""
return self.template_global(arg, name=name, pass_context=pass_context,
inject=inject, safe=safe) |
java | public static Object getField(Object obj, String prop) throws PageException {
try {
return getFieldsIgnoreCase(obj.getClass(), prop)[0].get(obj);
}
catch (Throwable e) {
ExceptionUtil.rethrowIfNecessary(e);
throw Caster.toPageException(e);
}
} |
python | def send_mail(subject, message, from_email, recipient_list, html_message='',
scheduled_time=None, headers=None, priority=PRIORITY.medium):
"""
Add a new message to the mail queue. This is a replacement for Django's
``send_mail`` core email method.
"""
subject = force_text(subject)
status = None if priority == PRIORITY.now else STATUS.queued
emails = []
for address in recipient_list:
emails.append(
Email.objects.create(
from_email=from_email, to=address, subject=subject,
message=message, html_message=html_message, status=status,
headers=headers, priority=priority, scheduled_time=scheduled_time
)
)
if priority == PRIORITY.now:
for email in emails:
email.dispatch()
return emails |
java | protected void increaseRefreshInterval() {
refreshInterval = Math.min(REFRESH_INTERVALS.size() - 1, refreshInterval + 1);
// reset view
resetAllParts();
synchronized (refreshThread) {
refreshThread.notify();
}
} |
python | def convert_index_to_keys(d, item):
# use a separate function rather than a method inside the class IndexDict
'''
Convert ``item`` of various types (int, tuple/list, slice, or a normal key)
to a single key or a list of keys.
'''
keys = force_list(d.keys())
# use KeyError for compatibility of normal use
# Warning: int item will be interpreted as the index rather than key!!
if isinstance(item, int):
item = _int_to_key(keys, item)
single = True
elif isinstance(item, (tuple, list)):
item2 = []
for i in item:
i = _int_to_key(keys, i)
item2.append(i)
item = item2
single = False
elif isinstance(item, slice):
start, stop, step = item.start, item.stop, item.step
# None is not interpreted as a key
if not isinstance(start, (NoneType, int)):
try:
start = keys.index(start)
except ValueError:
raise KeyError('%s is not in the list of keys' % (start,))
if not isinstance(stop, (NoneType, int)):
try:
stop = keys.index(stop)
except ValueError:
raise KeyError('%s is not in the list of keys' % (stop,))
item = keys[start:stop:step]
single = False
else: # other types, treated as a single key
IndexDict_check_key_type(item)
single = True
return item, single |
java | public UPnPStateVariable getStateVariable(String name) {
if (name.equals("Time"))
return time;
else if (name.equals("Result"))
return result;
else return null;
} |
python | def slice_hidden(self, x):
"""Slice encoder hidden state into block_dim.
Args:
x: Encoder hidden state of shape [-1, hidden_size].
Returns:
Sliced states of shape [-1, num_blocks, block_dim].
"""
x_sliced = tf.reshape(
x, shape=[-1, self.hparams.num_blocks, self.hparams.block_dim])
return x_sliced |
java | public void write(OutputStream out) throws IOException {
DataOutputStream dos = new DataOutputStream(out);
// Write out the main attributes for the manifest
attr.writeMain(dos);
// Now write out the pre-entry attributes
Iterator<Map.Entry<String, Attributes>> it = entries.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<String, Attributes> e = it.next();
StringBuffer buffer = new StringBuffer("Name: ");
String value = e.getKey();
if (value != null) {
byte[] vb = value.getBytes("UTF8");
value = new String(vb, 0, 0, vb.length);
}
buffer.append(value);
buffer.append("\r\n");
make72Safe(buffer);
dos.writeBytes(buffer.toString());
e.getValue().write(dos);
}
dos.flush();
} |
python | def hit_count(self, request, hitcount):
"""
Called with an HttpRequest and a HitCount object, it will return a
namedtuple:
UpdateHitCountResponse(hit_counted=Boolean, hit_message='Message').
`hit_counted` will be True if the hit was counted and False if it was
not. `hit_message` will indicate by what means the Hit was either
counted or ignored.
"""
UpdateHitCountResponse = namedtuple(
'UpdateHitCountResponse', 'hit_counted hit_message')
# as of Django 1.8.4 empty sessions are not being saved
# https://code.djangoproject.com/ticket/25489
if request.session.session_key is None:
request.session.save()
user = request.user
try:
is_authenticated_user = user.is_authenticated()
except:
is_authenticated_user = user.is_authenticated
session_key = request.session.session_key
ip = get_ip(request)
user_agent = request.META.get('HTTP_USER_AGENT', '')[:255]
hits_per_ip_limit = getattr(settings, 'HITCOUNT_HITS_PER_IP_LIMIT', 0)
exclude_user_group = getattr(settings, 'HITCOUNT_EXCLUDE_USER_GROUP', None)
# first, check our request against the IP blacklist
if BlacklistIP.objects.filter(ip__exact=ip):
return UpdateHitCountResponse(
False, 'Not counted: user IP has been blacklisted')
# second, check our request against the user agent blacklist
if BlacklistUserAgent.objects.filter(user_agent__exact=user_agent):
return UpdateHitCountResponse(
False, 'Not counted: user agent has been blacklisted')
# third, see if we are excluding a specific user group or not
if exclude_user_group and is_authenticated_user:
if user.groups.filter(name__in=exclude_user_group):
return UpdateHitCountResponse(
False, 'Not counted: user excluded by group')
# eliminated first three possible exclusions, now on to checking our database of
# active hits to see if we should count another one
# start with a fresh active query set (HITCOUNT_KEEP_HIT_ACTIVE)
qs = Hit.objects.filter_active()
# check limit on hits from a unique ip address (HITCOUNT_HITS_PER_IP_LIMIT)
if hits_per_ip_limit:
if qs.filter(ip__exact=ip).count() >= hits_per_ip_limit:
return UpdateHitCountResponse(
False, 'Not counted: hits per IP address limit reached')
# create a generic Hit object with request data
hit = Hit(session=session_key, hitcount=hitcount, ip=get_ip(request),
user_agent=request.META.get('HTTP_USER_AGENT', '')[:255],)
# first, use a user's authentication to see if they made an earlier hit
if is_authenticated_user:
if not qs.filter(user=user, hitcount=hitcount):
hit.user = user # associate this hit with a user
hit.save()
response = UpdateHitCountResponse(
True, 'Hit counted: user authentication')
else:
response = UpdateHitCountResponse(
False, 'Not counted: authenticated user has active hit')
# if not authenticated, see if we have a repeat session
else:
if not qs.filter(session=session_key, hitcount=hitcount):
hit.save()
response = UpdateHitCountResponse(
True, 'Hit counted: session key')
else:
response = UpdateHitCountResponse(
False, 'Not counted: session key has active hit')
return response |
java | Collection<URL> getURLs(String path) {
// Go through all of the items that match this path and get the URIs for them
Collection<URL> urls = new HashSet<URL>();
for (EntryInfo ei : _entries) {
if (ei.matches(path)) {
urls.addAll(ei.getURLs(path));
}
}
return urls;
} |
python | def create_poi_gdf(polygon=None, amenities=None, north=None, south=None, east=None, west=None):
"""
Parse GeoDataFrames from POI json that was returned by Overpass API.
Parameters
----------
polygon : shapely Polygon or MultiPolygon
geographic shape to fetch the POIs within
amenities: list
List of amenities that will be used for finding the POIs from the selected area.
See available amenities from: http://wiki.openstreetmap.org/wiki/Key:amenity
north : float
northern latitude of bounding box
south : float
southern latitude of bounding box
east : float
eastern longitude of bounding box
west : float
western longitude of bounding box
Returns
-------
Geopandas GeoDataFrame with POIs and the associated attributes.
"""
responses = osm_poi_download(polygon=polygon, amenities=amenities, north=north, south=south, east=east, west=west)
# Parse coordinates from all the nodes in the response
coords = parse_nodes_coords(responses)
# POI nodes
poi_nodes = {}
# POI ways
poi_ways = {}
# A list of POI relations
relations = []
for result in responses['elements']:
if result['type'] == 'node' and 'tags' in result:
poi = parse_osm_node(response=result)
# Add element_type
poi['element_type'] = 'node'
# Add to 'pois'
poi_nodes[result['id']] = poi
elif result['type'] == 'way':
# Parse POI area Polygon
poi_area = parse_polygonal_poi(coords=coords, response=result)
if poi_area:
# Add element_type
poi_area['element_type'] = 'way'
# Add to 'poi_ways'
poi_ways[result['id']] = poi_area
elif result['type'] == 'relation':
# Add relation to a relation list (needs to be parsed after all nodes and ways have been parsed)
relations.append(result)
# Create GeoDataFrames
gdf_nodes = gpd.GeoDataFrame(poi_nodes).T
gdf_nodes.crs = settings.default_crs
gdf_ways = gpd.GeoDataFrame(poi_ways).T
gdf_ways.crs = settings.default_crs
# Parse relations (MultiPolygons) from 'ways'
gdf_ways = parse_osm_relations(relations=relations, osm_way_df=gdf_ways)
# Combine GeoDataFrames
gdf = gdf_nodes.append(gdf_ways, sort=False)
return gdf |
java | public Quaternionf fromAxisAngleRad(Vector3fc axis, float angle) {
return fromAxisAngleRad(axis.x(), axis.y(), axis.z(), angle);
} |
python | def info(self):
"""Show IANA allocation information for the current IP address.
>>> ip = IP("127.0.0.1")
>>> print(ip.info())
LOOPBACK
"""
b = self.bin()
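# Look up the longest binary prefix of the address that has an IANA allocation entry.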
for i in range(len(b), 0, -1):
if b[:i] in self._range[self.v]:
return self._range[self.v][b[:i]]
return 'UNKNOWN' |
java | @JsonProperty("labels")
@JsonInclude(Include.NON_EMPTY)
public Map<String, TermImpl> getLabelUpdates() {
return getMonolingualUpdatedValues(newLabels);
} |
python | def set_timeout(self, delay, err=TimeoutError):
"""Called to set a transaction timer."""
if _debug: IOCB._debug("set_timeout(%d) %r err=%r", self.ioID, delay, err)
# if one has already been created, cancel it
if self.ioTimeout:
self.ioTimeout.suspend_task()
else:
self.ioTimeout = FunctionTask(self.abort, err)
# (re)schedule it
self.ioTimeout.install_task(delay=delay) |
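A minimal sketch of how this timer might be armed on an IOCB instance; the iocb variable and the delays are assumptions for illustration.
# Hypothetical: abort the request with TimeoutError after 5 seconds
iocb.set_timeout(5.0)
# A custom error class can be supplied instead
iocb.set_timeout(10.0, err=RuntimeError)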
java | @Deprecated
public static ImmutableSet<ExecutableElement> getLocalAndInheritedMethods(
TypeElement type, Elements elementUtils) {
Overrides overrides = new Overrides.NativeOverrides(elementUtils);
return getLocalAndInheritedMethods(type, overrides);
} |
python | def issuer_serial(self):
"""
:return:
A byte string of the SHA-256 hash of the issuer concatenated with
the ascii character ":", concatenated with the serial number as
an ascii string
"""
if self._issuer_serial is None:
self._issuer_serial = self.issuer.sha256 + b':' + str_cls(self.serial_number).encode('ascii')
return self._issuer_serial |
java | /**
 * Creates a PKCS#12 key store holding the given private key and a single certificate,
 * delegating to the list-based overload.
 */
public final KeyStore createPkcs12KeyStore(String keyName, char[] password, PrivateKey privateKey, X509Certificate certificate) {
logger.entry();
List<X509Certificate> certificates = new ArrayList<>();
certificates.add(certificate);
KeyStore keyStore = createPkcs12KeyStore(keyName, password, privateKey, certificates);
logger.exit();
return keyStore;
} |
java | public com.squareup.okhttp.Call getFleetsFleetIdWingsAsync(Long fleetId, String acceptLanguage, String datasource,
String ifNoneMatch, String language, String token, final ApiCallback<List<FleetWingsResponse>> callback)
throws ApiException {
com.squareup.okhttp.Call call = getFleetsFleetIdWingsValidateBeforeCall(fleetId, acceptLanguage, datasource,
ifNoneMatch, language, token, callback);
Type localVarReturnType = new TypeToken<List<FleetWingsResponse>>() {
}.getType();
apiClient.executeAsync(call, localVarReturnType, callback);
return call;
} |
python | async def update_data(self, *,
chat: typing.Union[str, int, None] = None,
user: typing.Union[str, int, None] = None,
data: typing.Dict = None,
**kwargs):
"""
Update data for a user in a chat.
Data can be passed via the data parameter and/or as kwargs.
Chat or user is always required; if one of them is omitted, the missing
value must be derived from the one that is provided.
:param data:
:param chat:
:param user:
:param kwargs:
:return:
"""
raise NotImplementedError |
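Because the method above is abstract, a concrete storage backend is expected to override it; the call below is only a sketch of the caller side, with the storage object and identifiers assumed for the example.
# Hypothetical call against a concrete storage implementation
await storage.update_data(chat=-1001234567890, user=42,
                          data={'step': 'confirm'}, attempts=1)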
python | def more_like_this(self, query, fields, columns=None, start=0, rows=30):
"""
Retrieves "more like this" results for a passed query document
query - query for a document on which to base similar documents
fields - fields on which to base similarity estimation (either comma delimited string or a list)
columns - columns to return (list of strings)
start - start number for first result (used in pagination)
rows - number of rows to return (used for pagination, defaults to 30)
"""
if isinstance(fields, basestring):
mlt_fields = fields
else:
mlt_fields = ",".join(fields)
if columns is None:
columns = ["*", "score"]
fields = {'q' : query,
'json.nl': 'map',
'mlt.fl': mlt_fields,
'fl': ",".join(columns),
'start': str(start),
'rows': str(rows),
'wt': "json"}
if len(self.endpoints) > 1:
fields["shards"] = self._get_shards()
assert self.default_endpoint in self.endpoints
request_url = _get_url(self.endpoints[self.default_endpoint], "mlt")
results = self._send_solr_query(request_url, fields)
if not results:
return None
assert "responseHeader" in results
# Check for response status
if not results.get("responseHeader").get("status") == 0:
logger.error("Server error while retrieving results: %s", results)
return None
assert "response" in results
result_obj = self._parse_response(results)
return result_obj |
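A hedged usage sketch for the method above; the client object is hypothetical since its construction is not shown here, but the argument shapes follow the docstring.
# Hypothetical client instance; only the call shape mirrors the code above
results = client.more_like_this("id:doc-123",
                                fields=["title", "body"],
                                columns=["id", "title", "score"],
                                start=0, rows=10)
if results is not None:
    print(results)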
python | def ptb_producer(raw_data, batch_size, num_steps, name=None):
"""Iterate on the raw PTB data.
This chunks up raw_data into batches of examples and returns Tensors that
are drawn from these batches.
Args:
raw_data: one of the raw data outputs from ptb_raw_data.
batch_size: int, the batch size.
num_steps: int, the number of unrolls.
name: the name of this operation (optional).
Returns:
A pair of Tensors, each shaped [batch_size, num_steps]. The second element
of the tuple is the same data time-shifted to the right by one.
Raises:
tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
"""
with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)
data_len = tf.size(raw_data)
batch_len = data_len // batch_size
data = tf.reshape(raw_data[0 : batch_size * batch_len],
[batch_size, batch_len])
epoch_size = (batch_len - 1) // num_steps
assertion = tf.assert_positive(
epoch_size,
message="epoch_size == 0, decrease batch_size or num_steps")
with tf.control_dependencies([assertion]):
epoch_size = tf.identity(epoch_size, name="epoch_size")
i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
x = tf.strided_slice(data, [0, i * num_steps],
[batch_size, (i + 1) * num_steps])
x.set_shape([batch_size, num_steps])
y = tf.strided_slice(data, [0, i * num_steps + 1],
[batch_size, (i + 1) * num_steps + 1])
y.set_shape([batch_size, num_steps])
return x, y |
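A short sketch of driving this producer in a TF1-style session; the toy corpus is an assumption, and the queue runners are started because range_input_producer is queue-based.
# Assumes a TF1 graph/session environment, matching the code above
raw = list(range(1000))                 # toy corpus of word ids
x, y = ptb_producer(raw, batch_size=4, num_steps=5)
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    inputs, targets = sess.run([x, y])  # both shaped [4, 5]
    coord.request_stop()
    coord.join(threads)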
java | public void stop() {
try {
mRMClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "", "");
} catch (YarnException e) {
LOG.error("Failed to unregister application", e);
} catch (IOException e) {
LOG.error("Failed to unregister application", e);
}
mRMClient.stop();
// TODO(andrew): Think about whether we should stop mNMClient here
mYarnClient.stop();
} |
python | def parse_prokka(self, f):
""" Parse prokka txt summary files.
Prokka summary files are difficult to identify as there are practically
no distinct prokka identifiers in the filenames or file contents. This
parser makes an attempt using the first three lines, expected to contain
organism, contigs, and bases statistics.
"""
s_name = None
# Look at the first three lines, they are always the same
first_line = f['f'].readline()
contigs_line = f['f'].readline()
bases_line = f['f'].readline()
# If any of these fail, it's probably not a prokka summary file
if not all((first_line.startswith("organism:"),
contigs_line.startswith("contigs:"),
bases_line.startswith("bases:"))):
return
# Get organism and sample name from the first line
# Assumes organism name only consists of two words,
# i.e. 'Genusname speciesname', and that the remaining
# text on the organism line is the sample name.
try:
organism = " ".join(first_line.strip().split(":", 1)[1].split()[:2])
s_name = self.clean_s_name(" ".join(first_line.split()[3:]), f['root'])
except KeyError:
organism = first_line.strip().split(":", 1)[1]
s_name = f['s_name']
# Don't try to guess sample name if requested in the config
if getattr(config, 'prokka_fn_snames', False):
s_name = f['s_name']
if s_name in self.prokka:
log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
self.prokka[s_name] = dict()
self.prokka[s_name]['organism'] = organism
self.prokka[s_name]['contigs'] = int(contigs_line.split(":")[1])
self.prokka[s_name]['bases'] = int(bases_line.split(":")[1])
# Get additional info from remaining lines
for line in f['f']:
description, value = line.split(":")
try:
self.prokka[s_name][description] = int(value)
except ValueError:
log.warning("Unable to parse line: '%s'", line)
self.add_data_source(f, s_name) |
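For context, the f argument is a dict-like record; the keys the parser reads ('f', 'root', 's_name') are visible in the body above. The snippet below is a hypothetical illustration of that shape, with the path, directory, and module instance invented for the example.
# Hypothetical file record matching the keys read by parse_prokka above
with open("prokka_sample1.txt") as fh:
    module.parse_prokka({'f': fh, 'root': 'results/prokka', 's_name': 'prokka_sample1'})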
java | /**
 * Configures a serial connection on the given port and baud rate
 * (8 data bits, 1 stop bit, no parity, no flow control).
 */
public ConfigurationBuilder withSerialPort(String serialPort, int baudRate) {
configuration.connector = new SerialConnector();
configuration.address = new SerialAddress(serialPort, baudRate, DataBits.DATABITS_8, StopBits.BITS_1, Parity.NONE, FlowControl.NONE );
return this;
} |
java | @NonNull
public IconicsDrawable icon(@NonNull String icon) {
try {
ITypeface font = Iconics.findFont(mContext, icon.substring(0, 3));
icon = icon.replace("-", "_");
icon(font.getIcon(icon));
} catch (Exception ex) {
Log.e(Iconics.TAG, "Wrong icon name: " + icon);
}
return this;
} |
java | /**
 * Registers a user realm under its name and returns any realm previously mapped to that name.
 */
public UserRealm addRealm(UserRealm realm)
{
return (UserRealm)_realmMap.put(realm.getName(),realm);
} |