language | func_code_string |
---|---|
python | def new_pic_inline(cls, shape_id, rId, filename, cx, cy):
"""
Return a new `wp:inline` element containing the `pic:pic` element
specified by the argument values.
"""
pic_id = 0 # Word doesn't seem to use this, but does not omit it
pic = CT_Picture.new(pic_id, filename, rId, cx, cy)
inline = cls.new(cx, cy, shape_id, pic)
inline.graphic.graphicData._insert_pic(pic)
return inline |
java | public boolean failsChecks(String text, CheckResult checkResult) {
int length = text.length();
int result = 0;
if (checkResult != null) {
checkResult.position = 0;
checkResult.numerics = null;
checkResult.restrictionLevel = null;
}
if (0 != (this.fChecks & RESTRICTION_LEVEL)) {
RestrictionLevel textRestrictionLevel = getRestrictionLevel(text);
if (textRestrictionLevel.compareTo(fRestrictionLevel) > 0) {
result |= RESTRICTION_LEVEL;
}
if (checkResult != null) {
checkResult.restrictionLevel = textRestrictionLevel;
}
}
if (0 != (this.fChecks & MIXED_NUMBERS)) {
UnicodeSet numerics = new UnicodeSet();
getNumerics(text, numerics);
if (numerics.size() > 1) {
result |= MIXED_NUMBERS;
}
if (checkResult != null) {
checkResult.numerics = numerics;
}
}
if (0 != (this.fChecks & CHAR_LIMIT)) {
int i;
int c;
for (i = 0; i < length;) {
// U16_NEXT(text, i, length, c);
c = Character.codePointAt(text, i);
i = Character.offsetByCodePoints(text, i, 1);
if (!this.fAllowedCharsSet.contains(c)) {
result |= CHAR_LIMIT;
break;
}
}
}
if (0 != (this.fChecks & INVISIBLE)) {
// This check needs to be done on NFD input
String nfdText = nfdNormalizer.normalize(text);
// scan for more than one occurrence of the same non-spacing mark
// in a sequence of non-spacing marks.
int i;
int c;
int firstNonspacingMark = 0;
boolean haveMultipleMarks = false;
UnicodeSet marksSeenSoFar = new UnicodeSet(); // Set of combining marks in a
// single combining sequence.
for (i = 0; i < length;) {
c = Character.codePointAt(nfdText, i);
i = Character.offsetByCodePoints(nfdText, i, 1);
if (Character.getType(c) != UCharacterCategory.NON_SPACING_MARK) {
firstNonspacingMark = 0;
if (haveMultipleMarks) {
marksSeenSoFar.clear();
haveMultipleMarks = false;
}
continue;
}
if (firstNonspacingMark == 0) {
firstNonspacingMark = c;
continue;
}
if (!haveMultipleMarks) {
marksSeenSoFar.add(firstNonspacingMark);
haveMultipleMarks = true;
}
if (marksSeenSoFar.contains(c)) {
// report the error, and stop scanning.
// No need to find more than the first failure.
result |= INVISIBLE;
break;
}
marksSeenSoFar.add(c);
}
}
if (checkResult != null) {
checkResult.checks = result;
}
return (0 != result);
} |
python | def as_text(self, is_proof=True, is_pretty=False):
"""Return the DDO as a JSON text.
:param is_proof: if False then do not include the 'proof' element.
:param is_pretty: If True return dictionary in a prettier way, bool
:return: str
"""
data = self.as_dictionary(is_proof)
if is_pretty:
return json.dumps(data, indent=2, separators=(',', ': '))
return json.dumps(data) |
java | public HttpSession getSession(boolean create)
{
// 321485
if (TraceComponent.isAnyTracingEnabled()&&logger.isLoggable (Level.FINE)) { //306998.15
logger.logp(Level.FINE, CLASS_NAME,"getSession", "create " + String.valueOf(create) + ", this -> "+this);
}
if (WCCustomProperties.CHECK_REQUEST_OBJECT_IN_USE){
checkRequestObjectInUse();
}
// return _connContext.getSessionAPISupport().getSession(create);
return _requestContext.getSession(create, ((WebAppDispatcherContext) this.getDispatchContext()).getWebApp());
} |
java | public JSONObject antiPornGif(byte[] imgData) {
AipRequest request = new AipRequest();
// check param
JSONObject checkRet = checkImgFormat(imgData, "gif");
if (!"0".equals(checkRet.getString("error_code"))) {
return checkRet;
}
preOperation(request);
// add API params
String base64Content = Base64Util.encode(imgData);
request.addBody("image", base64Content);
request.setUri(ContentCensorConsts.ANTI_PORN_GIF_URL);
postOperation(request);
return requestServer(request);
} |
java | public NotificationChain basicSetMemberCallTarget(XExpression newMemberCallTarget, NotificationChain msgs)
{
XExpression oldMemberCallTarget = memberCallTarget;
memberCallTarget = newMemberCallTarget;
if (eNotificationRequired())
{
ENotificationImpl notification = new ENotificationImpl(this, Notification.SET, XbasePackage.XMEMBER_FEATURE_CALL__MEMBER_CALL_TARGET, oldMemberCallTarget, newMemberCallTarget);
if (msgs == null) msgs = notification; else msgs.add(notification);
}
return msgs;
} |
java | public alluxio.grpc.MasterHeartbeatPOptionsOrBuilder getOptionsOrBuilder() {
return options_ == null ? alluxio.grpc.MasterHeartbeatPOptions.getDefaultInstance() : options_;
} |
python | def parse_resources_directory(self, rva, size=0, base_rva = None, level = 0, dirs=None):
"""Parse the resources directory.
Given the RVA of the resources directory, it will process all
its entries.
The root will have the corresponding member of its structure,
IMAGE_RESOURCE_DIRECTORY plus 'entries', a list of all the
entries in the directory.
Those entries will have, correspondingly, all the structure's
members (IMAGE_RESOURCE_DIRECTORY_ENTRY) and an additional one,
"directory", pointing to the IMAGE_RESOURCE_DIRECTORY structure
representing upper layers of the tree. This one will also have
an 'entries' attribute, pointing to the 3rd, and last, level.
Another directory with more entries. Those last entries will
have a new attribute (both 'leaf' or 'data_entry' can be used to
access it). This structure finally points to the resource data.
All the members of this structure, IMAGE_RESOURCE_DATA_ENTRY,
are available as its attributes.
"""
# OC Patch:
if dirs is None:
dirs = [rva]
if base_rva is None:
base_rva = rva
resources_section = self.get_section_by_rva(rva)
try:
# If the RVA is invalid all would blow up. Some EXEs seem to be
# specially nasty and have an invalid RVA.
data = self.get_data(rva, Structure(self.__IMAGE_RESOURCE_DIRECTORY_format__).sizeof() )
except PEFormatError, e:
self.__warnings.append(
'Invalid resources directory. Can\'t read ' +
'directory data at RVA: 0x%x' % rva)
return None
# Get the resource directory structure, that is, the header
# of the table preceding the actual entries
#
resource_dir = self.__unpack_data__(
self.__IMAGE_RESOURCE_DIRECTORY_format__, data,
file_offset = self.get_offset_from_rva(rva) )
if resource_dir is None:
# If can't parse resources directory then silently return.
# This directory does not necessarily have to be valid to
# still have a valid PE file
self.__warnings.append(
'Invalid resources directory. Can\'t parse ' +
'directory data at RVA: 0x%x' % rva)
return None
dir_entries = []
# Advance the RVA to the position immediately following the directory
# table header and pointing to the first entry in the table
#
rva += resource_dir.sizeof()
number_of_entries = (
resource_dir.NumberOfNamedEntries +
resource_dir.NumberOfIdEntries )
# Set a hard limit on the maximum reasonable number of entries
MAX_ALLOWED_ENTRIES = 4096
if number_of_entries > MAX_ALLOWED_ENTRIES:
self.__warnings.append(
'Error parsing the resources directory, '
'The directory contains %d entries (>%s)' %
(number_of_entries, MAX_ALLOWED_ENTRIES) )
return None
strings_to_postprocess = list()
for idx in xrange(number_of_entries):
res = self.parse_resource_entry(rva)
if res is None:
self.__warnings.append(
'Error parsing the resources directory, '
'Entry %d is invalid, RVA = 0x%x. ' %
(idx, rva) )
break
entry_name = None
entry_id = None
# If all named entries have been processed, only Id ones
# remain
if idx >= resource_dir.NumberOfNamedEntries:
entry_id = res.Name
else:
ustr_offset = base_rva+res.NameOffset
try:
#entry_name = self.get_string_u_at_rva(ustr_offset, max_length=16)
entry_name = UnicodeStringWrapperPostProcessor(self, ustr_offset)
strings_to_postprocess.append(entry_name)
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the resources directory, '
'attempting to read entry name. '
'Can\'t read unicode string at offset 0x%x' %
(ustr_offset) )
if res.DataIsDirectory:
# OC Patch:
#
# One trick malware can do is to recursively reference
# the next directory. This causes hilarity to ensue when
# trying to parse everything correctly.
# If the original RVA given to this function is equal to
# the next one to parse, we assume that it's a trick.
# Instead of raising a PEFormatError this would skip some
# reasonable data so we just break.
#
# 9ee4d0a0caf095314fd7041a3e4404dc is the offending sample
if (base_rva + res.OffsetToDirectory) in dirs:
break
else:
entry_directory = self.parse_resources_directory(
base_rva+res.OffsetToDirectory,
size-(rva-base_rva), # size
base_rva=base_rva, level = level+1,
dirs=dirs + [base_rva + res.OffsetToDirectory])
if not entry_directory:
break
# Ange Albertini's code to process resources' strings
#
strings = None
if entry_id == RESOURCE_TYPE['RT_STRING']:
strings = dict()
for resource_id in entry_directory.entries:
if hasattr(resource_id, 'directory'):
for resource_lang in resource_id.directory.entries:
resource_strings = dict()
string_entry_rva = resource_lang.data.struct.OffsetToData
string_entry_size = resource_lang.data.struct.Size
string_entry_id = resource_id.id
if resource_lang.data.struct.Size is None or resource_id.id is None:
continue
string_entry_data = self.get_data(string_entry_rva, string_entry_size)
parse_strings( string_entry_data, (int(string_entry_id) - 1) * 16, resource_strings )
strings.update(resource_strings)
resource_id.directory.strings = resource_strings
dir_entries.append(
ResourceDirEntryData(
struct = res,
name = entry_name,
id = entry_id,
directory = entry_directory))
else:
struct = self.parse_resource_data_entry(
base_rva + res.OffsetToDirectory)
if struct:
entry_data = ResourceDataEntryData(
struct = struct,
lang = res.Name & 0x3ff,
sublang = res.Name >> 10 )
dir_entries.append(
ResourceDirEntryData(
struct = res,
name = entry_name,
id = entry_id,
data = entry_data))
else:
break
# Check if this entry contains version information
#
if level == 0 and res.Id == RESOURCE_TYPE['RT_VERSION']:
if len(dir_entries)>0:
last_entry = dir_entries[-1]
rt_version_struct = None
try:
rt_version_struct = last_entry.directory.entries[0].directory.entries[0].data.struct
except:
# Maybe a malformed directory structure...?
# Lets ignore it
pass
if rt_version_struct is not None:
self.parse_version_information(rt_version_struct)
rva += res.sizeof()
string_rvas = [s.get_rva() for s in strings_to_postprocess]
string_rvas.sort()
for idx, s in enumerate(strings_to_postprocess):
s.render_pascal_16()
resource_directory_data = ResourceDirData(
struct = resource_dir,
entries = dir_entries)
return resource_directory_data |
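The function above is pefile's internal resource parser; a hedged sketch of how the resulting three-level tree (type → id → language) is typically walked from the public API, reusing the attribute names the docstring describes. The input filename is hypothetical, and the `hasattr` guards reflect that leaf entries carry `data` instead of `directory`.

```python
import pefile

pe = pefile.PE("sample.exe")  # hypothetical input binary
if hasattr(pe, "DIRECTORY_ENTRY_RESOURCE"):
    for res_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:        # level 0: resource types
        if not hasattr(res_type, "directory"):
            continue
        for res_id in res_type.directory.entries:               # level 1: ids / names
            if not hasattr(res_id, "directory"):
                continue
            for res_lang in res_id.directory.entries:            # level 2: languages
                data_entry = res_lang.data.struct                # IMAGE_RESOURCE_DATA_ENTRY
                raw = pe.get_data(data_entry.OffsetToData, data_entry.Size)
                print(res_type.id, res_id.id, res_lang.id, len(raw))
```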
python | def get_candidate_config(self, merge=False, formal=False):
"""
Retrieve the configuration loaded as candidate config in your configuration session.
:param merge: Merge candidate config with running config to return
the complete configuration including all changes
:param formal: Return configuration in IOS-XR formal config format
"""
command = "show configuration"
if merge:
command += " merge"
if formal:
command += " formal"
response = self._execute_config_show(command)
match = re.search(".*(!! IOS XR Configuration.*)$", response, re.DOTALL)
if match is not None:
response = match.group(1)
return response |
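A minimal illustration of the regex step above: everything echoed before the `!! IOS XR Configuration` banner is discarded and only the configuration body is kept. The device output below is made up for the example.

```python
import re

# Hypothetical device output: echoed command followed by the candidate config.
response = (
    "show configuration\n"
    "!! IOS XR Configuration 6.1.2\n"
    "interface Loopback0\n"
    " description test\n"
    "end"
)
match = re.search(r".*(!! IOS XR Configuration.*)$", response, re.DOTALL)
if match is not None:
    response = match.group(1)
print(response)  # starts at the '!! IOS XR Configuration' banner
```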
java | protected int findBestMatch(double[] p) {
int bestCluster = -1;
bestDistance = Double.MAX_VALUE;
for (int j = 0; j < clusters.size; j++) {
double d = distanceSq(p,clusters.get(j));
if( d < bestDistance ) {
bestDistance = d;
bestCluster = j;
}
}
return bestCluster;
} |
python | def stop(self, skip_final_snapshot=False, final_snapshot_id=''):
"""
Delete this DBInstance.
:type skip_final_snapshot: bool
:param skip_final_snapshot: This parameter determines whether a final
db snapshot is created before the instance
is deleted. If True, no snapshot is created.
If False, a snapshot is created before
deleting the instance.
:type final_snapshot_id: str
:param final_snapshot_id: If a final snapshot is requested, this
is the identifier used for that snapshot.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The deleted db instance.
"""
return self.connection.delete_dbinstance(self.id,
skip_final_snapshot,
final_snapshot_id) |
python | def load_parameter_file(file_path, name = ''):
"""
Load parameters from a YAML file (or a directory containing YAML files).
:returns: An instance of :any:`ParameterNode` or :any:`Scale` or :any:`Parameter`.
"""
if not os.path.exists(file_path):
raise ValueError("{} does not exist".format(file_path))
if os.path.isdir(file_path):
return ParameterNode(name, directory_path = file_path)
data = _load_yaml_file(file_path)
return _parse_child(name, data, file_path) |
java | public Space merge(Space other) {
float minx = Math.min(x, other.x);
float miny = Math.min(y, other.y);
float newwidth = width+other.width;
float newheight = height+other.height;
if (x == other.x) {
newwidth = width;
} else {
newheight = height;
}
return new Space(minx, miny, newwidth, newheight);
} |
python | def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber) |
java | public void marshall(CreateComputerRequest createComputerRequest, ProtocolMarshaller protocolMarshaller) {
if (createComputerRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(createComputerRequest.getDirectoryId(), DIRECTORYID_BINDING);
protocolMarshaller.marshall(createComputerRequest.getComputerName(), COMPUTERNAME_BINDING);
protocolMarshaller.marshall(createComputerRequest.getPassword(), PASSWORD_BINDING);
protocolMarshaller.marshall(createComputerRequest.getOrganizationalUnitDistinguishedName(), ORGANIZATIONALUNITDISTINGUISHEDNAME_BINDING);
protocolMarshaller.marshall(createComputerRequest.getComputerAttributes(), COMPUTERATTRIBUTES_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | public static Parser<Void> many1(CharPredicate predicate) {
return Patterns.many1(predicate).toScanner(predicate + "+");
} |
java | @SuppressWarnings("unchecked")
public static void executeCommand(String[] args) throws IOException {
OptionParser parser = getParser();
// declare parameters
List<String> metaKeys = null;
String url = null;
// parse command-line input
args = AdminToolUtils.copyArrayAddFirst(args, "--" + OPT_HEAD_META_CHECK);
OptionSet options = parser.parse(args);
if(options.has(AdminParserUtils.OPT_HELP)) {
printHelp(System.out);
return;
}
// check required options and/or conflicting options
AdminParserUtils.checkRequired(options, OPT_HEAD_META_CHECK);
AdminParserUtils.checkRequired(options, AdminParserUtils.OPT_URL);
// load parameters
metaKeys = (List<String>) options.valuesOf(OPT_HEAD_META_CHECK);
url = (String) options.valueOf(AdminParserUtils.OPT_URL);
// execute command
if(metaKeys.size() == 0
|| (metaKeys.size() == 1 && metaKeys.get(0).equals(METAKEY_ALL))) {
metaKeys = Lists.newArrayList();
metaKeys.add(MetadataStore.CLUSTER_KEY);
metaKeys.add(MetadataStore.STORES_KEY);
metaKeys.add(MetadataStore.SERVER_STATE_KEY);
}
AdminClient adminClient = AdminToolUtils.getAdminClient(url);
doMetaCheck(adminClient, metaKeys);
} |
java | protected void ensureTransitionAllowed(CmmnActivityExecution execution, CaseExecutionState expected, CaseExecutionState target, String transition) {
String id = execution.getId();
CaseExecutionState currentState = execution.getCurrentState();
// the state "suspending" or "terminating" will set immediately
// inside the corresponding AtomicOperation, that's why the
// previous state will be used to ensure that the transition
// is allowed.
if (execution.isTerminating() || execution.isSuspending()) {
currentState = execution.getPreviousState();
}
// is the case execution already in the target state
if (target.equals(currentState)) {
throw LOG.isAlreadyInStateException(transition, id, target);
} else
// is the case execution in the expected state
if (!expected.equals(currentState)) {
throw LOG.unexpectedStateException(transition, id, expected, currentState);
}
} |
java | public ResourceBundle getResourceBundle(FacesContext ctx, String name) {
if (defaultApplication != null) {
return defaultApplication.getResourceBundle(ctx, name);
}
throw new UnsupportedOperationException();
} |
java | private void copyBlock(DataInputStream in,
VersionAndOpcode versionAndOpcode) throws IOException {
// Read in the header
CopyBlockHeader copyBlockHeader = new CopyBlockHeader(versionAndOpcode);
copyBlockHeader.readFields(in);
long startTime = System.currentTimeMillis();
int namespaceId = copyBlockHeader.getNamespaceId();
long blockId = copyBlockHeader.getBlockId();
long genStamp = copyBlockHeader.getGenStamp();
Block block = new Block(blockId, 0, genStamp);
if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
LOG.info("Not able to copy block " + blockId + " to "
+ s.getRemoteSocketAddress() + " because threads quota is exceeded.");
return;
}
BlockSender blockSender = null;
DataOutputStream reply = null;
boolean isOpSuccess = true;
updateCurrentThreadName("Copying block " + block);
try {
// check if the block exists or not
blockSender = new BlockSender(namespaceId, block, 0, -1, false, false, false,
false,
versionAndOpcode.getDataTransferVersion() >=
DataTransferProtocol.PACKET_INCLUDE_VERSION_VERSION, true,
datanode, null);
// set up response stream
OutputStream baseStream = NetUtils.getOutputStream(
s, datanode.socketWriteTimeout);
reply = new DataOutputStream(new BufferedOutputStream(
baseStream, SMALL_BUFFER_SIZE));
// send block content to the target
long read = blockSender.sendBlock(reply, baseStream,
dataXceiverServer.balanceThrottler);
long readDuration = System.currentTimeMillis() - startTime;
datanode.myMetrics.bytesReadLatency.inc(readDuration);
datanode.myMetrics.bytesRead.inc((int) read);
if (read > KB_RIGHT_SHIFT_MIN) {
datanode.myMetrics.bytesReadRate.inc((int) (read >> KB_RIGHT_SHIFT_BITS),
readDuration);
}
datanode.myMetrics.blocksRead.inc();
LOG.info("Copied block " + block + " to " + s.getRemoteSocketAddress());
} catch (IOException ioe) {
isOpSuccess = false;
throw ioe;
} finally {
dataXceiverServer.balanceThrottler.release();
if (isOpSuccess) {
try {
// send one last byte to indicate that the resource is cleaned.
reply.writeChar('d');
} catch (IOException ignored) {
}
}
IOUtils.closeStream(reply);
IOUtils.closeStream(blockSender);
}
} |
java | public OvhTask serviceName_partition_partitionName_snapshot_POST(String serviceName, String partitionName, OvhSnapshotEnum snapshotType) throws IOException {
String qPath = "/dedicated/nasha/{serviceName}/partition/{partitionName}/snapshot";
StringBuilder sb = path(qPath, serviceName, partitionName);
HashMap<String, Object>o = new HashMap<String, Object>();
addBody(o, "snapshotType", snapshotType);
String resp = exec(qPath, "POST", sb.toString(), o);
return convertTo(resp, OvhTask.class);
} |
python | def __is_valid_value_for_arg(self, arg, value, check_extension=True):
"""Check if value is allowed for arg
Some commands only allow a limited set of values. The method
always returns True for arguments that do not provide such a
set.
:param arg: the argument's name
:param value: the value to check
:param check_extension: check if value requires an extension
:return: True on success, False otherwise
"""
if "values" not in arg and "extension_values" not in arg:
return True
if "values" in arg and value.lower() in arg["values"]:
return True
if "extension_values" in arg:
extension = arg["extension_values"].get(value.lower())
if extension:
condition = (
check_extension and
extension not in RequireCommand.loaded_extensions
)
if condition:
raise ExtensionNotLoaded(extension)
return True
return False |
java | ServiceName serviceName(final ModelNode operation) {
String name = null;
PathAddress pa = PathAddress.pathAddress(operation.require(OP_ADDR));
for (int i = pa.size() - 1; i > 0; i--) {
PathElement pe = pa.getElement(i);
if (key.equals(pe.getKey())) {
name = pe.getValue();
break;
}
}
if (name == null) {
throw ROOT_LOGGER.operationAddressMissingKey(key);
}
return serviceName(name);
} |
java | public static void main(String[] args) throws IOException {
if (args.length < 2) {
System.err.println("Usage: JarHelper jarname.jar directory");
return;
}
JarHelper jarHelper = new JarHelper();
jarHelper.mVerbose = true;
File destJar = new File(args[0]);
File dirOrFile2Jar = new File(args[1]);
jarHelper.jarDir(dirOrFile2Jar, destJar);
} |
java | public List<AlternativeInfo> calcAlternatives(int from, int to) {
AlternativeBidirSearch altBidirDijktra = new AlternativeBidirSearch(
graph, weighting, traversalMode, maxExplorationFactor * 2);
altBidirDijktra.setMaxVisitedNodes(maxVisitedNodes);
if (weightApproximator != null) {
altBidirDijktra.setApproximation(weightApproximator);
}
altBidirDijktra.searchBest(from, to);
visitedNodes = altBidirDijktra.getVisitedNodes();
List<AlternativeInfo> alternatives = altBidirDijktra.
calcAlternatives(maxPaths, maxWeightFactor, 7, maxShareFactor, 0.8, minPlateauFactor, -0.2);
return alternatives;
} |
python | def create_service(self, *args, **kwargs):
"""Create a service to current scope.
See :class:`pykechain.Client.create_service` for available parameters.
.. versionadded:: 1.13
"""
return self._client.create_service(*args, scope=self.id, **kwargs) |
python | def _check_element(self, lookup_strings, instance):
"""Return True if lookup string/value pairs match against the passed
object.
"""
for q, val in lookup_strings.items():
if not field_lookup(instance, q, val, True):
return False
return True |
python | def create_wf_instances(self, roles=None):
"""
Creates wf instances.
Args:
roles (list): role list
Returns:
(list): wf instances
"""
# if roles specified then create an instance for each role
# else create only one instance
if roles:
wf_instances = [
WFInstance(
wf=self.wf,
current_actor=role,
task=self,
name=self.wf.name
) for role in roles
]
else:
wf_instances = [
WFInstance(
wf=self.wf,
task=self,
name=self.wf.name
)
]
# if task type is not related with objects save instances immediately.
if self.task_type in ["C", "D"]:
return [wfi.save() for wfi in wf_instances]
# if task type is related with its objects, save populate instances per object
else:
wf_obj_instances = []
for wfi in wf_instances:
role = wfi.current_actor if self.task_type == "A" else None
keys = self.get_object_keys(role)
wf_obj_instances.extend(
[WFInstance(
wf=self.wf,
current_actor=role,
task=self,
name=self.wf.name,
wf_object=key,
wf_object_type=self.object_type
).save() for key in keys]
)
return wf_obj_instances |
java | private static void unregisterMBean(String name) {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
try {
synchronized (mbs) {
ObjectName objName = new ObjectName(name);
if (mbs.isRegistered(objName)) {
mbs.unregisterMBean(objName);
}
}
} catch (Exception e) {
e.printStackTrace();
}
} |
java | public static SynchronizedGeneratorIdentity basedOn(String quorum,
String znode,
Supplier<Duration> claimDurationSupplier)
throws IOException {
ZooKeeperConnection zooKeeperConnection = new ZooKeeperConnection(quorum);
int clusterId = ClusterID.get(zooKeeperConnection.getActiveConnection(), znode);
return new SynchronizedGeneratorIdentity(zooKeeperConnection, znode, clusterId, claimDurationSupplier);
} |
python | def teach_students(self):
"""
Train each model (student) with the labeled data using bootstrap
aggregating (bagging).
"""
dataset = self.dataset
for student in self.students:
bag = self._labeled_uniform_sample(int(dataset.len_labeled()))
while bag.get_num_of_labels() != dataset.get_num_of_labels():
bag = self._labeled_uniform_sample(int(dataset.len_labeled()))
LOGGER.warning('There is student receiving only one label,'
're-sample the bag.')
student.train(bag) |
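A minimal, self-contained sketch of the bagging step above: bootstrap samples of the labeled pool are redrawn until the bag covers every label, so no student is trained on a single class. The helper and toy data here are stand-ins, not the library's own dataset/learner classes.

```python
import numpy as np

def bootstrap_until_all_labels(X, y, rng=np.random.default_rng(0)):
    """Resample (X, y) with replacement until every class appears in the bag."""
    n = len(y)
    all_labels = set(y)
    while True:
        idx = rng.integers(0, n, size=n)      # bootstrap indices
        if set(y[idx]) == all_labels:         # re-sample if a class is missing
            return X[idx], y[idx]

X = np.arange(20).reshape(10, 2)
y = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1])  # imbalanced toy labels
Xb, yb = bootstrap_until_all_labels(X, y)
print(sorted(set(yb)))  # [0, 1]
```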
java | public DescribeVpcEndpointsRequest withVpcEndpointIds(String... vpcEndpointIds) {
if (this.vpcEndpointIds == null) {
setVpcEndpointIds(new com.amazonaws.internal.SdkInternalList<String>(vpcEndpointIds.length));
}
for (String ele : vpcEndpointIds) {
this.vpcEndpointIds.add(ele);
}
return this;
} |
python | def namedb_preorder_insert( cur, preorder_rec ):
"""
Add a name or namespace preorder record, if it doesn't exist already.
DO NOT CALL THIS DIRECTLY.
"""
preorder_row = copy.deepcopy( preorder_rec )
assert 'preorder_hash' in preorder_row, "BUG: missing preorder_hash"
try:
preorder_query, preorder_values = namedb_insert_prepare( cur, preorder_row, "preorders" )
except Exception, e:
log.exception(e)
log.error("FATAL: Failed to insert name preorder '%s'" % preorder_row['preorder_hash'])
os.abort()
namedb_query_execute( cur, preorder_query, preorder_values )
return True |
java | @Override
public synchronized void dropEntity(String entity) throws DatabaseEngineException {
if (!containsEntity(entity)) {
return;
}
dropEntity(entities.get(entity).getEntity());
} |
java | @Override
public UniversalKafkaQueue init() throws Exception {
super.init();
if (getMessageFactory() == null) {
setMessageFactory(UniversalIdIntQueueMessageFactory.INSTANCE);
}
return this;
} |
python | def iterator(self):
"""
Add the default language information to all returned objects.
"""
default_language = getattr(self, '_default_language', None)
for obj in super(MultilingualModelQuerySet, self).iterator():
obj._default_language = default_language
yield obj |
java | public static <T1, T2, R1, R2, R> CompletableFuture<R> forEach3(CompletableFuture<? extends T1> value1,
Function<? super T1, ? extends CompletableFuture<R1>> value2,
BiFunction<? super T1, ? super R1, ? extends CompletableFuture<R2>> value3,
Function3<? super T1, ? super R1, ? super R2, ? extends R> yieldingFunction) {
return value1.thenCompose(in -> {
CompletableFuture<R1> a = value2.apply(in);
return a.thenCompose(ina -> {
CompletableFuture<R2> b = value3.apply(in,ina);
return b.thenApply(in2 -> yieldingFunction.apply(in, ina, in2));
});
});
} |
python | def op(scalars_layout, collections=None):
"""Creates a summary that contains a layout.
When users navigate to the custom scalars dashboard, they will see a layout
based on the proto provided to this function.
Args:
scalars_layout: The scalars_layout_pb2.Layout proto that specifies the
layout.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A tensor summary op that writes the layout to disk.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
assert isinstance(scalars_layout, layout_pb2.Layout)
summary_metadata = metadata.create_summary_metadata()
return tf.summary.tensor_summary(name=metadata.CONFIG_SUMMARY_TAG,
tensor=tf.constant(
scalars_layout.SerializeToString(),
dtype=tf.string),
collections=collections,
summary_metadata=summary_metadata) |
java | public static List<String> getUnixGroups(String user) throws IOException {
String result;
List<String> groups = new ArrayList<>();
try {
result = ShellUtils.execCommand(ShellUtils.getGroupsForUserCommand(user));
} catch (ExitCodeException e) {
// if we didn't get the group - just return empty list
LOG.warn("got exception trying to get groups for user " + user + ": " + e.getMessage());
return groups;
}
StringTokenizer tokenizer = new StringTokenizer(result, ShellUtils.TOKEN_SEPARATOR_REGEX);
while (tokenizer.hasMoreTokens()) {
groups.add(tokenizer.nextToken());
}
return groups;
} |
python | def cache_property(key, empty, type):
"""Return a new property object for a cache header. Useful if you
want to add support for a cache extension in a subclass."""
return property(lambda x: x._get_cache_value(key, empty, type),
lambda x, v: x._set_cache_value(key, v, type),
lambda x: x._del_cache_value(key),
'accessor for %r' % key) |
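A hedged, self-contained sketch of the pattern: the same three-lambda property factory wired onto a toy cache-control object. The class and the `stale-while-revalidate` extension below are illustrative stand-ins, not Werkzeug's actual implementation.

```python
def cache_property(key, empty, type):
    return property(lambda x: x._get_cache_value(key, empty, type),
                    lambda x, v: x._set_cache_value(key, v, type),
                    lambda x: x._del_cache_value(key),
                    'accessor for %r' % key)

class ToyCacheControl:
    """Minimal stand-in that stores cache directives in a plain dict."""
    def __init__(self):
        self._values = {}
    def _get_cache_value(self, key, empty, type):
        return self._values.get(key, empty)
    def _set_cache_value(self, key, value, type):
        self._values[key] = type(value) if value is not None else None
    def _del_cache_value(self, key):
        self._values.pop(key, None)
    # adding support for a cache extension in a subclass, as the docstring suggests:
    stale_while_revalidate = cache_property('stale-while-revalidate', None, int)

cc = ToyCacheControl()
cc.stale_while_revalidate = "30"
print(cc.stale_while_revalidate)  # 30
```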
java | public Shape[] union(Shape target, Shape other) {
target = target.transform(new Transform());
other = other.transform(new Transform());
if (!target.intersects(other)) {
return new Shape[] {target, other};
}
// handle the case where intersects is true but really we're talking
// about edge points
boolean touches = false;
int buttCount = 0;
for (int i=0;i<target.getPointCount();i++) {
if (other.contains(target.getPoint(i)[0], target.getPoint(i)[1])) {
if (!other.hasVertex(target.getPoint(i)[0], target.getPoint(i)[1])) {
touches = true;
break;
}
}
if (other.hasVertex(target.getPoint(i)[0], target.getPoint(i)[1])) {
buttCount++;
}
}
for (int i=0;i<other.getPointCount();i++) {
if (target.contains(other.getPoint(i)[0], other.getPoint(i)[1])) {
if (!target.hasVertex(other.getPoint(i)[0], other.getPoint(i)[1])) {
touches = true;
break;
}
}
}
if ((!touches) && (buttCount < 2)) {
return new Shape[] {target, other};
}
// so they are definitely touching, consider the union
return combine(target, other, false);
} |
python | def _h_function(self,h):
""" private method for the spherical variogram "h" function
Parameters
----------
h : (float or numpy.ndarray)
distance(s)
Returns
-------
h_function : float or numpy.ndarray
the value of the "h" function implied by the SphVario
"""
hh = h / self.a
h = self.contribution * (1.0 - (hh * (1.5 - (0.5 * hh * hh))))
h[hh > 1.0] = 0.0
return h |
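A self-contained numpy sketch of the same spherical model, with the input promoted to an array so the `h[hh > 1.0] = 0.0` masking also works for scalar distances; the parameter names mirror the attributes used above.

```python
import numpy as np

def spherical_h(h, contribution=1.0, a=10.0):
    """Spherical 'h' function: contribution * (1 - (1.5*hh - 0.5*hh**3)), zero beyond range a."""
    h = np.atleast_1d(np.asarray(h, dtype=float))
    hh = h / a
    out = contribution * (1.0 - (hh * (1.5 - (0.5 * hh * hh))))
    out[hh > 1.0] = 0.0          # no contribution beyond the range
    return out

print(spherical_h([0.0, 5.0, 10.0, 20.0]))  # decays from `contribution` at h=0 to 0 at the range
```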
python | def _set_property(xml_root, name, value, properties=None):
"""Sets property to specified value."""
if properties is None:
properties = xml_root.find("properties")
for prop in properties:
if prop.get("name") == name:
prop.set("value", utils.get_unicode_str(value))
break
else:
etree.SubElement(
properties, "property", {"name": name, "value": utils.get_unicode_str(value)}
) |
python | def _listdir(pth, extensions):
"""Non-raising listdir."""
try:
return [fname for fname in os.listdir(pth)
if os.path.splitext(fname)[1] in extensions]
except OSError: # pragma: nocover
pass |
python | def get_polygons(self):
"""
Retrieves all of the user's polygons registered on the Agro API.
:returns: list of `pyowm.agro10.polygon.Polygon` objects
"""
status, data = self.http_client.get_json(
POLYGONS_URI,
params={'appid': self.API_key},
headers={'Content-Type': 'application/json'})
return [Polygon.from_dict(item) for item in data] |
python | def aggregate(self, block_size):
'''
geo.aggregate(block_size)
Returns copy of raster aggregated to smaller resolution, by adding cells.
'''
raster2 = block_reduce(self.raster, block_size, func=np.ma.sum)
geot = self.geot
geot = (geot[0], block_size[0] * geot[1], geot[2], geot[3], geot[4],
block_size[1] * geot[-1])
return GeoRaster(raster2, geot, nodata_value=self.nodata_value,\
projection=self.projection, datatype=self.datatype) |
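A minimal sketch of the aggregation step on a bare array, assuming `skimage.measure.block_reduce` as used above (the original passes `np.ma.sum` so nodata masks are honoured). The geotransform bookkeeping, with the pixel size scaled by the block size, is shown alongside; the 30 m pixel size is hypothetical.

```python
import numpy as np
from skimage.measure import block_reduce

raster = np.arange(16, dtype=float).reshape(4, 4)
block_size = (2, 2)

# sum each 2x2 block -> a 2x2 output
coarse = block_reduce(raster, block_size, func=np.sum)

# GDAL-style geotransform: (x_origin, x_pixel, x_rot, y_origin, y_rot, y_pixel)
geot = (0.0, 30.0, 0.0, 0.0, 0.0, -30.0)             # hypothetical 30 m pixels
geot = (geot[0], block_size[0] * geot[1], geot[2],
        geot[3], geot[4], block_size[1] * geot[-1])   # now 60 m pixels
print(coarse)
print(geot)
```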
python | def dft_optsize(im, shape=None):
"""Resize image for optimal DFT and computes it
Parameters
----------
im: 2d array
The image
shape: 2 numbers, optional
The shape of the output image (None will optimize the shape)
Returns
-------
dft: 2d array
The dft in CCS representation
Notes
-----
The shape should be a product of powers of 2, 3 and 5
"""
im = np.asarray(im)
# save shape
initshape = im.shape
# get optimal size
if shape is None:
ys = cv2.getOptimalDFTSize(initshape[0])
xs = cv2.getOptimalDFTSize(initshape[1])
shape = [ys, xs]
# Add zeros to go to optimal size
im = cv2.copyMakeBorder(im, 0, shape[0] - initshape[0],
0, shape[1] - initshape[1],
borderType=cv2.BORDER_CONSTANT, value=0)
# Compute dft ignoring 0 rows (0 columns can not be optimized)
f = cv2.dft(im, nonzeroRows=initshape[0])
return f |
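A small illustration of the padding logic above: `cv2.getOptimalDFTSize` rounds each dimension up to the next length whose factorization uses only 2, 3 and 5, which is what makes the subsequent `cv2.dft` fast. The array below is random toy data.

```python
import cv2
import numpy as np

for n in (90, 97, 100, 513):
    print(n, "->", cv2.getOptimalDFTSize(n))   # e.g. 97 -> 100, 513 -> 540

im = np.random.rand(97, 130).astype(np.float32)
ys, xs = (cv2.getOptimalDFTSize(s) for s in im.shape)
padded = cv2.copyMakeBorder(im, 0, ys - im.shape[0], 0, xs - im.shape[1],
                            borderType=cv2.BORDER_CONSTANT, value=0)
f = cv2.dft(padded, nonzeroRows=im.shape[0])   # CCS-packed spectrum, same shape as `padded`
print(padded.shape, f.shape)
```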
java | public static final <T> Stream<T> diGraph(T root, Function<? super T, ? extends Stream<T>> edges)
{
return DiGraphIterator.stream(root, edges);
} |
python | def is_numeric(property_name, *, numtype="float", min=None, max=None,
present_optional=False, message=None):
"""Returns a Validation that checks a property as a number, with optional range constraints."""
if numtype == "int":
cast = util.try_parse_int
elif numtype == "decimal":
cast = util.try_parse_decimal
elif numtype == "float":
cast = util.try_parse_float
else:
raise ValueError("numtype argument must be one of: int, decimal, float")
def check(val):
"""Checks that a value can be parsed as a number."""
if val is None:
return present_optional
else:
is_num, new_val = cast(val)
if not is_num:
return False
else:
if min is not None and new_val < min:
return False
if max is not None and new_val > max:
return False
return True
if not message:
msg = ["must be a"]
if numtype == "int":
msg.append("whole number")
else:
msg.append("number")
if min is not None and max is not None:
msg.append("between {0} and {1}".format(min, max))
elif min is not None:
msg.append("greater than or equal to {0}".format(min))
elif max is not None:
msg.append("less than or equal to {0}".format(max))
message = " ".join(msg)
return Validation(check, property_name, message) |
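Illustrative use of the validator factory above; the `Validation` object comes from the surrounding framework, so only the construction is shown, and the property names are hypothetical. The default messages follow from the message-building logic in the function.

```python
# Hypothetical usage: build reusable rules from the factory above.
age_rule = is_numeric("age", numtype="int", min=0, max=120)
ratio_rule = is_numeric("ratio", numtype="float", min=0.0)

# Default messages composed by the logic above:
#   age_rule   -> "must be a whole number between 0 and 120"
#   ratio_rule -> "must be a number greater than or equal to 0.0"
```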
python | def get_quantile_levels(density, x, y, xp, yp, q, normalize=True):
"""Compute density levels for given quantiles by interpolation
For a given 2D density, compute the density levels at which
the resulting contours contain the fraction `1-q` of all
data points. E.g. for a measurement of 1000 events, all
contours at the level corresponding to a quantile of
`q=0.95` (95th percentile) contain 50 events (5%).
Parameters
----------
density: 2d ndarray of shape (M, N)
Kernel density estimate for which to compute the contours
x: 2d ndarray of shape (M, N) or 1d ndarray of size M
X-values corresponding to `kde`
y: 2d ndarray of shape (M, N) or 1d ndarray of size M
Y-values corresponding to `kde`
xp: 1d ndarray of size D
Event x-data from which to compute the quantile
yp: 1d ndarray of size D
Event y-data from which to compute the quantile
q: array_like or float between 0 and 1
Quantile along which to find contours in `kde` relative
to its maximum
normalize: bool
Whether output levels should be normalized to the maximum
of `density`
Returns
-------
level: float
Contours level corresponding to the given quantile
Notes
-----
NaN-valued events in `xp` and `yp` are ignored.
"""
# xy coordinates
if len(x.shape) == 2:
assert np.all(x[:, 0] == x[:, 1])
x = x[:, 0]
if len(y.shape) == 2:
assert np.all(y[0, :] == y[1, :])
y = y[0, :]
# remove bad events
bad = get_bad_vals(xp, yp)
xp = xp[~bad]
yp = yp[~bad]
# Normalize interpolation data such that the spacing for
# x and y is about the same during interpolation.
x_norm = x.max()
x = x / x_norm
xp = xp / x_norm
y_norm = y.max()
y = y / y_norm
yp = yp / y_norm
# Perform interpolation
dp = spint.interpn((x, y), density,
(xp, yp),
method='linear',
bounds_error=False,
fill_value=0)
if normalize:
dp /= density.max()
if not np.isscalar(q):
q = np.array(q)
plev = np.nanpercentile(dp, q=q*100)
return plev |
python | def sceneRect( self ):
"""
Returns the scene geometry for this node by resolving any \
inheritance position data since QGraphicsItem's return \
relative-space positions.
:return <QRectF>
"""
pos = self.scenePos()
rect = self.rect()
return QRectF(pos.x(), pos.y(), rect.width(), rect.height()) |
python | def moment2(self):
"""The second time delay weighted statistical momens of the
instantaneous unit hydrograph."""
moment1 = self.moment1
delays, response = self.delay_response_series
return statstools.calc_mean_time_deviation(
delays, response, moment1) |
python | def is_valid(self):
''' Validate form.
Return True if Django validates the form, the username obeys the parameters, and passwords match.
Return False otherwise.
'''
if not super(DeleteUserForm, self).is_valid():
return False
if self.user == self.request.user:
self._errors["__all__"] = self.error_class([MESSAGES['SELF_DELETE']])
return False
return True |
java | @Override
public UpdateXssMatchSetResult updateXssMatchSet(UpdateXssMatchSetRequest request) {
request = beforeClientExecution(request);
return executeUpdateXssMatchSet(request);
} |
python | def parse(self,type_regex=None):
"""
Each line of the frame cache file is like the following:
/frames/E13/LHO/frames/hoftMon_H1/H-H1_DMT_C00_L2-9246,H,H1_DMT_C00_L2,1,16 1240664820 6231 {924600000 924646720 924646784 924647472 924647712 924700000}
The description is as follows:
1.1) Directory path of files
1.2) Site
1.3) Type
1.4) Number of frames in the files (assumed to be 1)
1.5) Duration of the frame files.
2) UNIX timestamp for directory modification time.
3) Number of files that that match the above pattern in the directory.
4) List of time range or segments [start, stop)
We store the cache for each site and frameType combination
as a dictionary where the keys are (directory, duration)
tuples and the values are segment lists.
Since the cache file is already coalesced we do not
have to call the coalesce method on the segment lists.
"""
path = self.__path
cache = self.cache
if type_regex:
type_filter = re.compile(type_regex)
else:
type_filter = None
f = open(path, 'r')
# holds this iteration of the cache
gwfDict = {}
# parse each line in the cache file
for line in f:
# ignore lines that don't match the regex
if type_filter and type_filter.search(line) is None:
continue
# split on spaces and then comma to get the parts
header, modTime, fileCount, times = line.strip().split(' ', 3)
dir, site, frameType, frameCount, duration = header.split(',')
duration = int(duration)
# times string has form { t1 t2 t3 t4 t5 t6 ... tN t(N+1) }
# where the (ti, t(i+1)) represent segments
#
# first turn the times string into a list of integers
times = [ int(s) for s in times[1:-1].split(' ') ]
# group the integers by two and turn those tuples into segments
segments = [ pycbc_glue.segments.segment(a) for a in self.group(times, 2) ]
# initialize if necessary for this site
if not gwfDict.has_key(site):
gwfDict[site] = {}
# initialize if necessary for this frame type
if not gwfDict[site].has_key(frameType):
gwfDict[site][frameType] = {}
# record segment list as value indexed by the (directory, duration) tuple
key = (dir, duration)
if gwfDict[site][frameType].has_key(key):
msg = "The combination %s is not unique in the frame cache file" \
% str(key)
raise RuntimeError, msg
gwfDict[site][frameType][key] = pycbc_glue.segments.segmentlist(segments)
f.close()
cache['gwf'] = gwfDict |
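A self-contained sketch of the line format described in the docstring, using the same two-stage split on the example record from the docstring (paths and times are illustrative).

```python
line = ("/frames/E13/LHO/frames/hoftMon_H1/H-H1_DMT_C00_L2-9246,H,H1_DMT_C00_L2,1,16 "
        "1240664820 6231 {924600000 924646720 924646784 924647472 924647712 924700000}")

header, mod_time, file_count, times = line.strip().split(' ', 3)
directory, site, frame_type, frame_count, duration = header.split(',')

# "{t1 t2 t3 t4 ...}" -> [(t1, t2), (t3, t4), ...] half-open segments
ts = [int(s) for s in times[1:-1].split(' ')]
segments = list(zip(ts[0::2], ts[1::2]))
print(site, frame_type, int(duration), segments[:2])
# H H1_DMT_C00_L2 16 [(924600000, 924646720), (924646784, 924647472)]
```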
python | def association_rules(self):
"""
Returns association rules that were generated. Only if implements AssociationRulesProducer.
:return: the association rules that were generated
:rtype: AssociationRules
"""
if not self.check_type(self.jobject, "weka.associations.AssociationRulesProducer"):
return None
return AssociationRules(
javabridge.call(self.jobject, "getAssociationRules", "()Lweka/associations/AssociationRules;")) |
python | def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs):
"""
Patched "distutils.msvc9compiler.query_vcvarsall" for support extra
compilers.
Set environment without use of "vcvarsall.bat".
Known supported compilers
-------------------------
Microsoft Visual C++ 9.0:
Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)
Microsoft Windows SDK 6.1 (x86, x64, ia64)
Microsoft Windows SDK 7.0 (x86, x64, ia64)
Microsoft Visual C++ 10.0:
Microsoft Windows SDK 7.1 (x86, x64, ia64)
Parameters
----------
ver: float
Required Microsoft Visual C++ version.
arch: str
Target architecture.
Return
------
environment: dict
"""
# Try to get environement from vcvarsall.bat (Classical way)
try:
orig = get_unpatched(msvc9_query_vcvarsall)
return orig(ver, arch, *args, **kwargs)
except distutils.errors.DistutilsPlatformError:
# Pass error if Vcvarsall.bat is missing
pass
except ValueError:
# Pass error if environment not set after executing vcvarsall.bat
pass
# If error, try to set environment directly
try:
return EnvironmentInfo(arch, ver).return_env()
except distutils.errors.DistutilsPlatformError as exc:
_augment_exception(exc, ver, arch)
raise |
python | def tables(self):
"""
:return: all tables stored in this database
"""
cursor = self.connection.cursor()
cursor.execute("show tables in %s" % self.db)
self._tables = [t.Table(r[0], con=self.connection, db=self.db) for r in cursor.fetchall()]
return self._tables |
python | def get_method_by_name(self, class_name, method_name, method_descriptor):
"""
Search for a :class:`EncodedMethod` in all classes in this analysis
:param class_name: name of the class, for example 'Ljava/lang/Object;'
:param method_name: name of the method, for example 'onCreate'
:param method_descriptor: descriptor, for example '(I I Ljava/lang/String)V'
:return: :class:`EncodedMethod` or None if method was not found
"""
if class_name in self.classes:
for method in self.classes[class_name].get_vm_class().get_methods():
if method.get_name() == method_name and method.get_descriptor() == method_descriptor:
return method
return None |
python | def most_populated(adf):
"""
Looks at each column, using the one with the most values
Honours the Trump override/failsafe logic. """
# just look at the feeds, ignore overrides and failsafes:
feeds_only = adf[adf.columns[1:-1]]
# find the most populated feed
cnt_df = feeds_only.count()
cnt = cnt_df.max()
selected_feeds = cnt_df[cnt_df == cnt]
# if there aren't any feeds, the first feed will work...
if len(selected_feeds) == 0:
pre_final = adf['feed001'] # if they are all empty
# they should all be
# equally empty
else:
#if there's one or more, take the highest priority one
pre_final = adf[selected_feeds.index[0]]
# create the final, applying the override and failsafe logic...
final_df = pd.concat([adf.override_feed000,
pre_final,
adf.failsafe_feed999], axis=1)
final_df = final_df.apply(_row_wise_priority, axis=1)
return final_df |
java | public static String entityEncode(String text) {
String result = text;
if (result == null) {
return result;
}
// The escapeXml function doesn't cope with some 'special' chrs
return StringEscapeUtils.escapeXml10(XMLStringUtil.escapeControlChrs(result));
} |
python | def _get_top_states(saltenv='base'):
'''
Equivalent to a salt cli: salt web state.show_top
'''
alt_states = []
try:
returned = __salt__['state.show_top']()
for i in returned[saltenv]:
alt_states.append(i)
except Exception:
raise
# log.info("top states: %s", alt_states)
return alt_states |
python | def do_list_modules(self, long_output=None,sort_order=None):
"""Display a list of loaded modules.
Config items:
- shutit.list_modules['long']
If set, also print each module's run order value
- shutit.list_modules['sort']
Select the column by which the list is ordered:
- id: sort the list by module id
- run_order: sort the list by module run order
The output is also saved to ['build']['log_config_path']/module_order.txt
Dependencies: operator
"""
shutit_global.shutit_global_object.yield_to_draw()
cfg = self.cfg
# list of module ids and other details
# will also contain column headers
table_list = []
if long_output is None:
long_output = self.list_modules['long']
if sort_order is None:
sort_order = self.list_modules['sort']
if long_output:
# --long table: sort modules by run order
table_list.append(["Order","Module ID","Description","Run Order","Built","Compatible"])
#table_list.append(["Order","Module ID","Description","Run Order","Built"])
else:
# "short" table ==> sort module by module_id
#table_list.append(["Module ID","Description","Built"])
table_list.append(["Module ID","Description","Built","Compatible"])
if sort_order == 'run_order':
d = {}
for m in self.shutit_modules:
d.update({m.module_id:m.run_order})
# sort dict by run_order; see http://stackoverflow.com/questions/613183/sort-a-python-dictionary-by-value
b = sorted(d.items(), key=operator.itemgetter(1))
count = 0
# now b is a list of tuples (module_id, run_order)
for pair in b:
# module_id is the first item of the tuple
k = pair[0]
for m in self.shutit_modules:
if m.module_id == k:
count += 1
compatible = True
if not cfg[m.module_id]['shutit.core.module.build']:
cfg[m.module_id]['shutit.core.module.build'] = True
compatible = self.determine_compatibility(m.module_id) == 0
cfg[m.module_id]['shutit.core.module.build'] = False
if long_output:
table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])
#table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build'])])
else:
table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])
elif sort_order == 'id':
l = []
for m in self.shutit_modules:
l.append(m.module_id)
l.sort()
for k in l:
for m in self.shutit_modules:
if m.module_id == k:
count = 1
compatible = True
if not cfg[m.module_id]['shutit.core.module.build']:
cfg[m.module_id]['shutit.core.module.build'] = True
compatible = self.determine_compatibility(m.module_id) == 0
if long_output:
table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])
#table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build'])])
else:
#table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build'])])
table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])
# format table for display
table = texttable.Texttable()
table.add_rows(table_list)
# Base length of table on length of strings
colwidths = []
for item in table_list:
for n in range(0,len(item)):
# default to 10 chars
colwidths.append(10)
break
for item in table_list:
for n in range(0,len(item)-1):
if len(str(item[n])) > colwidths[n]:
colwidths[n] = len(str(item[n]))
table.set_cols_width(colwidths)
msg = table.draw()
shutit_global.shutit_global_object.shutit_print('\n' + msg) |
java | public static void searchInFile(Pattern p, String inFile, String outFile, int seedLimit,
int graphPerSeed) throws FileNotFoundException
{
SimpleIOHandler h = new SimpleIOHandler();
Model model = h.convertFromOWL(new FileInputStream(inFile));
Map<BioPAXElement,List<Match>> matchMap = Searcher.search(model, p);
System.out.println("matching groups size = " + matchMap.size());
List<Set<Interaction>> inters = new LinkedList<Set<Interaction>>();
Set<Integer> encountered = new HashSet<Integer>();
Set<BioPAXElement> toExise = new HashSet<BioPAXElement>();
int seedCounter = 0;
for (BioPAXElement ele : matchMap.keySet())
{
if (seedCounter >= seedLimit) break;
int matchCounter = 0;
List<Match> matches = matchMap.get(ele);
if (!matches.isEmpty()) seedCounter++;
for (Match match : matches)
{
matchCounter++;
if (matchCounter > graphPerSeed) break;
Set<Interaction> ints = getInter(match);
toExise.addAll(Arrays.asList(match.getVariables()));
toExise.addAll(ints);
Integer hash = hashSum(ints);
if (!encountered.contains(hash))
{
encountered.add(hash);
inters.add(ints);
}
}
}
System.out.println("created pathways = " + inters.size());
Model clonedModel = excise(toExise);
int i = 0;
for (Set<Interaction> ints : inters)
{
Pathway pathway = clonedModel.addNew(Pathway.class,
System.currentTimeMillis() + "PaxtoolsPatternGeneratedMatch" + (++i));
pathway.setDisplayName("Match " + getLeadingZeros(i, inters.size()) + i);
for (Interaction anInt : ints)
{
pathway.addPathwayComponent((Process) clonedModel.getByID(anInt.getUri()));
}
}
handler.convertToOWL(clonedModel, new FileOutputStream(outFile));
} |
python | def configure(self, config_file):
"""
Parse configuration, and setup objects to use it.
"""
cfg = configparser.RawConfigParser()
try:
cfg.readfp(open(config_file))
except IOError as err:
logger.critical(
'Error while reading config file {}: {}'.format(
config_file, err.strerror))
sys.exit(1)
logger.info('Parsed config file ' + config_file)
# Extract user-defined log level from configuration
if cfg.has_option('milter', 'loglevel'):
loglevel = cfg.get('milter', 'loglevel')
loglevel_numeric = getattr(logging, loglevel.upper(), None)
if not isinstance(loglevel_numeric, int):
logger.critical(
'Config contains unsupported loglevel: ' + loglevel)
exit(1)
rl = logging.getLogger()
rl.setLevel(loglevel_numeric)
logger.debug(
'Config option applied: milter->loglevel: {}'.format(loglevel))
# Apply all config options to their respective classes
section_class_map = {
'milter': self,
'dspam': DspamClient,
'classification': DspamMilter,
}
for section in cfg.sections():
try:
class_ = section_class_map[section]
except KeyError:
logger.warning('Config contains unknown section: ' + section)
continue
logger.debug('Handling config section: ' + section)
dict_options = [
'headers',
'reject_classes',
'quarantine_classes',
'accept_classes'
]
for option in cfg.options(section):
# Kludge: static_user needs to be set on the milter,
# not on the client
if section == 'dspam' and option == 'static_user':
value = cfg.get('dspam', 'static_user')
DspamMilter.static_user = value
logger.debug(
'Config option applied: dspam->static_user: {}'.format(
value))
continue
if not hasattr(class_, option):
logger.warning(
'Config contains unknown option: {}->{}'.format(
section, option))
continue
value = cfg.get(section, option)
if option in dict_options:
value = utils.config_str2dict(value)
elif value.lower() in ['false', 'no']:
value = False
elif value.lower() in ['true', 'yes']:
value = True
setattr(class_, option, value)
logger.debug(
'Config option applied: {}->{}: {}'.format(
section, option, value))
logger.debug('Configuration completed') |
python | def read(self, size=-1):
"""Read at most `size` bytes from the file (less if there
isn't enough data).
The bytes are returned as an instance of :class:`str` (:class:`bytes`
in python 3). If `size` is negative or omitted all data is read.
:Parameters:
- `size` (optional): the number of bytes to read
"""
self._ensure_file()
if size == 0:
return EMPTY
remainder = int(self.length) - self.__position
if size < 0 or size > remainder:
size = remainder
received = 0
data = StringIO()
while received < size:
chunk_data = self.readchunk()
received += len(chunk_data)
data.write(chunk_data)
# Detect extra chunks.
max_chunk_n = math.ceil(self.length / float(self.chunk_size))
chunk = self.__chunks.find_one({"files_id": self._id,
"n": {"$gte": max_chunk_n}})
# According to spec, ignore extra chunks if they are empty.
if chunk is not None and len(chunk['data']):
raise CorruptGridFile(
"Extra chunk found: expected %i chunks but found "
"chunk with n=%i" % (max_chunk_n, chunk['n']))
self.__position -= received - size
# Return 'size' bytes and store the rest.
data.seek(size)
self.__buffer = data.read()
data.seek(0)
return data.read(size) |
python | async def get_devices(self):
"""Get the local installed version."""
data = await self.api("devices")
if self.connected and self.authenticated:
self._devices = data
else:
self._devices = self._devices
_LOGGER.debug(self._devices) |
java | public GroupName getGroupName(@NonNull List<String> prefixPath, @NonNull Map<String, MetricValue> extraTags) {
final Stream<String> suffixPath = data.entrySet().stream()
.filter(entry -> entry.getKey().getLeft().isPresent()) // Only retain int keys.
.sorted(Comparator.comparing(entry -> entry.getKey().getLeft().get())) // Sort by int key.
.map(Map.Entry::getValue)
.map(value -> value.mapCombine(b -> b.toString(), i -> i.toString(), Function.identity()));
final SimpleGroupPath path = SimpleGroupPath.valueOf(Stream.concat(prefixPath.stream(), suffixPath).collect(Collectors.toList()));
final Map<String, MetricValue> tags = data.entrySet().stream()
.filter(entry -> entry.getKey().getRight().isPresent()) // Only retain string keys.
.collect(Collectors.toMap(entry -> entry.getKey().getRight().get(), entry -> entry.getValue().mapCombine(MetricValue::fromBoolean, MetricValue::fromIntValue, MetricValue::fromStrValue)));
tags.putAll(extraTags);
return GroupName.valueOf(path, tags);
} |
java | @Override
public void releaseKam(String handle) {
if (handle == null) {
throw new InvalidArgument("handle", handle);
}
// Purge any cache entries
purgeHandle(handle);
} |
python | def windowed_hudson_fst(pos, ac1, ac2, size=None, start=None, stop=None,
step=None, windows=None, fill=np.nan):
"""Estimate average Fst in windows over a single chromosome/contig,
following the method of Hudson (1992) elaborated by Bhatia et al. (2013).
Parameters
----------
pos : array_like, int, shape (n_items,)
Variant positions, using 1-based coordinates, in ascending order.
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
size : int, optional
The window size (number of bases).
start : int, optional
The position at which to start (1-based).
stop : int, optional
The position at which to stop (1-based).
step : int, optional
The distance between start positions of windows. If not given,
defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
Manually specify the windows to use as a sequence of (window_start,
window_stop) positions, using 1-based coordinates. Overrides the
size/start/stop/step parameters.
fill : object, optional
The value to use where there are no variants within a window.
Returns
-------
fst : ndarray, float, shape (n_windows,)
Average Fst in each window.
windows : ndarray, int, shape (n_windows, 2)
The windows used, as an array of (window_start, window_stop) positions,
using 1-based coordinates.
counts : ndarray, int, shape (n_windows,)
Number of variants in each window.
"""
# compute values per-variants
num, den = hudson_fst(ac1, ac2)
# define the statistic to compute within each window
def average_fst(wn, wd):
return np.nansum(wn) / np.nansum(wd)
# calculate average Fst in windows
fst, windows, counts = windowed_statistic(pos, values=(num, den),
statistic=average_fst,
size=size, start=start,
stop=stop, step=step,
windows=windows, fill=fill)
return fst, windows, counts |
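The window statistic above is a ratio of sums ("ratio of averages"), the averaging recommended by Bhatia et al. (2013), rather than a mean of per-variant Fst values. A tiny numpy illustration of the difference, on made-up numerator/denominator values:

```python
import numpy as np

# per-variant Hudson numerators and denominators (toy values)
num = np.array([0.02, 0.00, 0.10])
den = np.array([0.20, 0.05, 0.25])

ratio_of_sums = np.nansum(num) / np.nansum(den)   # what average_fst() computes: 0.12/0.50 = 0.24
mean_of_ratios = np.nanmean(num / den)            # the biased alternative: ~0.167
print(ratio_of_sums, mean_of_ratios)
```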
python | def create_notification_channel(self, callback_url, calendar_ids=()):
"""Create a new channel for receiving push notifications.
:param string callback_url: The url that will receive push notifications.
Must not be longer than 128 characters and should be HTTPS.
:param tuple calendar_ids: List of calendar ids to create notification channels for. (Optional. Default empty tuple)
:return: Channel id and channel callback
:rtype: ``dict``
"""
data = {'callback_url': callback_url}
if calendar_ids:
data['filters'] = {'calendar_ids': calendar_ids}
return self.request_handler.post('channels', data=data).json()['channel'] |
java | protected void closeTargetResource(
CompensatingTransactionHolderSupport transactionHolderSupport) {
DirContextHolder contextHolder = (DirContextHolder) transactionHolderSupport;
DirContext ctx = contextHolder.getCtx();
try {
LOG.debug("Closing target context");
ctx.close();
} catch (NamingException e) {
LOG.warn("Failed to close target context", e);
}
} |
python | def _generate(self, size=None):
"Generates a new word"
corpus_letters = list(self.vectors.keys())
current_letter = random.choice(corpus_letters)
if size is None:
size = int(random.normalvariate(self.avg, self.std_dev))
letters = [current_letter]
for _ in range(size):
if current_letter not in corpus_letters:
# current_letter = random.choice(corpus_letters)
break
found_letter = self.vectors[current_letter].choose()
letters.append(found_letter)
current_letter = found_letter
return ''.join(letters) |
java | @SuppressWarnings("unchecked")
public static List<EntityPlayerMP> getPlayersWatchingChunk(WorldServer world, int x, int z)
{
if (playersWatchingChunk == null)
return new ArrayList<>();
try
{
PlayerChunkMapEntry entry = world.getPlayerChunkMap().getEntry(x, z);
if (entry == null)
return Lists.newArrayList();
return (List<EntityPlayerMP>) playersWatchingChunk.get(entry);
}
catch (ReflectiveOperationException e)
{
MalisisCore.log.info("Failed to get players watching chunk :", e);
return new ArrayList<>();
}
} |
java | public boolean evaluate(FieldContainer container, Map<GenericCriteriaPrompt, Object> promptValues)
{
//
// Retrieve the LHS value
//
FieldType field = m_leftValue;
Object lhs;
if (field == null)
{
lhs = null;
}
else
{
lhs = container.getCurrentValue(field);
switch (field.getDataType())
{
case DATE:
{
if (lhs != null)
{
lhs = DateHelper.getDayStartDate((Date) lhs);
}
break;
}
case DURATION:
{
if (lhs != null)
{
Duration dur = (Duration) lhs;
lhs = dur.convertUnits(TimeUnit.HOURS, m_properties);
}
else
{
lhs = Duration.getInstance(0, TimeUnit.HOURS);
}
break;
}
case STRING:
{
lhs = lhs == null ? "" : lhs;
break;
}
default:
{
break;
}
}
}
//
// Retrieve the RHS values
//
Object[] rhs;
if (m_symbolicValues == true)
{
rhs = processSymbolicValues(m_workingRightValues, container, promptValues);
}
else
{
rhs = m_workingRightValues;
}
//
// Evaluate
//
boolean result;
switch (m_operator)
{
case AND:
case OR:
{
result = evaluateLogicalOperator(container, promptValues);
break;
}
default:
{
result = m_operator.evaluate(lhs, rhs);
break;
}
}
return result;
} |
java | public static boolean isSelfCall(JCTree tree) {
Name name = calledMethodName(tree);
if (name != null) {
Names names = name.table.names;
return name==names._this || name==names._super;
} else {
return false;
}
} |
java | public static ResourceConfiguration deserialize(final File pFile, final String pResource)
throws TTIOException {
try {
final File file =
new File(new File(new File(pFile, StorageConfiguration.Paths.Data.getFile().getName()),
pResource), Paths.ConfigBinary.getFile().getName());
FileReader fileReader = new FileReader(file);
JsonReader jsonReader = new JsonReader(fileReader);
jsonReader.beginObject();
// caring about the kind of the metabucket
jsonReader.nextName().equals(JSONNAMES[0]);
Class<?> metaBucketClazz = Class.forName(jsonReader.nextString());
// caring about the versioning
jsonReader.nextName().equals(JSONNAMES[1]);
Class<?> revClazz = Class.forName(jsonReader.nextString());
// caring about the DataFactory
jsonReader.nextName().equals(JSONNAMES[2]);
Class<?> dataFacClazz = Class.forName(jsonReader.nextString());
// caring about the ByteHandlers
List<IByteHandler> handlerList = new ArrayList<IByteHandler>();
if (jsonReader.nextName().equals(JSONNAMES[3])) {
jsonReader.beginArray();
while (jsonReader.hasNext()) {
Class<?> handlerClazz = Class.forName(jsonReader.nextString());
Constructor<?> handlerCons = handlerClazz.getConstructors()[0];
if (handlerClazz.getName().equals(Encryptor.class.getName())) {
handlerList.add((IByteHandler)handlerCons.newInstance(StandardSettings.KEY));
} else {
handlerList.add((IByteHandler)handlerCons.newInstance());
}
}
jsonReader.endArray();
}
ByteHandlerPipeline pipeline =
new ByteHandlerPipeline(handlerList.toArray(new IByteHandler[handlerList.size()]));
// caring about the storage
jsonReader.nextName().equals(JSONNAMES[4]);
Class<?> storageClazz = Class.forName(jsonReader.nextString());
jsonReader.nextName().equals(JSONNAMES[5]);
Properties props = new Properties();
jsonReader.beginObject();
while (jsonReader.hasNext()) {
props.setProperty(jsonReader.nextName(), jsonReader.nextString());
}
jsonReader.endObject();
jsonReader.endObject();
jsonReader.close();
fileReader.close();
Constructor<?> metaBucketCons = metaBucketClazz.getConstructors()[0];
IMetaEntryFactory metaFac = (IMetaEntryFactory)metaBucketCons.newInstance();
Constructor<?> dataFacCons = dataFacClazz.getConstructors()[0];
IDataFactory dataFactory = (IDataFactory)dataFacCons.newInstance();
Constructor<?> revCons = revClazz.getConstructors()[0];
IRevisioning revObject = (IRevisioning)revCons.newInstance();
Constructor<?> storageCons = storageClazz.getConstructors()[0];
IBackend backend = (IBackend)storageCons.newInstance(props, dataFactory, metaFac, pipeline);
return new ResourceConfiguration(props, backend, revObject, dataFactory, metaFac);
} catch (IOException | ClassNotFoundException | InstantiationException | IllegalAccessException
| InvocationTargetException exc) {
throw new TTIOException(exc);
}
} |
python | def securities(self):
""" Returns securities aggregate """
if not self.__securities_aggregate:
self.__securities_aggregate = SecuritiesAggregate(self.book)
return self.__securities_aggregate |
java | public final void setUtlInvLine(final UtlInvLine<RS, PurchaseInvoice,
PurchaseInvoiceLine, PurchaseInvoiceTaxLine,
PurchaseInvoiceGoodsTaxLine> pUtlInvLine) {
this.utlInvLine = pUtlInvLine;
} |
python | def match_trailer(self, tokens, item):
"""Matches typedefs and as patterns."""
internal_assert(len(tokens) > 1 and len(tokens) % 2 == 1, "invalid trailer match tokens", tokens)
match, trailers = tokens[0], tokens[1:]
for i in range(0, len(trailers), 2):
op, arg = trailers[i], trailers[i + 1]
if op == "is":
self.add_check("_coconut.isinstance(" + item + ", " + arg + ")")
elif op == "as":
if arg in self.names:
self.add_check(self.names[arg] + " == " + item)
elif arg != wildcard:
self.add_def(arg + " = " + item)
self.names[arg] = item
else:
raise CoconutInternalException("invalid trailer match operation", op)
self.match(match, item) |
python | def crackOCR(self, image):
""" Attempts to crack the given OCR
        Uses the "darkest pixel" method to find the darkest pixel in the image. Once
        found, it generates a virtual box around the rest of the pet and returns the
        x and y coordinates of the middle of the virtual box. About 98.7% accurate.
Parameters:
img (StringIO) -- The image content
Returns
tuple - The x and y coordinates of the center of the pet
Raises
failedOCR
"""
try:
im = Image.open(image)
# Convert to greyscale, and find darkest pixel
im = im.convert("L")
lo, hi = im.getextrema()
# Find the pet outline and create a rectangle around a part of it
im = im.point(lambda p: p == lo)
rect = im.getbbox()
# Return the center point
return (str(0.5 * (rect[0] + rect[2])), str(0.5 * (rect[1] + rect[3])))
except Exception:
logging.getLogger("neolib.item").exception("Failed to crack OCR")
raise failedOCR |
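A stripped-down sketch of the same darkest-pixel technique applied to an arbitrary image path; the Neopets-specific logging and failedOCR exception are left out, and the path is a placeholder:

from PIL import Image

def darkest_pixel_center(path):
    """Return the (x, y) centre of the box bounding the darkest pixels."""
    im = Image.open(path).convert("L")          # greyscale
    lo, _hi = im.getextrema()                   # darkest / brightest values
    mask = im.point(lambda p: p == lo)          # keep only the darkest pixels
    box = mask.getbbox()                        # (left, upper, right, lower)
    if box is None:
        raise ValueError("no dark pixels found")
    left, top, right, bottom = box
    return 0.5 * (left + right), 0.5 * (top + bottom)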
python | def neg_loglikelihood(y, mean, scale, shape, skewness):
""" Negative loglikelihood function
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Poisson distribution
        scale : float
            scale parameter (unused by the Poisson family; kept for a common interface)
        shape : float
            tail thickness parameter (unused by the Poisson family; kept for a common interface)
        skewness : float
            skewness parameter (unused by the Poisson family; kept for a common interface)
Returns
----------
- Negative loglikelihood of the Poisson family
"""
return -np.sum(-mean + np.log(mean)*y - sp.gammaln(y + 1)) |
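A quick numeric check of the Poisson negative log-likelihood above (scale, shape and skewness are ignored by the formula); the toy data are invented:

import numpy as np
import scipy.special as sp
from scipy.stats import poisson

y = np.array([2., 0., 3., 1.])         # observed counts
mean = np.array([1.5, 0.8, 2.2, 1.0])  # per-observation Poisson rates

nll = -np.sum(-mean + np.log(mean) * y - sp.gammaln(y + 1))
# Cross-check against scipy's Poisson log-pmf:
assert np.isclose(nll, -poisson.logpmf(y, mean).sum())
print(nll)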
java | public void executePropertyModification(CmsPropertyModification propMod) {
CmsClientSitemapEntry entry = getEntryById(propMod.getId());
if (entry != null) {
Map<String, CmsClientProperty> props = getPropertiesForId(propMod.getId());
if (props != null) {
propMod.updatePropertyInMap(props);
entry.setOwnProperties(props);
}
}
} |
java | public static String formatRuntime(long runtime) {
long seconds = (runtime / SECONDS) % 60;
long minutes = (runtime / MINUTES) % 60;
long hours = (runtime / HOURS) % 24;
long days = runtime / DAYS;
StringBuffer strBuf = new StringBuffer();
if (days > 0) {
if (days < 10) {
strBuf.append('0');
}
strBuf.append(days);
strBuf.append(':');
}
if (hours < 10) {
strBuf.append('0');
}
strBuf.append(hours);
strBuf.append(':');
if (minutes < 10) {
strBuf.append('0');
}
strBuf.append(minutes);
strBuf.append(':');
if (seconds < 10) {
strBuf.append('0');
}
strBuf.append(seconds);
return strBuf.toString();
} |
python | def query(self, coords, mode='random_sample', return_flags=False, pct=None):
"""
Returns reddening at the requested coordinates. There are several
different query modes, which handle the probabilistic nature of the map
differently.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.
mode (Optional[:obj:`str`]): Seven different query modes are available:
                'random_sample', 'random_sample_per_pix', 'samples', 'median',
'mean', 'best' and 'percentile'. The :obj:`mode` determines how the
output will reflect the probabilistic nature of the Bayestar
dust maps.
return_flags (Optional[:obj:`bool`]): If :obj:`True`, then QA flags will be
returned in a second numpy structured array. That is, the query
                will return :obj:`ret`, :obj:`flags`, where :obj:`ret` is the normal return
value, containing reddening. Defaults to :obj:`False`.
pct (Optional[:obj:`float` or list/array of :obj:`float`]): If the mode is
:obj:`percentile`, then :obj:`pct` specifies which percentile(s) is
(are) returned.
Returns:
Reddening at the specified coordinates, in magnitudes of reddening.
The conversion to E(B-V) (or other reddening units) depends on
whether :obj:`version='bayestar2017'` (the default) or
:obj:`'bayestar2015'` was selected when the :obj:`BayestarQuery` object
was created. To convert Bayestar2017 to Pan-STARRS 1 extinctions,
multiply by the coefficients given in Table 1 of Green et al.
(2018). Conversion to extinction in non-PS1 passbands depends on the
choice of extinction law. To convert Bayestar2015 to extinction in
various passbands, multiply by the coefficients in Table 6 of
Schlafly & Finkbeiner (2011). See Green et al. (2015, 2018) for more
detailed discussion of how to convert the Bayestar dust maps into
reddenings or extinctions in different passbands.
The shape of the output depends on the :obj:`mode`, and on whether
:obj:`coords` contains distances.
If :obj:`coords` does not specify distance(s), then the shape of the
output begins with :obj:`coords.shape`. If :obj:`coords` does specify
distance(s), then the shape of the output begins with
:obj:`coords.shape + ([number of distance bins],)`.
If :obj:`mode` is :obj:`'random_sample'`, then at each
coordinate/distance, a random sample of reddening is given.
If :obj:`mode` is :obj:`'random_sample_per_pix'`, then the sample chosen
for each angular pixel of the map will be consistent. For example,
if two query coordinates lie in the same map pixel, then the same
random sample will be chosen from the map for both query
coordinates.
If :obj:`mode` is :obj:`'median'`, then at each coordinate/distance, the
median reddening is returned.
If :obj:`mode` is :obj:`'mean'`, then at each coordinate/distance, the
mean reddening is returned.
If :obj:`mode` is :obj:`'best'`, then at each coordinate/distance, the
maximum posterior density reddening is returned (the "best fit").
If :obj:`mode` is :obj:`'percentile'`, then an additional keyword
argument, :obj:`pct`, must be specified. At each coordinate/distance,
the requested percentiles (in :obj:`pct`) will be returned. If :obj:`pct`
is a list/array, then the last axis of the output will correspond to
different percentiles.
Finally, if :obj:`mode` is :obj:`'samples'`, then at each
coordinate/distance, all samples are returned. The last axis of the
output will correspond to different samples.
If :obj:`return_flags` is :obj:`True`, then in addition to reddening, a
structured array containing QA flags will be returned. If the input
coordinates include distances, the QA flags will be :obj:`"converged"`
(whether or not the line-of-sight fit converged in a given pixel)
and :obj:`"reliable_dist"` (whether or not the requested distance is
within the range considered reliable, based on the inferred
stellar distances). If the input coordinates do not include
distances, then instead of :obj:`"reliable_dist"`, the flags will
include :obj:`"min_reliable_distmod"` and :obj:`"max_reliable_distmod"`,
the minimum and maximum reliable distance moduli in the given pixel.
"""
# Check that the query mode is supported
self._raise_on_mode(mode)
# Validate percentile specification
pct, scalar_pct = self._interpret_percentile(mode, pct)
# Get number of coordinates requested
n_coords_ret = coords.shape[0]
# Determine if distance has been requested
has_dist = hasattr(coords.distance, 'kpc')
d = coords.distance.kpc if has_dist else None
# Extract the correct angular pixel(s)
# t0 = time.time()
pix_idx = self._find_data_idx(coords.l.deg, coords.b.deg)
in_bounds_idx = (pix_idx != -1)
# t1 = time.time()
# Extract the correct samples
if mode == 'random_sample':
# A different sample in each queried coordinate
samp_idx = np.random.randint(0, self._n_samples, pix_idx.size)
n_samp_ret = 1
elif mode == 'random_sample_per_pix':
# Choose same sample in all coordinates that fall in same angular
# HEALPix pixel
samp_idx = np.random.randint(0, self._n_samples, self._n_pix)[pix_idx]
n_samp_ret = 1
elif mode == 'best':
samp_idx = slice(None)
n_samp_ret = 1
else:
# Return all samples in each queried coordinate
samp_idx = slice(None)
n_samp_ret = self._n_samples
# t2 = time.time()
if mode == 'best':
val = self._best_fit
else:
val = self._samples
# Create empty array to store flags
if return_flags:
if has_dist:
# If distances are provided in query, return only covergence and
# whether or not this distance is reliable
dtype = [('converged', 'bool'),
('reliable_dist', 'bool')]
# shape = (n_coords_ret)
else:
# Return convergence and reliable distance ranges
dtype = [('converged', 'bool'),
('min_reliable_distmod', 'f4'),
('max_reliable_distmod', 'f4')]
flags = np.empty(n_coords_ret, dtype=dtype)
# samples = self._samples[pix_idx, samp_idx]
# samples[pix_idx == -1] = np.nan
# t3 = time.time()
# Extract the correct distance bin (possibly using linear interpolation)
if has_dist: # Distance has been provided
# Determine ceiling bin index for each coordinate
dm = 5. * (np.log10(d) + 2.)
bin_idx_ceil = np.searchsorted(self._DM_bin_edges, dm)
# Create NaN-filled return arrays
if isinstance(samp_idx, slice):
ret = np.full((n_coords_ret, n_samp_ret), np.nan, dtype='f4')
else:
ret = np.full((n_coords_ret,), np.nan, dtype='f4')
# d < d(nearest distance slice)
idx_near = (bin_idx_ceil == 0) & in_bounds_idx
if np.any(idx_near):
a = 10.**(0.2 * (dm[idx_near] - self._DM_bin_edges[0]))
if isinstance(samp_idx, slice):
ret[idx_near] = (
a[:,None]
* val[pix_idx[idx_near], samp_idx, 0])
else:
# print('idx_near: {} true'.format(np.sum(idx_near)))
# print('ret[idx_near].shape = {}'.format(ret[idx_near].shape))
# print('val.shape = {}'.format(val.shape))
# print('pix_idx[idx_near].shape = {}'.format(pix_idx[idx_near].shape))
ret[idx_near] = (
a * val[pix_idx[idx_near], samp_idx[idx_near], 0])
# d > d(farthest distance slice)
idx_far = (bin_idx_ceil == self._n_distances) & in_bounds_idx
if np.any(idx_far):
# print('idx_far: {} true'.format(np.sum(idx_far)))
# print('pix_idx[idx_far].shape = {}'.format(pix_idx[idx_far].shape))
# print('ret[idx_far].shape = {}'.format(ret[idx_far].shape))
# print('val.shape = {}'.format(val.shape))
if isinstance(samp_idx, slice):
ret[idx_far] = val[pix_idx[idx_far], samp_idx, -1]
else:
ret[idx_far] = val[pix_idx[idx_far], samp_idx[idx_far], -1]
# d(nearest distance slice) < d < d(farthest distance slice)
idx_btw = ~idx_near & ~idx_far & in_bounds_idx
if np.any(idx_btw):
DM_ceil = self._DM_bin_edges[bin_idx_ceil[idx_btw]]
DM_floor = self._DM_bin_edges[bin_idx_ceil[idx_btw]-1]
a = (DM_ceil - dm[idx_btw]) / (DM_ceil - DM_floor)
if isinstance(samp_idx, slice):
ret[idx_btw] = (
(1.-a[:,None])
* val[pix_idx[idx_btw], samp_idx, bin_idx_ceil[idx_btw]]
+ a[:,None]
* val[pix_idx[idx_btw], samp_idx, bin_idx_ceil[idx_btw]-1]
)
else:
ret[idx_btw] = (
(1.-a) * val[pix_idx[idx_btw], samp_idx[idx_btw], bin_idx_ceil[idx_btw]]
+ a * val[pix_idx[idx_btw], samp_idx[idx_btw], bin_idx_ceil[idx_btw]-1]
)
# Flag: distance in reliable range?
if return_flags:
dm_min = self._pixel_info['DM_reliable_min'][pix_idx]
dm_max = self._pixel_info['DM_reliable_max'][pix_idx]
flags['reliable_dist'] = (
(dm >= dm_min) &
(dm <= dm_max) &
np.isfinite(dm_min) &
np.isfinite(dm_max))
flags['reliable_dist'][~in_bounds_idx] = False
else: # No distances provided
ret = val[pix_idx, samp_idx, :] # Return all distances
ret[~in_bounds_idx] = np.nan
# Flag: reliable distance bounds
if return_flags:
dm_min = self._pixel_info['DM_reliable_min'][pix_idx]
dm_max = self._pixel_info['DM_reliable_max'][pix_idx]
flags['min_reliable_distmod'] = dm_min
flags['max_reliable_distmod'] = dm_max
flags['min_reliable_distmod'][~in_bounds_idx] = np.nan
flags['max_reliable_distmod'][~in_bounds_idx] = np.nan
# t4 = time.time()
# Flag: convergence
if return_flags:
flags['converged'] = (
                self._pixel_info['converged'][pix_idx].astype(bool))
flags['converged'][~in_bounds_idx] = False
# t5 = time.time()
# Reduce the samples in the requested manner
if mode == 'median':
ret = np.median(ret, axis=1)
elif mode == 'mean':
ret = np.mean(ret, axis=1)
elif mode == 'percentile':
ret = np.nanpercentile(ret, pct, axis=1)
if not scalar_pct:
# (percentile, pixel) -> (pixel, percentile)
# (pctile, pixel, distance) -> (pixel, distance, pctile)
ret = np.moveaxis(ret, 0, -1)
elif mode == 'best':
# Remove "samples" axis
s = ret.shape
ret.shape = s[:1] + s[2:]
elif mode == 'samples':
# Swap sample and distance axes to be consistent with other 3D dust
# maps. The output shape will be (pixel, distance, sample).
if not has_dist:
                ret = np.swapaxes(ret, 1, 2)
# t6 = time.time()
#
# print('')
# print('time inside bayestar.query: {:.4f} s'.format(t6-t0))
# print('{: >7.4f} s : {: >6.4f} s : _find_data_idx'.format(t1-t0, t1-t0))
# print('{: >7.4f} s : {: >6.4f} s : sample slice spec'.format(t2-t0, t2-t1))
# print('{: >7.4f} s : {: >6.4f} s : create empty return flag array'.format(t3-t0, t3-t2))
# print('{: >7.4f} s : {: >6.4f} s : extract results'.format(t4-t0, t4-t3))
# print('{: >7.4f} s : {: >6.4f} s : convergence flag'.format(t5-t0, t5-t4))
# print('{: >7.4f} s : {: >6.4f} s : reduce'.format(t6-t0, t6-t5))
# print('')
if return_flags:
return ret, flags
return ret |
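A hedged usage sketch through the public dustmaps wrapper of this method; mode names and return shapes follow the docstring above, while the constructor arguments and coordinates are assumptions (and the map data must already be downloaded):

import astropy.units as u
from astropy.coordinates import SkyCoord
from dustmaps.bayestar import BayestarQuery

# One sight line at (l, b) = (90 deg, 10 deg), 1 kpc away, in galactic coordinates.
coords = SkyCoord(90. * u.deg, 10. * u.deg, distance=1. * u.kpc, frame='galactic')

bayestar = BayestarQuery(version='bayestar2017')   # assumed constructor signature
median_reddening = bayestar.query(coords, mode='median')
samples, flags = bayestar.query(coords, mode='samples', return_flags=True)
print(median_reddening, flags['converged'])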
java | public Calendar deserialize(String serString) throws ValueFormatException
{
final String[] parts = serString.split(CALENDAR_FIELDS_SEPARATOR);
if (parts.length == 2)
{
// try parse serialized string with two formats
// 1. Complete ISO 8610 compliant
// 2. Complete ISO 8610 + RFC822 (time zone) compliant (JCR 1.6 and prior)
Calendar isoCalendar = null;
try
{
isoCalendar = ISO8601.parse(parts[0], JCR_FORMATS);
String[] calendarFields = parts[1].split(CALENDAR_FIELDS_DELIMITER);
if (calendarFields.length == 4)
{
try
{
isoCalendar.setLenient(Boolean.parseBoolean(calendarFields[0]));
isoCalendar.setFirstDayOfWeek(Integer.parseInt(calendarFields[1]));
isoCalendar.setMinimalDaysInFirstWeek(Integer.parseInt(calendarFields[2]));
isoCalendar.setTimeZone(Tools.getTimeZone(calendarFields[3]));
}
catch (Exception e)
{
log.warn("Can't parse serialized fields for the calendar [" + parts[1] + "] but calendar has ["
+ isoCalendar.getTime() + "]", e);
}
}
else
{
log.warn("Fields of the calendar is not serialized properly [" + parts[1] + "] but calendar has ["
+ isoCalendar.getTime() + "]");
}
}
catch (ParseException e)
{
throw new ValueFormatException(e);
}
return isoCalendar;
}
throw new ValueFormatException("Can't deserialize calendar string [" + serString + "]");
} |
python | def merge(self, other):
"""
We can merge unless the merge results in an empty set -- a
contradiction
"""
other = self.coerce(other)
if self.is_equal(other):
# pick among dependencies
return self
elif self.is_contradictory(other):
raise Contradiction("Cannot merge set with %s" % (str(other)))
else:
# self may be a subset of other
# or other may be a subset of self
# merge mutual information
if self.values:
self.values = self.values.union(other.values)
else:
self.values = other.values.copy()
return self |
python | def _get_hparams_path():
"""Get hyper-parameters file path."""
hparams_path = None
if FLAGS.output_dir:
hparams_path = os.path.join(FLAGS.output_dir, "hparams.json")
else:
    tf.logging.warning(
        "--output_dir not specified. Hyper-parameters will be inferred from "
        "--hparams_set and --hparams only. These may not match training-time "
        "hyper-parameters.")
return hparams_path |
java | @Deprecated
public static int decompose(char[] src,int srcStart, int srcLimit,
char[] dest,int destStart, int destLimit,
boolean compat, int options) {
CharBuffer srcBuffer = CharBuffer.wrap(src, srcStart, srcLimit - srcStart);
CharsAppendable app = new CharsAppendable(dest, destStart, destLimit);
getDecomposeNormalizer2(compat, options).normalize(srcBuffer, app);
return app.length();
} |
java | public static String normalize(String val) {
if (val == null || val.trim().length() == 0) return null;
return val.trim();
} |
java | @Override public XGBoostModel createImpl() {
XGBoostV3.XGBoostParametersV3 p = this.parameters;
XGBoostModel.XGBoostParameters parms = p.createImpl();
return new XGBoostModel(model_id.key(), parms, new XGBoostOutput(null), null, null);
} |
python | def push(self, undoObj):
"""
Add ``undoObj`` command to stack and run its ``commit`` method.
|Args|
* ``undoObj`` (**QtmacsUndoCommand**): the new command object.
|Returns|
* **None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Check type of input arguments.
if not isinstance(undoObj, QtmacsUndoCommand):
raise QtmacsArgumentError('undoObj', 'QtmacsUndoCommand',
inspect.stack()[0][3])
# Flag that the last action was not an undo action and push
# the command to the stack.
self._wasUndo = False
self._push(undoObj) |
python | def s_magic(sfile, anisfile="specimens.txt", dir_path=".", atype="AMS",
coord_type="s", sigma=False, samp_con="1", specnum=0,
location="unknown", spec="unknown", sitename="unknown",
user="", data_model_num=3, name_in_file=False, input_dir_path=""):
"""
converts .s format data to measurements format.
Parameters
----------
sfile : str
.s format file, required
anisfile : str
specimen filename, default 'specimens.txt'
dir_path : str
output directory, default "."
atype : str
anisotropy type (AMS, AARM, ATRM, default AMS)
coord_type : str
coordinate system ('s' for specimen, 't' for tilt-corrected,
or 'g' for geographic, default 's')
sigma : bool
if True, last column has sigma, default False
samp_con : str
sample/site naming convention, default '1', see info below
specnum : int
number of characters to designate a specimen, default 0
location : str
location name, default "unknown"
spec : str
specimen name, default "unknown"
sitename : str
site name, default "unknown"
user : str
user name, default ""
data_model_num : int
MagIC data model 2 or 3, default 3
name_in_file : bool
first entry of each line is specimen name, default False
input_dir_path : input directory path IF different from dir_path, default ""
Returns
---------
    Tuple : (True or False indicating if conversion was successful, output file name written)
Input format
--------
X11,X22,X33,X12,X23,X13 (.s format file)
X11,X22,X33,X12,X23,X13,sigma (.s format file with -sig option)
SID, X11,X22,X33,X12,X23,X13 (.s format file with -n option)
Info
--------
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
"""
con, Z = "", 1
if samp_con:
samp_con = str(samp_con)
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "option [4] must be in form 4-Z where Z is an integer"
else:
Z = samp_con.split("-")[1]
samp_con = "4"
if samp_con == '6':
print("option [6] is not currently supported")
            return False, "option [6] is not currently supported"
else:
samp_con = con
coord_dict = {'s': '-1', 't': '100', 'g': '0'}
coord = coord_dict.get(coord_type, '-1')
specnum = -specnum
if data_model_num == 2:
specimen_col = "er_specimen_name"
sample_col = "er_sample_name"
site_col = "er_site_name"
loc_col = "er_location_name"
citation_col = "er_citation_names"
analyst_col = "er_analyst_mail_names"
aniso_type_col = "anisotropy_type"
experiment_col = "magic_experiment_names"
sigma_col = "anisotropy_sigma"
unit_col = "anisotropy_unit"
tilt_corr_col = "anisotropy_tilt_correction"
method_col = "magic_method_codes"
outfile_type = "rmag_anisotropy"
else:
specimen_col = "specimen"
sample_col = "sample"
site_col = "site"
loc_col = "location"
citation_col = "citations"
analyst_col = "analysts"
aniso_type_col = "aniso_type"
experiment_col = "experiments"
sigma_col = "aniso_s_sigma"
unit_col = "aniso_s_unit"
tilt_corr_col = "aniso_tilt_correction"
method_col = "method_codes"
outfile_type = "specimens"
# get down to bidness
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
sfile = pmag.resolve_file_name(sfile, input_dir_path)
anisfile = pmag.resolve_file_name(anisfile, dir_path)
try:
with open(sfile, 'r') as f:
lines = f.readlines()
except FileNotFoundError:
return False, "No such file: {}".format(sfile)
AnisRecs = []
citation = "This study"
# read in data
for line in lines:
AnisRec = {}
rec = line.split()
if name_in_file:
k = 1
spec = rec[0]
else:
k = 0
trace = float(rec[k])+float(rec[k+1])+float(rec[k+2])
s1 = '%10.9e' % (float(rec[k]) / trace)
s2 = '%10.9e' % (float(rec[k+1]) / trace)
s3 = '%10.9e' % (float(rec[k+2]) / trace)
s4 = '%10.9e' % (float(rec[k+3]) / trace)
s5 = '%10.9e' % (float(rec[k+4]) / trace)
s6 = '%10.9e' % (float(rec[k+5]) / trace)
AnisRec[citation_col] = citation
AnisRec[specimen_col] = spec
if specnum != 0:
AnisRec[sample_col] = spec[:specnum]
else:
AnisRec[sample_col] = spec
# if samp_con == "6":
# for samp in Samps:
# if samp['er_sample_name'] == AnisRec["er_sample_name"]:
# sitename = samp['er_site_name']
# location = samp['er_location_name']
if samp_con != "":
sitename = pmag.parse_site(AnisRec[sample_col], samp_con, Z)
AnisRec[loc_col] = location
AnisRec[site_col] = sitename
AnisRec[analyst_col] = user
if atype == 'AMS':
AnisRec[aniso_type_col] = "AMS"
AnisRec[experiment_col] = spec+":LP-X"
else:
AnisRec[aniso_type_col] = atype
AnisRec[experiment_col] = spec+":LP-"+atype
if data_model_num != 3:
AnisRec["anisotropy_s1"] = s1
AnisRec["anisotropy_s2"] = s2
AnisRec["anisotropy_s3"] = s3
AnisRec["anisotropy_s4"] = s4
AnisRec["anisotropy_s5"] = s5
AnisRec["anisotropy_s6"] = s6
else:
AnisRec['aniso_s'] = ":".join(
[str(s) for s in [s1, s2, s3, s4, s5, s6]])
if sigma:
AnisRec[sigma_col] = '%10.8e' % (
float(rec[k+6]) / trace)
AnisRec[unit_col] = 'SI'
AnisRec[tilt_corr_col] = coord
AnisRec[method_col] = 'LP-' + atype
AnisRecs.append(AnisRec)
pmag.magic_write(anisfile, AnisRecs, outfile_type)
print('data saved in ', anisfile)
# try to extract location/site/sample info into tables
con = cb.Contribution(dir_path, custom_filenames={"specimens": anisfile})
con.propagate_all_tables_info()
for table in con.tables:
if table in ['samples', 'sites', 'locations']:
# add in location name by hand
if table == 'sites':
con.tables['sites'].df['location'] = location
con.write_table_to_file(table)
return True, anisfile |
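A hedged call sketch for the converter above; the import location, file names and naming convention are invented for the example:

# Assumed import path; in recent PmagPy releases s_magic lives in the conversion module.
from pmagpy import convert_2_magic as convert

ok, outfile = convert.s_magic("example.s", anisfile="specimens.txt",
                              dir_path="my_project", atype="AMS",
                              coord_type="g", sigma=True, samp_con="3",
                              location="Example Location")
if ok:
    print("anisotropy data written to", outfile)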
java | @Override
public Object getObject (String key) {
// separate the key into path components, the "local" key value is the first component, so
// use that to conduct the search. We are only interested in values that indicate the search
// found the requested key
String[] path = Key.split (key);
int index = binarySearch (path[0]);
if (index >= 0) {
// grab the found element... if the path was only one element long, this is the element
// we were looking for, otherwise recur on the found element as another BagObject
Pair pair = container[index];
Object found = pair.value;
return (path.length == 1) ? found : ((Bag) found).getObject (path[1]);
}
return null;
} |
python | def describe_api_integration_response(restApiId, resourcePath, httpMethod, statusCode,
region=None, key=None, keyid=None, profile=None):
'''
Get an integration response for a given method in a given API
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.describe_api_integration_response restApiId resourcePath httpMethod statusCode
'''
try:
resource = describe_api_resource(restApiId, resourcePath, region=region,
key=key, keyid=keyid, profile=profile).get('resource')
if resource:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
response = conn.get_integration_response(restApiId=restApiId, resourceId=resource['id'],
httpMethod=httpMethod, statusCode=statusCode)
return {'response': _convert_datetime_str(response)}
return {'error': 'no such resource'}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} |
java | public ResolvableType getComponentType() {
if (this == NONE) {
return NONE;
}
if (this.componentType != null) {
return this.componentType;
}
if (this.type instanceof Class) {
Class<?> componentType = ((Class<?>) this.type).getComponentType();
return forType(componentType, this.variableResolver);
}
if (this.type instanceof GenericArrayType) {
return forType(((GenericArrayType) this.type).getGenericComponentType(), this.variableResolver);
}
return resolveType().getComponentType();
} |
python | def moving_average(self, window, method=SIMPLE):
'''Calculate a moving average using the specified method and window'''
if len(self.points) < window:
raise ArithmeticError('Not enough points for moving average')
numpy = LazyImport.numpy()
if method == TimeSeries.SIMPLE:
weights = numpy.ones(window) / float(window)
ma_x = self.timestamps[window-1:]
ma_y = numpy.convolve(self.values, weights)[window-1:-(window-1)].tolist()
return TimeSeries(zip(ma_x, ma_y)) |
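A usage sketch for the simple moving average above; the TimeSeries constructor taking (timestamp, value) pairs is inferred from the zip() in the return statement and is therefore an assumption:

# Assumed: TimeSeries accepts an iterable of (timestamp, value) pairs.
points = list(enumerate([3.0, 5.0, 4.0, 6.0, 8.0, 7.0, 9.0]))
series = TimeSeries(points)

smoothed = series.moving_average(window=3)   # SIMPLE method by default
print(list(zip(smoothed.timestamps, smoothed.values)))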
python | def _groupby_and_merge(by, on, left, right, _merge_pieces,
check_duplicates=True):
"""
groupby & merge; we are always performing a left-by type operation
Parameters
----------
by: field to group
on: duplicates field
left: left frame
right: right frame
_merge_pieces: function for merging
check_duplicates: boolean, default True
should we check & clean duplicates
"""
pieces = []
if not isinstance(by, (list, tuple)):
by = [by]
lby = left.groupby(by, sort=False)
# if we can groupby the rhs
# then we can get vastly better perf
try:
# we will check & remove duplicates if indicated
if check_duplicates:
if on is None:
on = []
elif not isinstance(on, (list, tuple)):
on = [on]
if right.duplicated(by + on).any():
right = right.drop_duplicates(by + on, keep='last')
rby = right.groupby(by, sort=False)
except KeyError:
rby = None
for key, lhs in lby:
if rby is None:
rhs = right
else:
try:
rhs = right.take(rby.indices[key])
except KeyError:
# key doesn't exist in left
lcols = lhs.columns.tolist()
cols = lcols + [r for r in right.columns
if r not in set(lcols)]
merged = lhs.reindex(columns=cols)
merged.index = range(len(merged))
pieces.append(merged)
continue
merged = _merge_pieces(lhs, rhs)
# make sure join keys are in the merged
# TODO, should _merge_pieces do this?
for k in by:
try:
if k in merged:
merged[k] = key
except KeyError:
pass
pieces.append(merged)
# preserve the original order
# if we have a missing piece this can be reset
from pandas.core.reshape.concat import concat
result = concat(pieces, ignore_index=True)
result = result.reindex(columns=pieces[0].columns, copy=False)
return result, lby |
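This helper sits behind pandas' ordered merges; the same group-then-merge behaviour can be exercised through the public API, for example merge_ordered with left_by:

import pandas as pd

left = pd.DataFrame({"group": ["a", "a", "b"],
                     "key":   [1, 3, 2],
                     "lval":  [10, 30, 20]})
right = pd.DataFrame({"key":  [1, 2, 3],
                      "rval": [100, 200, 300]})

# 'right' is merged into each 'group' of 'left' separately, then recombined,
# which mirrors the per-group loop implemented above.
out = pd.merge_ordered(left, right, on="key", left_by="group")
print(out)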
java | protected void updateIndex(I_CmsSearchIndex index, I_CmsReport report, List<CmsPublishedResource> resourcesToIndex)
throws CmsException {
if (shouldUpdateAtAll(index)) {
try {
SEARCH_MANAGER_LOCK.lock();
// copy the stored admin context for the indexing
CmsObject cms = OpenCms.initCmsObject(m_adminCms);
// make sure a report is available
if (report == null) {
report = new CmsLogReport(cms.getRequestContext().getLocale(), CmsSearchManager.class);
}
// check if the index has been configured correctly
if (!index.checkConfiguration(cms)) {
// the index is disabled
return;
}
// set site root and project for this index
cms.getRequestContext().setSiteRoot("/");
// switch to the index project
cms.getRequestContext().setCurrentProject(cms.readProject(index.getProject()));
if ((resourcesToIndex == null) || resourcesToIndex.isEmpty()) {
// rebuild the complete index
updateIndexCompletely(cms, index, report);
} else {
updateIndexIncremental(cms, index, report, resourcesToIndex);
}
} finally {
SEARCH_MANAGER_LOCK.unlock();
}
}
} |
python | def _molfile(stream):
"""Process ``Molfile``.
:param stream: Queue containing lines of text.
:type stream: :py:class:`collections.deque`
:return: Tuples of data.
"""
yield MolfileStart()
yield HeaderBlock(stream.popleft().strip(), stream.popleft().strip(), stream.popleft().strip())
# yield from _ctab(stream)
for token in _ctab(stream):
yield token
yield MolfileEnd() |