language (stringclasses 2) | func_code_string (stringlengths 63 to 466k) |
---|---|
python | def lookup_announce_alias(name):
""" Get canonical alias name and announce URL list for the given alias.
"""
for alias, urls in announce.items():
if alias.lower() == name.lower():
return alias, urls
raise KeyError("Unknown alias %s" % (name,)) |
java | public List<CmsRelation> readRelations(CmsRelationFilter filter) throws CmsException {
return m_securityManager.getRelationsForResource(m_context, null, filter);
} |
java | public static int[] calculateBlockGap(int[][][] optAln){
//Initialize the array to be returned
int [] blockGap = new int[optAln.length];
//Loop for every block and look in both chains for non-contiguous residues.
for (int i=0; i<optAln.length; i++){
int gaps = 0; //the number of gaps in that block
int last1 = 0; //the last residue position in chain 1
int last2 = 0; //the last residue position in chain 2
//Loop for every position in the block
for (int j=0; j<optAln[i][0].length; j++){
//If the first position is evaluated initialize the last positions
if (j==0){
last1 = optAln[i][0][j];
last2 = optAln[i][1][j];
}
else{
//If one of the positions or both are not contiguous increment the number of gaps
if (optAln[i][0][j] > last1+1 || optAln[i][1][j] > last2+1){
gaps++;
last1 = optAln[i][0][j];
last2 = optAln[i][1][j];
}
//Otherwise just set the last position to the current one
else{
last1 = optAln[i][0][j];
last2 = optAln[i][1][j];
}
}
}
blockGap[i] = gaps;
}
return blockGap;
} |
python | def get_calltip(project, source_code, offset, resource=None,
maxfixes=1, ignore_unknown=False, remove_self=False):
"""Get the calltip of a function
The format of the returned string is
``module_name.holding_scope_names.function_name(arguments)``. For
classes `__init__()` and for normal objects `__call__()` function
is used.
Note that the offset is on the function itself *not* after its
open parenthesis. (Actually it used to be the other way but it
was easily confused when string literals were involved. So I
decided it is better for it not to try to be too clever when it
cannot be clever enough). You can use a simple search like::
offset = source_code.rindex('(', 0, offset) - 1
to handle simple situations.
If `ignore_unknown` is `True`, `None` is returned for functions
without source-code like builtins and extensions.
If `remove_self` is `True`, the first parameter whose name is self
will be removed for methods.
"""
fixer = fixsyntax.FixSyntax(project, source_code, resource, maxfixes)
pyname = fixer.pyname_at(offset)
if pyname is None:
return None
pyobject = pyname.get_object()
return PyDocExtractor().get_calltip(pyobject, ignore_unknown, remove_self) |
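A small, self-contained illustration of the offset convention described in the docstring above (the offset points at the callable's name, not past its opening parenthesis); the actual `get_calltip` call additionally needs a rope `project` object, which is omitted from this sketch:

```python
source_code = "import math\nprint(math.sqrt(2))\n"

# Suppose the caret sits just past "math.sqrt(": rewind onto the name itself.
caret = source_code.index("math.sqrt(") + len("math.sqrt(")
offset = source_code.rindex("(", 0, caret) - 1
print(source_code[offset - 3:offset + 1])  # "sqrt"

# get_calltip(project, source_code, offset) would then be called with a rope
# Project instance, which is not constructed here.
```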
java | public void addMasterState(MasterState state) {
checkNotNull(state);
synchronized (lock) {
if (!discarded) {
masterState.add(state);
}
}
} |
java | public static String addTo(Message message) {
if (message.getStanzaId() == null) {
message.setStanzaId(StanzaIdUtil.newStanzaId());
}
message.addExtension(new DeliveryReceiptRequest());
return message.getStanzaId();
} |
java | protected void processLayer(GrayF32 image1 , GrayF32 image2 ,
GrayF32 deriv1X , GrayF32 deriv1Y,
GrayF32 deriv2X , GrayF32 deriv2Y,
GrayF32 deriv2XX , GrayF32 deriv2YY, GrayF32 deriv2XY) {
int N = image1.width*image1.height;
int stride = image1.stride;
// outer Taylor expansion iterations
for( int indexOuter = 0; indexOuter < numOuter; indexOuter++ ) {
// warp the image and the first + second derivatives
warpImageTaylor(image2, flowU, flowV, warpImage2);
warpImageTaylor(deriv2X, flowU, flowV, warpDeriv2X);
warpImageTaylor(deriv2Y, flowU, flowV, warpDeriv2Y);
warpImageTaylor(deriv2XX, flowU, flowV, warpDeriv2XX);
warpImageTaylor(deriv2YY, flowU, flowV, warpDeriv2YY);
warpImageTaylor(deriv2XY, flowU, flowV, warpDeriv2XY);
gradient.process(flowU,derivFlowUX,derivFlowUY);
gradient.process(flowV,derivFlowVX,derivFlowVY);
computePsiSmooth(derivFlowUX,derivFlowUY,derivFlowVX,derivFlowVY,psiSmooth);
computeDivUVD(flowU, flowV,psiSmooth,divU,divV,divD);
// initialize the motion increments to zero
Arrays.fill(du.data,0,N,0);
Arrays.fill(dv.data,0,N,0);
for( int indexInner = 0; indexInner < numInner; indexInner++ ) {
computePsiDataPsiGradient(image1, image2,
deriv1X, deriv1Y,
deriv2X, deriv2Y, deriv2XX, deriv2YY, deriv2XY,
du, dv, psiData, psiGradient);
float error;
int iter = 0;
do {
// inner SOR iteration.
error = 0;
// inner portion
for (int y = 1; y < image1.height - 1; y++) {
int i = y * image1.width + 1;
for (int x = 1; x < image1.width - 1; x++, i++) {
error += iterationSor(image1, deriv1X, deriv1Y, i, i + 1, i - 1, i + stride, i - stride);
}
}
// border regions require special treatment
int y0 = 0;
int y1 = image1.height-1;
for (int x = 0; x < image1.width; x++ ) {
error += iterationSor(image1, deriv1X, deriv1Y,
s(x, y0), s(x + 1, y0), s(x - 1, y0), s(x, y0 - 1), s(x, y0 + 1));
error += iterationSor(image1, deriv1X, deriv1Y,
s(x, y1), s(x + 1, y1), s(x - 1, y1), s(x, y1 - 1), s(x, y1 + 1));
}
int x0 = 0;
int x1 = image1.width-1;
for (int y = 1; y < image1.height - 1; y++) {
error += iterationSor(image1, deriv1X, deriv1Y,
s(x0, y), s(x0 - 1, y), s(x0 + 1, y), s(x0, y - 1), s(x0, y + 1));
error += iterationSor(image1, deriv1X, deriv1Y,
s(x1, y), s(x1 - 1, y), s(x1 + 1, y), s(x1, y - 1), s(x1, y + 1));
}
} while (error > convergeTolerance * image1.width * image1.height && ++iter < maxIterationsSor);
}
// update the flow with the motion increments
PixelMath.add(flowU,du, flowU);
PixelMath.add(flowV,dv, flowV);
}
} |
python | def _parse_node(graph, text, condition_node_params, leaf_node_params):
"""parse dumped node"""
match = _NODEPAT.match(text)
if match is not None:
node = match.group(1)
graph.node(node, label=match.group(2), **condition_node_params)
return node
match = _LEAFPAT.match(text)
if match is not None:
node = match.group(1)
graph.node(node, label=match.group(2), **leaf_node_params)
return node
raise ValueError('Unable to parse node: {0}'.format(text)) |
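A hedged, standalone sketch of the same dispatch-on-regex pattern; the `NODEPAT`/`LEAFPAT` expressions below are illustrative approximations of xgboost's text-dump format, not the module's real `_NODEPAT`/`_LEAFPAT`, and graphviz is left out so the snippet runs on its own:

```python
import re

# Illustrative approximations of xgboost's text-dump format; the real module
# defines its own _NODEPAT / _LEAFPAT and feeds the results into graphviz.
NODEPAT = re.compile(r"(\d+):\[(.+)\]")
LEAFPAT = re.compile(r"(\d+):(leaf=.+)")

def parse_node(text):
    """Return (node_id, label, kind) for a dumped tree line."""
    match = NODEPAT.match(text)
    if match is not None:
        return match.group(1), match.group(2), "split"
    match = LEAFPAT.match(text)
    if match is not None:
        return match.group(1), match.group(2), "leaf"
    raise ValueError("Unable to parse node: {0}".format(text))

print(parse_node("0:[f0<0.5]"))    # ('0', 'f0<0.5', 'split')
print(parse_node("2:leaf=0.375"))  # ('2', 'leaf=0.375', 'leaf')
```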
python | def get_all_rooted_subtrees_as_lists(self, start_location=None):
"""Return a list of all rooted subtrees (each as a list of Location objects)."""
if start_location is not None and start_location not in self._location_to_children:
raise AssertionError(u'Received invalid start_location {} that was not present '
u'in the tree. Present root locations of complex @optional '
u'queries (ones that expand vertex fields within) are: {}'
.format(start_location, self._location_to_children.keys()))
if start_location is None:
start_location = self._root_location
if len(self._location_to_children[start_location]) == 0:
# Node with no children only returns a singleton list containing the null set.
return [[]]
current_children = sorted(self._location_to_children[start_location])
# Recursively find all rooted subtrees of each of the children of the current node.
location_to_list_of_subtrees = {
location: list(self.get_all_rooted_subtrees_as_lists(location))
for location in current_children
}
# All subsets of direct child Location objects
all_location_subsets = [
list(subset)
for subset in itertools.chain(*[
itertools.combinations(current_children, x)
for x in range(0, len(current_children) + 1)
])
]
# For every possible subset of the children, and every combination of the chosen
# subtrees within, create a list of subtree Location lists.
new_subtrees_as_lists = []
for location_subset in all_location_subsets:
all_child_subtree_possibilities = [
location_to_list_of_subtrees[location]
for location in location_subset
]
all_child_subtree_combinations = itertools.product(*all_child_subtree_possibilities)
for child_subtree_combination in all_child_subtree_combinations:
merged_child_subtree_combination = list(itertools.chain(*child_subtree_combination))
new_subtree_as_list = location_subset + merged_child_subtree_combination
new_subtrees_as_lists.append(new_subtree_as_list)
return new_subtrees_as_lists |
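The subset-times-product recursion above, restated as a standalone function over a plain adjacency dict (the tree below is made up for illustration):

```python
import itertools

def rooted_subtrees(tree, root):
    """All rooted subtrees of `tree` (an adjacency dict), each as a list of
    the non-root nodes it contains, mirroring the recursion above."""
    children = sorted(tree.get(root, []))
    if not children:
        return [[]]
    subtrees_per_child = {c: rooted_subtrees(tree, c) for c in children}
    results = []
    for r in range(len(children) + 1):
        for subset in itertools.combinations(children, r):
            options = [subtrees_per_child[c] for c in subset]
            for combo in itertools.product(*options):
                merged = list(itertools.chain(*combo))
                results.append(list(subset) + merged)
    return results

tree = {"root": ["a", "b"], "a": ["c"], "b": [], "c": []}  # made-up example tree
print(rooted_subtrees(tree, "root"))
# [[], ['a'], ['a', 'c'], ['b'], ['a', 'b'], ['a', 'b', 'c']]
```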
java | private String deriveId(final String idName) {
// Find parent naming context
NamingContextable parent = WebUtilities.getParentNamingContext(this);
// No Parent
if (parent == null) {
return idName;
}
// Get ID prefix
String prefix = parent.getNamingContextId();
// No Prefix, just use id name
if (prefix.length() == 0) {
return idName;
}
// Add Prefix
StringBuffer nameBuf = new StringBuffer(prefix.length() + idName.length() + 1);
nameBuf.append(prefix);
nameBuf.append(ID_CONTEXT_SEPERATOR);
nameBuf.append(idName);
return nameBuf.toString();
} |
java | public String transformDDL(String ddl) {
return transformQuery(ddl, dropTableIfExistsDdlTransformer,
varcharBytesDdlTransformer, varbinaryDdlTransformer,
tinyintDdlTransformer, assumeUniqueDdlTransformer);
} |
java | public static int clen(int values, int bpv) {
int len = (values*bpv) >> 3;
return values*bpv % 8 == 0 ? len : len + 1;
} |
python | def download_to_file(self, file_obj, client=None, start=None, end=None):
"""Download the contents of this blob into a file-like object.
.. note::
If the server-set property, :attr:`media_link`, is not yet
initialized, makes an additional API request to load it.
Downloading a file that has been encrypted with a `customer-supplied`_
encryption key:
.. literalinclude:: snippets.py
:start-after: [START download_to_file]
:end-before: [END download_to_file]
:dedent: 4
The ``encryption_key`` should be a str or bytes with a length of at
least 32.
For more fine-grained control over the download process, check out
`google-resumable-media`_. For example, this library allows
downloading **parts** of a blob rather than the whole thing.
If :attr:`user_project` is set on the bucket, bills the API request
to that project.
:type file_obj: file
:param file_obj: A file handle to which to write the blob's data.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:type start: int
:param start: Optional, the first byte in a range to be downloaded.
:type end: int
:param end: Optional, The last byte in a range to be downloaded.
:raises: :class:`google.cloud.exceptions.NotFound`
"""
download_url = self._get_download_url()
headers = _get_encryption_headers(self._encryption_key)
headers["accept-encoding"] = "gzip"
transport = self._get_transport(client)
try:
self._do_download(transport, file_obj, download_url, headers, start, end)
except resumable_media.InvalidResponse as exc:
_raise_from_invalid_response(exc) |
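A hedged usage sketch for the download API above; it assumes the `google-cloud-storage` client library, valid credentials, and made-up bucket/object names:

```python
from google.cloud import storage  # assumes google-cloud-storage and valid credentials

client = storage.Client()
bucket = client.bucket("my-bucket")        # hypothetical bucket name
blob = bucket.blob("path/to/object.bin")   # hypothetical object name
with open("object.bin", "wb") as fh:
    blob.download_to_file(fh, start=0, end=1023)  # fetch only the first 1 KiB
```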
java | private void postInitialize() {
if (log.isDebugEnabled()) {
log.debug("FLVReader 1 - Buffer size: {} position: {} remaining: {}", new Object[] { getTotalBytes(), getCurrentPosition(), getRemainingBytes() });
}
if (getRemainingBytes() >= 9) {
decodeHeader();
}
if (file != null) {
keyframeMeta = analyzeKeyFrames();
}
long old = getCurrentPosition();
log.debug("Position: {}", old);
} |
python | def stack_2_eqn(self,p):
"""returns equation string for program stack"""
stack_eqn = []
if p: # if stack is not empty
for n in p.stack:
self.eval_eqn(n,stack_eqn)
return stack_eqn[-1]
return [] |
python | def template_exists(template_name):
'''
Determine if a given template exists so that it can be loaded
if so, or a default alternative can be used if not.
'''
try:
template.loader.get_template(template_name)
return True
except template.TemplateDoesNotExist:
return False |
python | def reset_directory(directory):
"""
Remove `directory` if it exists, then create it if it doesn't exist.
"""
if os.path.isdir(directory): shutil.rmtree(directory)
if not os.path.isdir(directory): os.makedirs(directory) |
python | def enqueue(self, job):
"""Enqueue a job for later processing, returns the new length of the queue
"""
if job.queue_name():
raise EnqueueError("job %s already queued!" % job.job_id)
new_len = self.redis.lpush(self.queue_name, job.serialize())
job.notify_queued(self)
return new_len |
python | def web_services_from_str(
list_splitter_fn=ujson.loads,
):
"""
parameters:
list_splitter_fn - a function that will take the json compatible string
representing a list of mappings.
"""
# -------------------------------------------------------------------------
def class_list_converter(collector_services_str):
"""This function becomes the actual converter used by configman to
take a string and convert it into the nested sequence of Namespaces,
one for each class in the list. It does this by creating a proxy
class stuffed with its own 'required_config' that's dynamically
generated."""
if isinstance(collector_services_str, basestring):
all_collector_services = list_splitter_fn(collector_services_str)
else:
raise TypeError('must be an instance of basestring')
# =====================================================================
class InnerClassList(RequiredConfig):
"""This nested class is a proxy list for the classes. It collects
all the config requirements for the listed classes and places them
each into their own Namespace.
"""
# we're dynamically creating a class here. The following block of
# code is actually adding class level attributes to this new class
# 1st requirement for configman
required_config = Namespace()
# to help the programmer know what Namespaces we added
subordinate_namespace_names = []
# for display
original_input = collector_services_str.replace('\n', '\\n')
# for each class in the class list
service_list = []
for namespace_index, collector_service_element in enumerate(
all_collector_services
):
service_name = collector_service_element['name']
service_uri = collector_service_element['uri']
service_implementation_class = class_converter(
collector_service_element['service_implementation_class']
)
service_list.append(
(
service_name,
service_uri,
service_implementation_class,
)
)
subordinate_namespace_names.append(service_name)
# create the new Namespace
required_config.namespace(service_name)
a_class_namespace = required_config[service_name]
a_class_namespace.add_option(
"service_implementation_class",
doc='fully qualified classname for a class that implements '
'the action associated with the URI',
default=service_implementation_class,
from_string_converter=class_converter,
likely_to_be_changed=True,
)
a_class_namespace.add_option(
"uri",
doc='uri for this service',
default=service_uri,
likely_to_be_changed=True,
)
@classmethod
def to_str(cls):
"""this method takes this inner class object and turns it back
into the original string of classnames. This is used
primarily as for the output of the 'help' option"""
return "'%s'" % cls.original_input
return InnerClassList # result of class_list_converter
return class_list_converter |
python | def inputChecks(**_params_):
"""
This is a function to check all the input for GET APIs.
"""
def checkTypes(_func_, _params_ = _params_):
log = clog.error_log
@wraps(_func_)
def wrapped(*args, **kw):
arg_names = _func_.__code__.co_varnames[:_func_.__code__.co_argcount]
ka = {}
ka.update(list(zip(arg_names, args)))
ka.update(kw)
#print ka
for name, value in ka.iteritems():
#In fact the framework removes all the input variables that are not in the args list of _addMethod.
#So DBS list API will never see these variables. For example, if one has
#http://hostname/cms_dbs/DBS/datatiers?name=abc, the API will get a request to list all the datatiers because
#"name=abc" is removed by the framework since name is not a keyword for the api.
if name !='self':
types = _params_[name]
#if name =='lumi_list': value = cjson.decode(value)
if not isinstance(value, types):
serverlog = "Expected '%s' to be %s; was %s." % (name, types, type(value))
#raise TypeError, "Expected '%s' to be %s; was %s." % (name, types, type(value))
dbsExceptionHandler("dbsException-invalid-input2", message="Invalid Input DataType %s for %s..." %(type(value), name[:10]),\
logger=log.error, serverError=serverlog)
else:
try:
if isinstance(value, basestring):
try:
value = str(value)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid value for %s" %name)
if name == 'dataset':
if '*' in value:
searchdataset(value)
else:
reading_dataset_check(value)
elif name =='lumi_list': value = cjson.decode(value)
elif name =='validFileOnly':
try:
int(value)
except Exception as e:
dbsExceptionHandler("dbsException-invalid-input2", message="invalid value for %s" %name, serverError="invalid value %s for %s" %(value, name), logger=log.error)
elif name =='sumOverLumi':
try:
int(value)
except Exception as e:
dbsExceptionHandler("dbsException-invalid-input2", message="invalid value for %s" %name, serverError="invalid value %s for %s" %(value, name), logger=log.error)
elif name =='block_name':
if '*' in value:
searchblock(value)
else:
reading_block_check(value)
elif name =='primary_ds_name':
if '*' in value: searchstr(value)
else: primdataset(value)
elif name =='processed_ds_name':
if '*' in value:
searchstr(value)
else:
reading_procds_check(value)
elif name=='logical_file_name':
if '*' in value:
searchstr(value)
else:
reading_lfn_check(value)
elif name=='processing_version':
procversion(value)
elif name=='global_tag':
if '*' in value: searchstr(value)
else: globalTag(value)
elif name == 'create_by':
DBSUser(value)
elif name == 'last_modified_by':
DBSUser(value)
else:
searchstr(value)
elif type(value) == list:
if name == 'logical_file_name':
for f in value:
if '*' in f:
searchstr(f)
else:
reading_lfn_check(f)
elif name == 'block_names':
for block_name in value:
reading_block_check(block_name)
elif name == 'run_num':
for run_num in value:
try:
int(run_num)
except Exception:
try:
min_run, max_run = run_num.split('-', 1)
int(min_run)
int(max_run)
except Exception as e:
serverLog = str(e) + "\n run_num=%s is an invalid run number." %run_num
dbsExceptionHandler("dbsException-invalid-input2", message="Invalid input data %s...: invalid run number." %run_num[:10],\
serverError=serverLog, logger=log.error)
elif name == 'dataset_id':
for id in value:
try:
int(id)
except Exception :
try:
min_id, max_id = id.split('-', 1)
int(min_id)
int(max_id)
except Exception as e :
serverLog = str(e) + "\n dataset_id=%s is an invalid oracle id." %id
dbsExceptionHandler("dbsException-invalid-input2", message="Invalid input data %s...: invalid dataset_id." %id[:10], \
serverError=serverLog, logger=log.error)
except AssertionError as ae:
serverLog = str(ae) + " key-value pair (%s, %s) cannot pass input checking" %(name, value)
#print ae
dbsExceptionHandler("dbsException-invalid-input2", message="Invalid Input Data %s...: Not Match Required Format" %value[:10],\
serverError=serverLog, logger=log.error)
except Exception as e1:
raise
return _func_(*args, **kw)
return wrapped
return checkTypes |
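A self-contained sketch of the decorator pattern `inputChecks` is built on: recover argument names from `__code__.co_varnames`, bind them to the received values, and type-check against the declared expectations. The DBS-specific validators and exception handler are omitted, and the function and parameter names below are hypothetical:

```python
from functools import wraps

def input_checks(**expected_types):
    """Type-check named arguments of the wrapped function against `expected_types`."""
    def check_types(func):
        @wraps(func)
        def wrapped(*args, **kw):
            # Recover the names of the positional arguments actually passed in.
            arg_names = func.__code__.co_varnames[:func.__code__.co_argcount]
            bound = dict(zip(arg_names, args))
            bound.update(kw)
            for name, value in bound.items():
                if name in expected_types and not isinstance(value, expected_types[name]):
                    raise TypeError("Expected %r to be %s; was %s"
                                    % (name, expected_types[name], type(value)))
            return func(*args, **kw)
        return wrapped
    return check_types

@input_checks(dataset=str, run_num=(int, list))
def list_files(dataset, run_num=1):   # hypothetical API function
    return dataset, run_num

print(list_files("/A/B/RAW", run_num=[1, 2]))  # passes both checks
try:
    list_files(123)
except TypeError as exc:
    print(exc)  # Expected 'dataset' to be <class 'str'>; was <class 'int'>
```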
java | public static final VersionRegEx create(int major, int minor, int patch,
String preRelease,
String buildMetaData) {
checkParams(major, minor, patch);
require(preRelease != null, "preRelease is null");
require(buildMetaData != null, "buildMetaData is null");
if (!isValidPreRelease(preRelease)) {
throw new VersionFormatException(
String.format("Illegal pre-release part: %s", preRelease));
} else if (!isValidBuildMetaData(buildMetaData)) {
throw new VersionFormatException(
String.format("Illegal build-meta-data part: %s", buildMetaData));
}
return new VersionRegEx(major, minor, patch, preRelease, buildMetaData);
} |
python | def parse_esmtp_extensions(message: str) -> Tuple[Dict[str, str], List[str]]:
"""
Parse an EHLO response from the server into a dict of {extension: params}
and a list of auth method names.
It might look something like:
220 size.does.matter.af.MIL (More ESMTP than Crappysoft!)
EHLO heaven.af.mil
250-size.does.matter.af.MIL offers FIFTEEN extensions:
250-8BITMIME
250-PIPELINING
250-DSN
250-ENHANCEDSTATUSCODES
250-EXPN
250-HELP
250-SAML
250-SEND
250-SOML
250-TURN
250-XADR
250-XSTA
250-ETRN
250-XGEN
250 SIZE 51200000
"""
esmtp_extensions = {}
auth_types = [] # type: List[str]
response_lines = message.split("\n")
# ignore the first line
for line in response_lines[1:]:
# To be able to communicate with as many SMTP servers as possible,
# we have to take the old-style auth advertisement into account,
# because:
# 1) Else our SMTP feature parser gets confused.
# 2) There are some servers that only advertise the auth methods we
# support using the old style.
auth_match = OLDSTYLE_AUTH_REGEX.match(line)
if auth_match is not None:
auth_type = auth_match.group("auth")
auth_types.append(auth_type.lower().strip())
# RFC 1869 requires a space between ehlo keyword and parameters.
# It's actually stricter, in that only spaces are allowed between
# parameters, but we're not going to check for that here. Note
# that the space isn't present if there are no parameters.
extensions = EXTENSIONS_REGEX.match(line)
if extensions is not None:
extension = extensions.group("ext").lower()
params = extensions.string[extensions.end("ext") :].strip()
esmtp_extensions[extension] = params
if extension == "auth":
auth_types.extend([param.strip().lower() for param in params.split()])
return esmtp_extensions, auth_types |
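A trimmed, runnable sketch of the same parse; `OLDSTYLE_AUTH_REGEX` and `EXTENSIONS_REGEX` below are plausible stand-ins for the module's real compiled patterns, and the sample reply is made up:

```python
import re
from typing import Dict, List, Tuple

# Plausible stand-ins for the module's compiled patterns, not the real ones.
OLDSTYLE_AUTH_REGEX = re.compile(r"auth=(?P<auth>.*)", flags=re.I)
EXTENSIONS_REGEX = re.compile(r"(?P<ext>[A-Za-z0-9][A-Za-z0-9\-]*) ?")

def parse_esmtp_extensions(message: str) -> Tuple[Dict[str, str], List[str]]:
    esmtp_extensions: Dict[str, str] = {}
    auth_types: List[str] = []
    for line in message.split("\n")[1:]:  # skip the first (hostname) line
        auth_match = OLDSTYLE_AUTH_REGEX.match(line)
        if auth_match is not None:
            auth_types.append(auth_match.group("auth").lower().strip())
        extensions = EXTENSIONS_REGEX.match(line)
        if extensions is not None:
            extension = extensions.group("ext").lower()
            params = extensions.string[extensions.end("ext"):].strip()
            esmtp_extensions[extension] = params
            if extension == "auth":
                auth_types.extend(p.strip().lower() for p in params.split())
    return esmtp_extensions, auth_types

reply = "server.example.org\n8BITMIME\nSIZE 51200000\nAUTH PLAIN LOGIN"
print(parse_esmtp_extensions(reply))
# ({'8bitmime': '', 'size': '51200000', 'auth': 'PLAIN LOGIN'}, ['plain', 'login'])
```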
java | public void addProperties(List<JpaProperty> queryProperties) {
for (JpaProperty prop : queryProperties) {
Bean bean = putIfAbsent(prop.getId());
if (!JpaProperty.BEAN_MARKER_PROPERTY_NAME.equals(prop.getPropertyName())) {
bean.addProperty(prop.getPropertyName(), prop.getValue());
}
}
} |
java | public static MozuUrl updateItemQuantityUrl(String orderId, String orderItemId, Integer quantity, String responseFields, String updateMode, String version)
{
UrlFormatter formatter = new UrlFormatter("/api/commerce/orders/{orderId}/items/{orderItemId}/quantity/{quantity}?updatemode={updateMode}&version={version}&responseFields={responseFields}");
formatter.formatUrl("orderId", orderId);
formatter.formatUrl("orderItemId", orderItemId);
formatter.formatUrl("quantity", quantity);
formatter.formatUrl("responseFields", responseFields);
formatter.formatUrl("updateMode", updateMode);
formatter.formatUrl("version", version);
return new MozuUrl(formatter.getResourceUrl(), MozuUrl.UrlLocation.TENANT_POD) ;
} |
python | def handle_data(self, data):
"""Function called for text nodes"""
if not self.silent:
possible_urls = re.findall(
r'(https?://[\w\d:#%/;$()~_?\-=\\\.&]*)', data)
# validate possible urls
# we'll transform them just in case
# they are valid.
if possible_urls and self.automatic_link_transformation:
for url in possible_urls:
if regex_url.search(url):
transformed_url = '<a href="%s">%s</a>' % (url, url)
data = data.replace(url, transformed_url)
self.result += data
else:
self.result += cgi.escape(data, True) |
python | def main():
""" Generate sequences."""
parser = OptionParser(conflict_handler="resolve")
parser.add_option('--humanTRA', '--human_T_alpha', action='store_true', dest='humanTRA', default=False, help='use default human TRA model (T cell alpha chain)')
parser.add_option('--humanTRB', '--human_T_beta', action='store_true', dest='humanTRB', default=False, help='use default human TRB model (T cell beta chain)')
parser.add_option('--mouseTRB', '--mouse_T_beta', action='store_true', dest='mouseTRB', default=False, help='use default mouse TRB model (T cell beta chain)')
parser.add_option('--humanIGH', '--human_B_heavy', action='store_true', dest='humanIGH', default=False, help='use default human IGH model (B cell heavy chain)')
parser.add_option('--VDJ_model_folder', dest='vdj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VDJ generative model')
parser.add_option('--VJ_model_folder', dest='vj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VJ generative model')
parser.add_option('-o', '--outfile', dest = 'outfile_name', metavar='PATH/TO/FILE', help='write CDR3 sequences to PATH/TO/FILE')
parser.add_option('-n', '--num_seqs', type='float', metavar='N', default = 0, dest='num_seqs_to_generate', help='specify the number of sequences to generate.')
parser.add_option('--seed', type='int', dest='seed', help='set seed for pseudorandom number generator. Default is to not set a seed.')
parser.add_option('--seqs_per_time_update', type='float', default = 100000, dest='seqs_per_time_update', help='specify the number of sequences between time updates. Default is 1e5')
parser.add_option('--conserved_J_residues', type='string', default = 'FVW', dest='conserved_J_residues', help="specify conserved J residues. Default is 'FVW'.")
parser.add_option('--time_updates_off', action='store_false', dest='time_updates', default=True, help='turn time updates off.')
parser.add_option('--seq_type', type='choice', default = 'all', dest='seq_type', choices=['all', 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'], help="declare sequence type for output sequences. Choices: 'all' [default], 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'")
parser.add_option('--record_genes_off', action='store_false', dest="record_genes", default=True, help='turn off recording V and J gene info.')
parser.add_option('-d', '--delimiter', type='choice', dest='delimiter', choices=['tab', 'space', ',', ';', ':'], help="declare delimiter choice. Default is tab for .tsv output files, comma for .csv files, and tab for all others. Choices: 'tab', 'space', ',', ';', ':'")
parser.add_option('--raw_delimiter', type='str', dest='delimiter', help="declare delimiter choice as a raw string.")
(options, args) = parser.parse_args()
main_folder = os.path.dirname(__file__)
default_models = {}
default_models['humanTRA'] = [os.path.join(main_folder, 'default_models', 'human_T_alpha'), 'VJ']
default_models['humanTRB'] = [os.path.join(main_folder, 'default_models', 'human_T_beta'), 'VDJ']
default_models['mouseTRB'] = [os.path.join(main_folder, 'default_models', 'mouse_T_beta'), 'VDJ']
default_models['humanIGH'] = [os.path.join(main_folder, 'default_models', 'human_B_heavy'), 'VDJ']
num_models_specified = sum([1 for x in default_models.keys() + ['vj_model_folder', 'vdj_model_folder'] if getattr(options, x)])
if num_models_specified == 1: #exactly one model specified
try:
d_model = [x for x in default_models.keys() if getattr(options, x)][0]
model_folder = default_models[d_model][0]
recomb_type = default_models[d_model][1]
except IndexError:
if options.vdj_model_folder: #custom VDJ model specified
model_folder = options.vdj_model_folder
recomb_type = 'VDJ'
elif options.vj_model_folder: #custom VJ model specified
model_folder = options.vj_model_folder
recomb_type = 'VJ'
elif num_models_specified == 0:
print 'Need to indicate generative model.'
print 'Exiting...'
return -1
elif num_models_specified > 1:
print 'Only specify one model'
print 'Exiting...'
return -1
#Check that all model and genomic files exist in the indicated model folder
if not os.path.isdir(model_folder):
print 'Check pathing... cannot find the model folder: ' + model_folder
print 'Exiting...'
return -1
params_file_name = os.path.join(model_folder,'model_params.txt')
marginals_file_name = os.path.join(model_folder,'model_marginals.txt')
V_anchor_pos_file = os.path.join(model_folder,'V_gene_CDR3_anchors.csv')
J_anchor_pos_file = os.path.join(model_folder,'J_gene_CDR3_anchors.csv')
for x in [params_file_name, marginals_file_name, V_anchor_pos_file, J_anchor_pos_file]:
if not os.path.isfile(x):
print 'Cannot find: ' + x
print 'Please check the files (and naming conventions) in the model folder ' + model_folder
print 'Exiting...'
return -1
if options.outfile_name is not None:
outfile_name = options.outfile_name
if os.path.isfile(outfile_name):
if not raw_input(outfile_name + ' already exists. Overwrite (y/n)? ').strip().lower() in ['y', 'yes']:
print 'Exiting...'
return -1
#Parse arguments
num_seqs_to_generate = int(options.num_seqs_to_generate)
if num_seqs_to_generate <= 0:
print 'Need to specify num_seqs (number of sequences to generate).'
print 'Exiting...'
return -1
#Parse default delimiter
delimiter = options.delimiter
if delimiter is None:
delimiter = '\t'
if options.outfile_name is not None:
if outfile_name.endswith('.tsv'):
delimiter = '\t'
elif outfile_name.endswith('.csv'):
delimiter = ','
else:
try:
delimiter = {'tab': '\t', 'space': ' ', ',': ',', ';': ';', ':': ':'}[delimiter]
except KeyError:
pass #Other raw string.
#Optional flags
seq_type = {'all': 'all', 'ntseq': 'ntseq', 'nucleotide': 'ntseq', 'aaseq': 'aaseq', 'amino_acid': 'aaseq'}[options.seq_type]
record_genes = options.record_genes
seqs_per_time_update = int(options.seqs_per_time_update)
time_updates = options.time_updates
conserved_J_residues = options.conserved_J_residues
if options.seed is not None:
np.random.seed(options.seed)
#VDJ recomb case --- used for TCRB and IGH
if recomb_type == 'VDJ':
genomic_data = load_model.GenomicDataVDJ()
genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)
generative_model = load_model.GenerativeModelVDJ()
generative_model.load_and_process_igor_model(marginals_file_name)
seq_gen = sequence_generation.SequenceGenerationVDJ(generative_model, genomic_data)
#VJ recomb case --- used for TCRA and light chain
elif recomb_type == 'VJ':
genomic_data = load_model.GenomicDataVJ()
genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)
generative_model = load_model.GenerativeModelVJ()
generative_model.load_and_process_igor_model(marginals_file_name)
seq_gen = sequence_generation.SequenceGenerationVJ(generative_model, genomic_data)
V_gene_names = [V[0].split('*')[0] for V in genomic_data.genV]
J_gene_names = [J[0].split('*')[0] for J in genomic_data.genJ]
if options.outfile_name is not None:
outfile = open(outfile_name, 'w')
print 'Starting sequence generation... '
start_time = time.time()
for i in range(num_seqs_to_generate):
ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3(conserved_J_residues)
if seq_type == 'all': #default, include both ntseq and aaseq
current_line_out = ntseq + delimiter + aaseq
elif seq_type == 'ntseq': #only record ntseq
current_line_out = ntseq
elif seq_type == 'aaseq': #only record aaseq
current_line_out = aaseq
if record_genes:
current_line_out += delimiter + V_gene_names[V_in] + delimiter + J_gene_names[J_in]
outfile.write(current_line_out + '\n')
if (i+1)%seqs_per_time_update == 0 and time_updates:
c_time = time.time() - start_time
eta = ((num_seqs_to_generate - (i+1))/float(i+1))*c_time
if c_time > 86400: #more than a day
c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
elif c_time > 3600: #more than an hr
c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
elif c_time > 60: #more than a min
c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60)
else:
c_time_str = '%.2f seconds.'%(c_time)
if eta > 86400: #more than a day
eta_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(eta)/86400, (int(eta)/3600)%24, (int(eta)/60)%60, eta%60)
elif eta > 3600: #more than an hr
eta_str = '%d hours, %d minutes, and %.2f seconds.'%((int(eta)/3600)%24, (int(eta)/60)%60, eta%60)
elif eta > 60: #more than a min
eta_str = '%d minutes and %.2f seconds.'%((int(eta)/60)%60, eta%60)
else:
eta_str = '%.2f seconds.'%(eta)
print '%d sequences generated in %s Estimated time remaining: %s'%(i+1, c_time_str, eta_str)
c_time = time.time() - start_time
if c_time > 86400: #more than a day
c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
elif c_time > 3600: #more than an hr
c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60)
elif c_time > 60: #more than a min
c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60)
else:
c_time_str = '%.2f seconds.'%(c_time)
print 'Completed generating all %d sequences in %s'%(num_seqs_to_generate, c_time_str)
outfile.close()
else: #print to stdout
for i in range(num_seqs_to_generate):
ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3(conserved_J_residues)
if seq_type == 'all': #default, include both ntseq and aaseq
current_line_out = ntseq + delimiter + aaseq
elif seq_type == 'ntseq': #only record ntseq
current_line_out = ntseq
elif seq_type == 'aaseq': #only record aaseq
current_line_out = aaseq
if record_genes:
current_line_out += delimiter + V_gene_names[V_in] + delimiter + J_gene_names[J_in]
print current_line_out |
python | def _parse_indices(self, indices):
r"""
This private method accepts a list of pores or throats and returns a
properly structured Numpy array of indices.
Parameters
----------
indices : multiple options
This argument can accept numerous different data types including
boolean masks, integers and arrays.
Returns
-------
A Numpy array of indices.
Notes
-----
This method should only be called by the method that is actually using
the locations, to avoid calling it multiple times.
"""
if indices is None:
indices = sp.array([], ndmin=1, dtype=int)
locs = sp.array(indices, ndmin=1)
# If boolean array, convert to indices
if locs.dtype == bool:
if sp.size(locs) == self.Np:
locs = self.Ps[locs]
elif sp.size(locs) == self.Nt:
locs = self.Ts[locs]
else:
raise Exception('Mask of locations must be either ' +
'Np or Nt long')
locs = locs.astype(dtype=int)
return locs |
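The boolean-mask branch above in isolation, using numpy directly where the original goes through its `sp` alias; the array sizes are made up:

```python
import numpy as np  # the original routes these calls through its `sp` alias

Np = 5                      # number of pores (made up for the example)
Ps = np.arange(Np)          # pore indices
mask = np.array([True, False, True, False, True])

locs = np.array(mask, ndmin=1)
if locs.dtype == bool:
    if locs.size == Np:
        locs = Ps[locs]
    else:
        raise Exception('Mask of locations must be either Np or Nt long')
locs = locs.astype(int)
print(locs)  # [0 2 4]
```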
python | def _validate(self):
"""Validate model data and save errors
"""
errors = {}
for name, validator in self._validators.items():
value = getattr(self, name)
try:
validator(self, value)
except ValidationError as e:
errors[name] = str(e)
self._validate_errors = errors |
python | def parse_file(self, sourcepath):
"""Parse an object-per-line JSON file into a log data dict"""
# Open input file and read JSON array:
with open(sourcepath, 'r') as logfile:
jsonlist = logfile.readlines()
# Set our attributes for this entry and add it to data.entries:
data = {}
data['entries'] = []
for line in jsonlist:
entry = self.parse_line(line)
data['entries'].append(entry)
if self.tzone:
for e in data['entries']:
e['tzone'] = self.tzone
# Return the parsed data
return data |
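A minimal, runnable sketch of the object-per-line parse, with `json.loads` standing in for the class's `parse_line` method and an in-memory buffer standing in for the source file:

```python
import io
import json

def parse_entries(fileobj, tzone=None):
    """Object-per-line parse; json.loads stands in for the class's parse_line()."""
    data = {'entries': []}
    for line in fileobj:
        entry = json.loads(line)
        if tzone:
            entry['tzone'] = tzone
        data['entries'].append(entry)
    return data

sample = io.StringIO('{"msg": "start"}\n{"msg": "stop"}\n')
print(parse_entries(sample, tzone='+0000'))
# {'entries': [{'msg': 'start', 'tzone': '+0000'}, {'msg': 'stop', 'tzone': '+0000'}]}
```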
python | def audio_inputs(self):
"""
:return: A list of audio input :class:`Ports`.
"""
return self.client.get_ports(is_audio=True, is_physical=True, is_input=True) |
python | def show(ctx):
"""
Show migrations list
"""
for app_name, app in ctx.obj['config']['apps'].items():
click.echo(click.style(app_name, fg='green', bold=True))
for migration in app['migrations']:
applied = ctx.obj['db'].is_migration_applied(app_name, migration)
click.echo(' {0} {1}'.format(migration, click.style('(applied)', bold=True) if applied else '')) |
python | def betting_market_group_update(
self,
betting_market_group_id,
description=None,
event_id=None,
rules_id=None,
status=None,
account=None,
**kwargs
):
""" Update a betting market group. This needs to be **proposed**.
:param str betting_market_group_id: Id of the betting market group
to update
:param list description: Internationalized list of descriptions
:param str event_id: Event ID to create this for
:param str rules_id: Rule ID to create this with
:param str status: New Status
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
"""
if not account:
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
account = Account(account, blockchain_instance=self)
bmg = BettingMarketGroup(betting_market_group_id)
# Do not try to update the status if it doesn't change on the chain
if bmg["status"] == status:
status = None
op_data = {
"fee": {"amount": 0, "asset_id": "1.3.0"},
"betting_market_group_id": bmg["id"],
"prefix": self.prefix,
}
if event_id:
if event_id[0] == "1":
# Test if object exists
Event(event_id)
else:
# Test if object is proposed
test_proposal_in_buffer(
kwargs.get("append_to", self.propbuffer), "event_create", event_id
)
op_data.update({"new_event_id": event_id})
if rules_id:
if rules_id[0] == "1":
# Test if object exists
Rule(rules_id)
else:
# Test if object is proposed
test_proposal_in_buffer(
kwargs.get("append_to", self.propbuffer),
"betting_market_rules_create",
rules_id,
)
op_data.update({"new_rules_id": rules_id})
if description:
op_data.update({"new_description": description})
if status:
op_data.update({"status": status})
op = operations.Betting_market_group_update(**op_data)
return self.finalizeOp(op, account["name"], "active", **kwargs) |
python | def read_parfile(parfile):
"""load a pest-compatible .par file into a pandas.DataFrame
Parameters
----------
parfile : str
pest parameter file name
Returns
-------
pandas.DataFrame : pandas.DataFrame
"""
assert os.path.exists(parfile), "Pst.parrep(): parfile not found: " +\
str(parfile)
f = open(parfile, 'r')
header = f.readline()
par_df = pd.read_csv(f, header=None,
names=["parnme", "parval1", "scale", "offset"],
sep="\s+")
par_df.index = par_df.parnme
return par_df |
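The same whitespace-delimited read exercised against an in-memory string (assumes pandas; the header line and parameter rows below are made up):

```python
import io
import pandas as pd

par_text = """single point
p1   1.000000   1.0   0.0
p2   0.250000   1.0   0.0
"""
buf = io.StringIO(par_text)
buf.readline()  # skip the one-line header, as read_parfile does
par_df = pd.read_csv(buf, header=None,
                     names=["parnme", "parval1", "scale", "offset"],
                     sep=r"\s+")
par_df.index = par_df.parnme
print(par_df.loc["p1", "parval1"])  # 1.0
```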
java | public int getId()
{
if (mTextureId != 0)
{
return mTextureId;
}
final CountDownLatch cdl = new CountDownLatch(1);
getGVRContext().runOnGlThread(new Runnable() {
@Override
public void run() {
NativeTexture.isReady(getNative());
mTextureId = NativeTexture.getId(getNative());
cdl.countDown();
}
});
try
{
cdl.await();
}
catch (final Exception exc)
{
throw new IllegalStateException("Exception waiting for texture ready");
}
return mTextureId;
} |
java | @CanIgnoreReturnValue
public final Ordered containsAtLeast(
@NullableDecl Object firstExpected,
@NullableDecl Object secondExpected,
@NullableDecl Object... restOfExpected) {
return containsAtLeastElementsIn(accumulate(firstExpected, secondExpected, restOfExpected));
} |
java | public static String binaryToInternal(String clazz) {
if (clazz.indexOf('/') >= 0 || clazz.indexOf('[') >= 0) {
throw new IllegalArgumentException(String.format(Locale.ENGLISH, "'%s' is not a valid binary class name.", clazz));
}
return clazz.replace('.', '/');
} |
java | public static boolean isPrimitives(Class<?> clazz) {
if (clazz.isArray()) { // 数组,检查数组类型
return isPrimitiveType(clazz.getComponentType());
}
return isPrimitiveType(clazz);
} |
java | public final void parse(final Reader in, final ContentHandler handler)
throws IOException, ParserException {
final StreamTokenizer tokeniser = new StreamTokenizer(in);
try {
tokeniser.resetSyntax();
tokeniser.wordChars(WORD_CHAR_START, WORD_CHAR_END);
tokeniser.whitespaceChars(WHITESPACE_CHAR_START,
WHITESPACE_CHAR_END);
tokeniser.ordinaryChar(':');
tokeniser.ordinaryChar(';');
tokeniser.ordinaryChar('=');
tokeniser.ordinaryChar('\t');
tokeniser.eolIsSignificant(true);
tokeniser.whitespaceChars(0, 0);
tokeniser.quoteChar('"');
parseCalendarList(tokeniser, in, handler);
} catch (IOException | ParseException | URISyntaxException | RuntimeException e) {
if (e instanceof IOException) {
throw (IOException) e;
}
if (e instanceof ParserException) {
throw (ParserException) e;
} else {
throw new ParserException(e.getMessage(), getLineNumber(tokeniser, in), e);
}
}
} |
java | public static KaryonServer forTcpConnectionHandler(int port, ConnectionHandler<ByteBuf, ByteBuf> handler,
BootstrapModule... bootstrapModules) {
RxServer<ByteBuf, ByteBuf> server = RxNetty.newTcpServerBuilder(port, handler).build();
return new RxNettyServerBackedServer(server, bootstrapModules);
} |
python | def get_instance(self, payload):
"""
Build an instance of StepInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.studio.v1.flow.engagement.step.StepInstance
:rtype: twilio.rest.studio.v1.flow.engagement.step.StepInstance
"""
return StepInstance(
self._version,
payload,
flow_sid=self._solution['flow_sid'],
engagement_sid=self._solution['engagement_sid'],
) |
python | def make(target="all", dir=".", **kwargs):
"""
Run make.
Arguments:
target (str, optional): Name of the target to build. Defaults
to "all".
dir (str, optional): Path to directory containing Makefile.
**kwargs (optional): Any additional arguments to be passed to
system.run().
Returns:
(int, str, str): The first element is the return code of the
make command. The second and third elements are the stdout
and stderr of the process.
Raises:
NoMakefileError: In case a Makefile is not found in the target
directory.
NoTargetError: In case the Makefile does not support the
requested target.
MakeError: In case the target rule fails.
"""
if not fs.isfile(fs.path(dir, "Makefile")):
raise NoMakefileError("No makefile in '{}'".format(fs.abspath(dir)))
fs.cd(dir)
# Default parameters to system.run()
if "timeout" not in kwargs: kwargs["timeout"] = 300
ret, out, err = system.run(["make", target], **kwargs)
fs.cdpop()
if ret > 0:
if re.search(_BAD_TARGET_RE, err):
raise NoTargetError("No rule for target '{}'"
.format(target))
else:
raise MakeError("Target '{}' failed".format(target))
raise MakeError("Failed")
return ret, out, err |
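A simplified stand-in for the wrapper above using only the standard library's `subprocess`, without the package's `fs`/`system` helpers; the error-string check is an approximation of GNU make's output:

```python
import subprocess

def run_make(target="all", directory=".", timeout=300):
    """Run `make target` in `directory` and return (returncode, stdout, stderr)."""
    proc = subprocess.run(["make", target], cwd=directory,
                          capture_output=True, text=True, timeout=timeout)
    if proc.returncode > 0:
        if "No rule to make target" in proc.stderr:   # approximate GNU make wording
            raise RuntimeError("No rule for target '{}'".format(target))
        raise RuntimeError("Target '{}' failed".format(target))
    return proc.returncode, proc.stdout, proc.stderr

# ret, out, err = run_make("all", directory="path/to/project")
```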
java | public double[] update( double w, double c1, double rand1, double c2, double rand2, double[] globalBest ) {
for( int i = 0; i < locations.length; i++ ) {
particleVelocities[i] = w * particleVelocities[i] + //
c1 * rand1 * (particleLocalBests[i] - locations[i]) + //
c2 * rand2 * (globalBest[i] - locations[i]);
double tmpLocation = locations[i] + particleVelocities[i];
/*
* if the location falls outside the ranges, it should
* not be moved.
*/
tmpLocations[i] = tmpLocation;
}
if (!PSEngine.parametersInRange(tmpLocations, ranges)) {
// System.out.println("PRE-TMPLOCATIONS: " + Arrays.toString(tmpLocations));
// System.out.println("LOCATIONS: " + Arrays.toString(locations));
/*
* mirror the value back
*/
for( int i = 0; i < tmpLocations.length; i++ ) {
double min = ranges[i][0];
double max = ranges[i][1];
if (tmpLocations[i] > max) {
double tmp = max - (tmpLocations[i] - max);
if (tmp < min) {
tmp = max;
}
locations[i] = tmp;
} else if (tmpLocations[i] < min) {
double tmp = min + (min - tmpLocations[i]);
if (tmp > max) {
tmp = min;
}
locations[i] = tmp;
} else {
locations[i] = tmpLocations[i];
}
}
// System.out.println("POST-LOCATIONS: " + Arrays.toString(locations));
// System.out.println("VELOCITIES: " + Arrays.toString(particleVelocities));
return null;
} else {
for( int i = 0; i < locations.length; i++ ) {
locations[i] = tmpLocations[i];
}
return locations;
}
} |
java | @Override
@UiThread
public void onAttachedToRecyclerView(@NonNull RecyclerView recyclerView) {
super.onAttachedToRecyclerView(recyclerView);
mAttachedRecyclerViewPool.add(recyclerView);
} |
java | public void pushLogging(String key, Object value) {
assertArgumentNotNull("key", key);
assertArgumentNotNull("value", value);
postcard.pushLogging(key, value);
} |
python | def parse_rdp_assignment(line):
"""Returns a list of assigned taxa from an RDP classification line
"""
toks = line.strip().split('\t')
seq_id = toks.pop(0)
direction = toks.pop(0)
if ((len(toks) % 3) != 0):
raise ValueError(
"Expected assignments in a repeating series of (name, rank, "
"confidence), received %s" % toks)
assignments = []
# Fancy way to create list of triples using consecutive items from
# input. See grouper function in documentation for itertools for
# more general example.
itoks = iter(toks)
for taxon, rank, confidence_str in zip(itoks, itoks, itoks):
if not taxon:
continue
assignments.append((taxon.strip('"'), rank, float(confidence_str)))
return seq_id, direction, assignments |
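The grouping idiom used above, shown on its own: zipping a single iterator with itself walks the flat token list three items at a time (the tokens below are made up):

```python
toks = ["Bacteria", "domain", "1.0", "Firmicutes", "phylum", "0.97"]
itoks = iter(toks)
triples = [(taxon, rank, float(conf)) for taxon, rank, conf in zip(itoks, itoks, itoks)]
print(triples)
# [('Bacteria', 'domain', 1.0), ('Firmicutes', 'phylum', 0.97)]
```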
java | public synchronized Counter findCounter(String group, String name) {
return getGroup(group).getCounterForName(name);
} |
java | public void setUnsuccessfulInstanceCreditSpecifications(
java.util.Collection<UnsuccessfulInstanceCreditSpecificationItem> unsuccessfulInstanceCreditSpecifications) {
if (unsuccessfulInstanceCreditSpecifications == null) {
this.unsuccessfulInstanceCreditSpecifications = null;
return;
}
this.unsuccessfulInstanceCreditSpecifications = new com.amazonaws.internal.SdkInternalList<UnsuccessfulInstanceCreditSpecificationItem>(
unsuccessfulInstanceCreditSpecifications);
} |
java | public ErrorRootCause withServices(ErrorRootCauseService... services) {
if (this.services == null) {
setServices(new java.util.ArrayList<ErrorRootCauseService>(services.length));
}
for (ErrorRootCauseService ele : services) {
this.services.add(ele);
}
return this;
} |
python | def setup_logger(options):
"""Do the logger setup with options."""
LOGGER.setLevel(logging.INFO if options.verbose else logging.WARN)
if options.report:
LOGGER.removeHandler(STREAM)
LOGGER.addHandler(logging.FileHandler(options.report, mode='w'))
if options.options:
LOGGER.info('Try to read configuration from: %r', options.options) |
java | @Override
public final String getNamespaceURI(String prefix)
{
if (prefix == null) {
throw new IllegalArgumentException(ErrorConsts.ERR_NULL_ARG);
}
if (prefix.length() == 0) {
if (mDepth == 0) { // unexpected... but let's not err at this point
/* 07-Sep-2007, TSa: Default/"no namespace" does map to
* "URI" of empty String.
*/
return XmlConsts.DEFAULT_NAMESPACE_URI;
}
return mCurrElement.mDefaultNsURI;
}
if (prefix.equals(XMLConstants.XML_NS_PREFIX)) {
return XMLConstants.XML_NS_URI;
}
if (prefix.equals(XMLConstants.XMLNS_ATTRIBUTE)) {
return XMLConstants.XMLNS_ATTRIBUTE_NS_URI;
}
/* Ok, need to find the match, if any; starting from end of the
* list of active namespaces. Note that we can not count on prefix
* being interned/canonicalized.
*/
return mNamespaces.findLastNonInterned(prefix);
} |
java | private static boolean setVTMode() {
long console = GetStdHandle(STD_OUTPUT_HANDLE);
int[] mode = new int[1];
if (Kernel32.GetConsoleMode(console, mode) == 0) {
// No need to go further, not supported.
return false;
}
if (Kernel32.SetConsoleMode(console, mode[0] | VIRTUAL_TERMINAL_PROCESSING) == 0) {
// No need to go further, not supported.
return false;
}
return true;
} |
python | def get_occupancy(last, bucketsize):
"""
We deliver historical occupancy up until "now". If the building has occupancy sensors, we pull that data
and aggregate it by zone. Take mean occupancy per zone (across all sensors).
If building does *not* have occupancy sensors, then we need to read the results from some occupancy file.
"""
if last not in ['hour','day','week']:
return "Must be hour, day, week"
start_date = get_start(last)
zones = defaultdict(list)
prediction_start = datetime.now(config.TZ)
md = config.HOD.do_query(occupancy_query)
if md['Rows'] is not None:
for row in md['Rows']:
zones[row['?zone']].append(row['?occ_uuid'])
q = occupancy_data_query.copy()
q["Time"] = {
"T0": start_date.strftime("%Y-%m-%d %H:%M:%S %Z"),
"T1": prediction_start.strftime("%Y-%m-%d %H:%M:%S %Z"),
"WindowSize": bucketsize,
"Aligned": True,
}
resp = config.MDAL.do_query(q, timeout=120)
if 'error' in resp:
print 'ERROR', resp
return
df = resp['df'].fillna(method='ffill')
for zone, uuidlist in zones.items():
if len(uuidlist) > 0:
zones[zone] = json.loads(df[uuidlist].mean(axis=1).to_json())
else:
zones[zone] = {}
# get predicted output
prediction_end = get_tomorrow()
predicted = list(rrule.rrule(freq=rrule.HOURLY, dtstart=prediction_start, until=prediction_end))
for zone, occdict in zones.items():
for date in predicted:
occdict[int(int(date.strftime('%s'))*1000)] = 'predicted' # prediction
zones[zone] = occdict
else:
md = config.HOD.do_query(zone_query)
zonenames = [x['?zone'].lower() for x in md['Rows']]
conn = sqlite3.connect('occupancy_schedule.db')
sql = conn.cursor()
for zone in zonenames:
query = "SELECT * FROM schedules WHERE site='{0}' and zone='{1}' and dayofweek='{2}'".format(config.SITE, zone, prediction_start.strftime('%A').lower())
res = sql.execute(query).fetchall()
records = {'time': [], 'occ': [], 'zone': []}
for sqlrow in res:
hour, minute = sqlrow[3].split(':')
time = datetime(year=prediction_start.year, month=prediction_start.month, day=prediction_start.day, hour=int(hour), minute=int(minute), tzinfo=prediction_start.tzinfo)
occ = sqlrow[5]
zone = sqlrow[1]
records['time'].append(time)
records['occ'].append(occ)
records['zone'].append(zone)
df = pd.DataFrame.from_records(records)
df = df.set_index(df.pop('time'))
if len(df) ==0:
continue
sched = df.resample(bucketsize.replace('m','T')).ffill()
zones[zone] = json.loads(sched['occ'].to_json())
conn.close()
return zones |
python | def _get_message(self, key, since=None):
"""Return the MdMessage object for the key.
The object is either returned from the cache in the store or
made, cached and then returned.
If 'since' is passed in the modification time of the file is
checked and the message is only returned if the mtime is since
the specified time.
If the 'since' check fails, None is returned.
'since' must be seconds since epoch.
"""
stored = self.store[key]
if isinstance(stored, dict):
filename = stored["path"]
folder = stored["folder"]
if since and since > 0.0:
st = stat(filename)
if st.st_mtime < since:
return None
stored = MdMessage(
key,
filename = filename,
folder = folder,
filesystem = folder.filesystem
)
self.store[key] = stored
else:
if since and since > 0.0:
st = stat(stored.filename)
if st.st_mtime < since:
return None
return stored |
java | private static String decode(String s) {
int n = INITIAL_N;
int i = 0;
int bias = INITIAL_BIAS;
StringBuffer output = new StringBuffer();
int d = s.lastIndexOf(DELIMITER);
if (d > 0) {
for (int j = 0; j < d; j++) {
char c = s.charAt(j);
if (!basicCodePoint(c)) {
throw new IllegalArgumentException("bad input: " + c);
}
output.append(c);
}
d++;
} else {
d = 0;
}
while (d < s.length()) {
int oldi = i;
int w = 1;
for (int k = BASE;; k += BASE) {
if (d == s.length()) {
throw new IllegalArgumentException("bad input: " + d);
}
int c = s.charAt(d++);
int digit = decodeDigit(c);
if (digit > (Integer.MAX_VALUE - i) / w) {
throw new IllegalArgumentException("encoding overflow");
}
i = i + digit * w;
int t;
if (k <= bias) {
t = TMIN;
} else if (k >= bias + TMAX) {
t = TMAX;
} else {
t = k - bias;
}
if (digit < t) {
break;
}
w = w * (BASE - t);
}
bias = adapt(i - oldi, output.length() + 1, oldi == 0);
if (i / (output.length() + 1) > Integer.MAX_VALUE - n) {
throw new IllegalArgumentException("encoding overflow");
}
n = n + i / (output.length() + 1);
i = i % (output.length() + 1);
output.insert(i, (char) n);
i++;
}
return output.toString();
} |
python | def main():
"""
Testing function for PDA - DFA Diff Operation
"""
if len(argv) < 2:
print 'Usage: '
print ' Get A String %s CFG_fileA FST_fileB' % argv[0]
return
alphabet = createalphabet()
cfgtopda = CfgPDA(alphabet)
print '* Parsing Grammar:',
mma = cfgtopda.yyparse(argv[1])
print 'OK'
flex_a = Flexparser(alphabet)
print '* Parsing Regex:',
mmb = flex_a.yyparse(argv[2])
print mmb
print 'OK'
print '* Minimize Automaton:',
mmb.minimize()
print 'OK'
print mmb
print '* Diff:',
ops = PdaDiff(mma, mmb, alphabet)
mmc = ops.diff()
print 'OK'
print '* Get String:',
print ops.get_string() |
java | public static int validate(final String jobName, final Props serverProps, final Props jobProps,
final Collection<String> errors) {
final int maxNumCallback =
serverProps.getInt(
JobCallbackConstants.MAX_CALLBACK_COUNT_PROPERTY_KEY,
JobCallbackConstants.DEFAULT_MAX_CALLBACK_COUNT);
final int maxPostBodyLength =
serverProps.getInt(MAX_POST_BODY_LENGTH_PROPERTY_KEY,
DEFAULT_POST_BODY_LENGTH);
int totalCallbackCount = 0;
for (final JobCallbackStatusEnum jobStatus : JobCallbackStatusEnum.values()) {
totalCallbackCount +=
validateBasedOnStatus(jobProps, errors, jobStatus, maxNumCallback,
maxPostBodyLength);
}
if (logger.isDebugEnabled()) {
logger.debug("Found " + totalCallbackCount + " job callbacks for job "
+ jobName);
}
return totalCallbackCount;
} |
java | private void setCalendar(Calendar c, boolean update) {
if (c == null) {
setDate(null);
}
Calendar oldCalendar = calendar;
calendar = c;
if (update) {
// Thanks to Jeff Ulmer for correcting a bug in the sequence :)
yearChooser.setYear(c.get(Calendar.YEAR));
monthChooser.setMonth(c.get(Calendar.MONTH));
dayChooser.setDay(c.get(Calendar.DATE));
}
firePropertyChange("calendar", oldCalendar, calendar);
} |
python | def to_netcdf(self, *args, **kwargs):
"""Write DataArray contents to a netCDF file.
Parameters
----------
path : str or Path, optional
Path to which to save this dataset. If no path is provided, this
function returns the resulting netCDF file as a bytes object; in
this case, we need to use scipy.io.netcdf, which does not support
netCDF version 4 (the default format becomes NETCDF3_64BIT).
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
this location will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',
'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
format='NETCDF4'). The group(s) will be created if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
encoding : dict, optional
Nested dictionary with variable names as keys and dictionaries of
variable specific encodings as values, e.g.,
``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,
'zlib': True}, ...}``
Notes
-----
Only xarray.Dataset objects can be written to netCDF files, so
the xarray.DataArray is converted to a xarray.Dataset object
containing a single variable. If the DataArray has no name, or if the
name is the same as a co-ordinate name, then it is given the name
'__xarray_dataarray_variable__'.
All parameters are passed directly to `xarray.Dataset.to_netcdf`.
"""
from ..backends.api import DATAARRAY_NAME, DATAARRAY_VARIABLE
if self.name is None:
# If no name is set then use a generic xarray name
dataset = self.to_dataset(name=DATAARRAY_VARIABLE)
elif self.name in self.coords or self.name in self.dims:
# The name is the same as one of the coords names, which netCDF
# doesn't support, so rename it but keep track of the old name
dataset = self.to_dataset(name=DATAARRAY_VARIABLE)
dataset.attrs[DATAARRAY_NAME] = self.name
else:
# No problems with the name - so we're fine!
dataset = self.to_dataset()
return dataset.to_netcdf(*args, **kwargs) |
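A hedged usage sketch for `to_netcdf`; it assumes xarray plus a netCDF backend (netCDF4 or scipy) is installed and writes to a throwaway file name:

```python
import numpy as np
import xarray as xr  # assumes xarray and a netCDF backend are installed

da = xr.DataArray(np.random.rand(2, 3), dims=("x", "y"), name="temperature")
da.to_netcdf("temperature.nc")              # written via a temporary single-variable Dataset
print(xr.open_dataarray("temperature.nc"))  # round-trips back as a DataArray
```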
java | @Override
public GetIndexingConfigurationResult getIndexingConfiguration(GetIndexingConfigurationRequest request) {
request = beforeClientExecution(request);
return executeGetIndexingConfiguration(request);
} |
python | def do_lzop_get(creds, url, path, decrypt, do_retry=True):
"""
Get and decompress a S3 URL
This streams the content directly to lzop; the compressed version
is never stored on disk.
"""
assert url.endswith('.lzo'), 'Expect an lzop-compressed file'
def log_wal_fetch_failures_on_error(exc_tup, exc_processor_cxt):
def standard_detail_message(prefix=''):
return (prefix + ' There have been {n} attempts to fetch wal '
'file {url} so far.'.format(n=exc_processor_cxt, url=url))
typ, value, tb = exc_tup
del exc_tup
# Screen for certain kinds of known-errors to retry from
if issubclass(typ, socket.error):
socketmsg = value[1] if isinstance(value, tuple) else value
logger.info(
msg='Retrying fetch because of a socket error',
detail=standard_detail_message(
"The socket error's message is '{0}'."
.format(socketmsg)))
elif (issubclass(typ, boto.exception.S3ResponseError) and
value.error_code == 'RequestTimeTooSkewed'):
logger.info(msg='Retrying fetch because of a Request Skew time',
detail=standard_detail_message())
else:
# For all otherwise untreated exceptions, report them as a
# warning and retry anyway -- all exceptions that can be
# justified should be treated and have error messages
# listed.
logger.warning(
msg='retrying WAL file fetch from unexpected exception',
detail=standard_detail_message(
'The exception type is {etype} and its value is '
'{evalue} and its traceback is {etraceback}'
.format(etype=typ, evalue=value,
etraceback=''.join(traceback.format_tb(tb)))))
# Help Python GC by resolving possible cycles
del tb
def download():
with files.DeleteOnError(path) as decomp_out:
key = _uri_to_key(creds, url)
with get_download_pipeline(PIPE, decomp_out.f, decrypt) as pl:
g = gevent.spawn(write_and_return_error, key, pl.stdin)
try:
# Raise any exceptions from write_and_return_error
exc = g.get()
if exc is not None:
raise exc
except boto.exception.S3ResponseError as e:
if e.status == 404:
# Do not retry if the key not present, this
# can happen under normal situations.
pl.abort()
logger.info(
msg=('could no longer locate object while '
'performing wal restore'),
detail=('The absolute URI that could not be '
'located is {url}.'.format(url=url)),
hint=('This can be normal when Postgres is trying '
'to detect what timelines are available '
'during restoration.'))
decomp_out.remove_regardless = True
return False
                        elif e.error_code == 'ExpiredToken':
# Do not retry if STS token has expired. It can never
# succeed in the future anyway.
pl.abort()
logger.info(
msg=('could no longer authenticate while '
'performing wal restore'),
detail=('The absolute URI that could not be '
'accessed is {url}.'.format(url=url)),
hint=('This can be normal when using STS '
'credentials.'))
decomp_out.remove_regardless = True
return False
else:
raise
logger.info(
msg='completed download and decompression',
detail='Downloaded and decompressed "{url}" to "{path}"'
.format(url=url, path=path))
return True
if do_retry:
download = retry(
retry_with_count(log_wal_fetch_failures_on_error))(download)
return download() |
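A hypothetical call to the function above; creds stands for whatever boto credentials object the surrounding code constructs, and the URL and path are placeholders:
ok = do_lzop_get(creds,
                 's3://my-bucket/wal_005/0000000100000000000000AB.lzo',
                 '/tmp/0000000100000000000000AB',
                 decrypt=False)
if not ok:
    print('object not found (404); treated as a normal miss during restore')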
java | public void scanClass(InputStream bits) throws IOException
{
DataInputStream dstream = new DataInputStream(new BufferedInputStream(bits));
ClassFile cf = null;
try
{
cf = new ClassFile(dstream);
String className = cf.getName();
List<String> annotations = new ArrayList<String>();
accumulateAnnotations(annotations, (AnnotationsAttribute) cf.getAttribute(AnnotationsAttribute.visibleTag));
accumulateAnnotations(annotations,
(AnnotationsAttribute) cf.getAttribute(AnnotationsAttribute.invisibleTag));
// iterate through all valid annotations
for (String validAnn : getValidAnnotations())
{
// check if the current class has one?
if (annotations.contains(validAnn))
{
// fire all listeners
for (AnnotationDiscoveryListener listener : getAnnotationDiscoveryListeners())
{
listener.discovered(className);
}
}
}
}
finally
{
dstream.close();
bits.close();
}
} |
java | void setBits(BitSet table) {
for (int c = Character.MAX_VALUE; c >= Character.MIN_VALUE; c--) {
if (matches((char) c)) {
table.set(c);
}
}
} |
java | @Override
public void storePortletEntity(HttpServletRequest request, final IPortletEntity portletEntity) {
Validate.notNull(portletEntity, "portletEntity can not be null");
final IUserInstance userInstance = this.userInstanceManager.getUserInstance(request);
final IPerson person = userInstance.getPerson();
if (person.isGuest()) {
// Never persist things for the guest user, just rely on in-memory storage
return;
}
final IPortletEntityId wrapperPortletEntityId = portletEntity.getPortletEntityId();
final Lock portletEntityLock = this.getPortletEntityLock(request, wrapperPortletEntityId);
portletEntityLock.lock();
try {
final boolean shouldBePersisted = this.shouldBePersisted(portletEntity);
if (portletEntity instanceof PersistentPortletEntityWrapper) {
// Unwrap the persistent entity
final IPortletEntity persistentEntity =
((PersistentPortletEntityWrapper) portletEntity).getPersistentEntity();
// Already persistent entity that still has prefs
if (shouldBePersisted) {
try {
this.portletEntityDao.updatePortletEntity(persistentEntity);
} catch (HibernateOptimisticLockingFailureException e) {
// Check if this exception is from the entity being deleted from under us.
final boolean exists =
this.portletEntityDao.portletEntityExists(
persistentEntity.getPortletEntityId());
if (!exists) {
this.logger.warn(
"The persistent portlet has already been deleted: "
+ persistentEntity
+ ". The passed entity should be persistent so a new persistent entity will be created");
this.deletePortletEntity(request, portletEntity, true);
this.createPersistentEntity(persistentEntity, wrapperPortletEntityId);
} else {
throw e;
}
}
}
// Already persistent entity that should not be, DELETE!
else {
// Capture identifiers needed to recreate the entity as session persistent
final IPortletDefinitionId portletDefinitionId =
portletEntity.getPortletDefinitionId();
final String layoutNodeId = portletEntity.getLayoutNodeId();
final int userId = portletEntity.getUserId();
// Delete the persistent entity
this.deletePortletEntity(request, portletEntity, false);
// Create a new entity and stick it in the cache
this.getOrCreatePortletEntity(
request, portletDefinitionId, layoutNodeId, userId);
}
} else if (portletEntity instanceof SessionPortletEntityImpl) {
                // There are preferences on the interim entity, create and store it
if (shouldBePersisted) {
// Remove the session scoped entity from the request and session caches
this.deletePortletEntity(request, portletEntity, false);
final IPortletEntity persistentEntity =
createPersistentEntity(portletEntity, wrapperPortletEntityId);
if (this.logger.isTraceEnabled()) {
this.logger.trace(
"Session scoped entity "
+ wrapperPortletEntityId
+ " should now be persistent. Deleted it from session cache and created persistent portlet entity "
+ persistentEntity.getPortletEntityId());
}
}
// Session scoped entity that is still session scoped,
else {
// Look for a persistent entity and delete it
final String channelSubscribeId = portletEntity.getLayoutNodeId();
final int userId = portletEntity.getUserId();
IPortletEntity existingPersistentEntity =
this.portletEntityDao.getPortletEntity(channelSubscribeId, userId);
if (existingPersistentEntity != null) {
final IPortletEntityId consistentPortletEntityId =
this.createConsistentPortletEntityId(existingPersistentEntity);
existingPersistentEntity =
new PersistentPortletEntityWrapper(
existingPersistentEntity, consistentPortletEntityId);
this.logger.warn(
"A persistent portlet entity already exists: "
+ existingPersistentEntity
+ ". The passed entity has no preferences so the persistent version will be deleted");
this.deletePortletEntity(request, existingPersistentEntity, false);
// Add to request cache
final PortletEntityCache<IPortletEntity> portletEntityMap =
this.getPortletEntityMap(request);
portletEntityMap.storeIfAbsentEntity(portletEntity);
// Add to session cache
final PortletEntityCache<PortletEntityData> portletEntityDataMap =
this.getPortletEntityDataMap(request);
portletEntityDataMap.storeIfAbsentEntity(
((SessionPortletEntityImpl) portletEntity).getPortletEntityData());
}
}
} else {
throw new IllegalArgumentException(
"Invalid portlet entity implementation passed: "
+ portletEntity.getClass());
}
} finally {
portletEntityLock.unlock();
}
} |
python | def to_pickle(graph: BELGraph, file: Union[str, BinaryIO], protocol: int = HIGHEST_PROTOCOL) -> None:
"""Write this graph to a pickle object with :func:`networkx.write_gpickle`.
Note that the pickle module has some incompatibilities between Python 2 and 3. To export a universally importable
pickle, choose 0, 1, or 2.
:param graph: A BEL graph
:param file: A file or filename to write to
:param protocol: Pickling protocol to use. Defaults to ``HIGHEST_PROTOCOL``.
.. seealso:: https://docs.python.org/3.6/library/pickle.html#data-stream-format
"""
raise_for_not_bel(graph)
nx.write_gpickle(graph, file, protocol=protocol) |
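A minimal sketch of the call, assuming pybel is available and that BELGraph accepts the metadata keywords shown; protocol 2 keeps the pickle readable from Python 2, as the docstring notes:
from pybel import BELGraph

graph = BELGraph(name='example graph', version='0.1.0')
to_pickle(graph, 'example.gpickle', protocol=2)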
python | def get_by_signature(user_id, app_id):
'''
Get the collection.
'''
try:
return TabCollect.get(
(TabCollect.user_id == user_id) &
(TabCollect.post_id == app_id)
)
    except Exception:
return None |
java | protected DBSort.SortBuilder getSortBuilder(String order, String field) {
DBSort.SortBuilder sortBuilder;
if ("desc".equalsIgnoreCase(order)) {
sortBuilder = DBSort.desc(field);
} else {
sortBuilder = DBSort.asc(field);
}
return sortBuilder;
} |
java | @Indexable(type = IndexableType.DELETE)
@Override
public CommercePriceEntry deleteCommercePriceEntry(
long commercePriceEntryId) throws PortalException {
return commercePriceEntryPersistence.remove(commercePriceEntryId);
} |
python | def _purge_crawl(self, spiderid, appid, crawlid):
'''
Wrapper for purging the crawlid from the queues
@param spiderid: the spider id
@param appid: the app id
@param crawlid: the crawl id
@return: The number of requests purged
'''
# purge three times to try to make sure everything is cleaned
total = self._mini_purge(spiderid, appid, crawlid)
total = total + self._mini_purge(spiderid, appid, crawlid)
total = total + self._mini_purge(spiderid, appid, crawlid)
return total |
java | void push( int slots ) {
assert 0 <= slots && slots < 1000;
int len = _d.length;
_sp += slots;
while( _sp > len ) {
_key= Arrays.copyOf(_key,len<<1);
_ary= Arrays.copyOf(_ary,len<<1);
_d = Arrays.copyOf(_d ,len<<1);
_fcn= Arrays.copyOf(_fcn,len<<=1);
_str= Arrays.copyOf(_str,len<<1);
}
} |
java | @Override
public void visit(NodeVisitor v) {
if (v.visit(this)) {
testExpression.visit(v);
trueExpression.visit(v);
falseExpression.visit(v);
}
} |
java | public By getElementLocatorForElementReference(String elementReference) {
Map<String, By> objectReferenceMap = getObjectReferenceMap(By.class);
By elementLocator = objectReferenceMap.get(elementReference);
if (elementLocator == null) {
fail("No elementLocator is found for element name: '" + elementReference + "'. Available element references are: " + objectReferenceMap.keySet().toString());
}
return elementLocator;
} |
java | public Map<String, String> getCustomRequestHeaders() {
if (customRequestHeaders == null) {
return null;
}
return Collections.unmodifiableMap(customRequestHeaders);
} |
java | static double inner_product(SparseVector vec1, SparseVector vec2)
{
Iterator<Map.Entry<Integer, Double>> it;
SparseVector other;
if (vec1.size() < vec2.size())
{
it = vec1.entrySet().iterator();
other = vec2;
}
else
{
it = vec2.entrySet().iterator();
other = vec1;
}
double prod = 0;
while (it.hasNext())
{
Map.Entry<Integer, Double> entry = it.next();
prod += entry.getValue() * other.get(entry.getKey());
}
return prod;
} |
java | public void appendToSubVer(String name, String version, @Nullable String comments) {
checkSubVerComponent(name);
checkSubVerComponent(version);
if (comments != null) {
checkSubVerComponent(comments);
subVer = subVer.concat(String.format(Locale.US, "%s:%s(%s)/", name, version, comments));
} else {
subVer = subVer.concat(String.format(Locale.US, "%s:%s/", name, version));
}
} |
java | @Override
public int read() throws IOException {
int c = origStream.read();
if (c != -1) {
os.write(c);
} else {
os.close();
}
return c;
} |
python | def decrypt_ecb(self, data):
"""
Return an iterator that decrypts `data` using the Electronic Codebook (ECB)
mode of operation.
ECB mode can only operate on `data` that is a multiple of the block-size
in length.
Each iteration returns a block-sized :obj:`bytes` object (i.e. 8 bytes)
containing the decrypted bytes of the corresponding block in `data`.
`data` should be a :obj:`bytes`-like object that is a multiple of the
block-size in length (i.e. 8, 16, 32, etc.).
If it is not, a :exc:`ValueError` exception is raised.
"""
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
decrypt = self._decrypt
u4_2_pack = self._u4_2_pack
try:
LR_iter = self._u4_2_iter_unpack(data)
except struct_error:
raise ValueError("data is not a multiple of the block-size in length")
for cipher_L, cipher_R in LR_iter:
yield u4_2_pack(
*decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
) |
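A usage sketch; the enclosing cipher class is not shown in the snippet, so Cipher below is a stand-in for whatever class defines decrypt_ecb:
cipher = Cipher(b'0123456789abcdef')        # stand-in constructor, key value illustrative
ciphertext = bytes(16)                      # two 8-byte blocks
plaintext = b''.join(cipher.decrypt_ecb(ciphertext))
# Passing 15 bytes instead would raise ValueError, as the docstring states.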
java | public Broadcast startBroadcast(String sessionId, BroadcastProperties properties) throws OpenTokException {
if (StringUtils.isEmpty(sessionId) || (properties == null)) {
throw new InvalidArgumentException("Session not valid or broadcast properties is null");
}
String broadcast = this.client.startBroadcast(sessionId, properties);
try {
return broadcastReader.readValue(
broadcast);
} catch (Exception e) {
throw new RequestException("Exception mapping json: " + e.getMessage());
}
} |
java | public FacesConfigFacetType<FacesConfigRendererType<T>> getOrCreateFacet()
{
List<Node> nodeList = childNode.get("facet");
if (nodeList != null && nodeList.size() > 0)
{
return new FacesConfigFacetTypeImpl<FacesConfigRendererType<T>>(this, "facet", childNode, nodeList.get(0));
}
return createFacet();
} |
java | @SuppressWarnings("unchecked")
public static NumberList delta(Map<String, Object> currentMap, NumberList previousMap) {
return delta(currentMap, (Map)previousMap.numbers);
} |
java | public void marshall(EnvironmentLanguage environmentLanguage, ProtocolMarshaller protocolMarshaller) {
if (environmentLanguage == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(environmentLanguage.getLanguage(), LANGUAGE_BINDING);
protocolMarshaller.marshall(environmentLanguage.getImages(), IMAGES_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
python | def read_text_from_conll_file( file_name, layer_name=LAYER_CONLL, **kwargs ):
''' Reads the CONLL format syntactic analysis from given file, and returns as
a Text object.
The Text object has been tokenized for paragraphs, sentences, words, and it
contains syntactic analyses aligned with word spans, in the layer *layer_name*
(by default: LAYER_CONLL);
Attached syntactic analyses are in the format as is the output of
utils.normalise_alignments();
Parameters
-----------
file_name : str
Name of the input file; Should contain syntactically analysed text,
following the CONLL format;
layer_name : str
Name of the Text's layer in which syntactic analyses are stored;
Defaults to 'conll_syntax';
For other parameters, see optional parameters of the methods:
utils.normalise_alignments(): "rep_miss_w_dummy", "fix_selfrefs",
"keep_old", "mark_root";
maltparser_support.align_CONLL_with_Text(): "check_tokens", "add_word_ids";
'''
# 1) Load conll analysed text from file
conll_lines = []
in_f = codecs.open(file_name, mode='r', encoding='utf-8')
for line in in_f:
# Skip comment lines
if line.startswith('#'):
continue
conll_lines.append( line.rstrip() )
in_f.close()
# 2) Extract sentences and word tokens
sentences = []
sentence = []
for i, line in enumerate( conll_lines ):
if len(line) > 0 and '\t' in line:
features = line.split('\t')
if len(features) != 10:
                raise Exception(' In file '+file_name+', line '+str(i)+\
                                ' with unexpected format: "'+line+'" ')
word_id = features[0]
token = features[1]
sentence.append( token )
        elif len(line)==0 or re.match(r'^\s+$', line):
# End of a sentence
if sentence:
# (!) Use double space instead of single space in order to distinguish
# word-tokenizing space from the single space in the multiwords
# (e.g. 'Rio de Janeiro' as a single word);
sentences.append( ' '.join(sentence) )
sentence = []
if sentence:
sentences.append( ' '.join(sentence) )
# 3) Construct the estnltk's Text
kwargs4text = {
# Use custom tokenization utils in order to preserve exactly the same
# tokenization as was in the input;
"word_tokenizer": RegexpTokenizer(" ", gaps=True),
"sentence_tokenizer": LineTokenizer()
}
from estnltk.text import Text
text = Text( '\n'.join(sentences), **kwargs4text )
# Tokenize up to the words layer
text.tokenize_words()
# 4) Align syntactic analyses with the Text
alignments = align_CONLL_with_Text( conll_lines, text, None, **kwargs )
normalise_alignments( alignments, data_type=CONLL_DATA, **kwargs )
# Attach alignments to the text
text[ layer_name ] = alignments
return text |
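A small usage sketch, assuming estnltk and a CONLL-formatted input file; the file name is a placeholder and the layer name is the default mentioned in the docstring:
text = read_text_from_conll_file('analysed_sentences.conll')
print(len(text['conll_syntax']), 'word-aligned syntactic analyses')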
python | def auth_plugins(auth_plugins=None):
"""Authentication plugins.
Usage: add any plugin here that will serve as a rapid means to
authenticate to an OpenStack environment.
Syntax is as follows:
>>> __auth_plugins__ = {
... 'new_plugin_name': {
... 'os_auth_url': 'https://localhost:5000/v2.0/tokens',
... 'os_prefix': {
... 'os_apikey': 'apiKeyCredentials',
... 'os_password': 'passwordCredentials'
... },
... 'args': {
... 'commands': [
... '--new-plugin-name-auth'
... ],
... 'choices': [
... 'RegionOne'
... ],
... 'help': 'Authentication plugin for New Plugin Name',
... 'default': os.environ.get('OS_NEW_PLUGIN_AUTH', None),
... 'metavar': '[REGION]'
... }
... }
... }
If the subdomain is in the auth url, as is the case with hp, add
"%(region)s" to the "os_auth_url" value. The region value from the list of
choices will be used as the string replacement. Note that if the
`os_prefix` key is added the system will override the authentication body
prefix with the string provided. At this time the choices are os_apikey,
os_password, os_token. All key entries are optional and should one not be
specified with a credential type a `NotImplementedError` will be raised.
:param auth_plugins: Additional plugins to add in
:type auth_plugins: ``dict``
:returns: ``dict``
"""
__auth_plugins__ = {
'os_rax_auth': {
'os_auth_url': 'https://identity.api.rackspacecloud.com/v2.0/'
'tokens',
'os_prefix': {
'os_apikey': 'RAX-KSKEY:apiKeyCredentials',
'os_password': 'passwordCredentials'
},
'args': {
'commands': [
'--os-rax-auth'
],
'choices': [
'dfw',
'ord',
'iad',
'syd',
'hkg',
'lon'
],
'help': 'Authentication Plugin for Rackspace Cloud'
' env[OS_RAX_AUTH]',
'default': os.environ.get('OS_RAX_AUTH', None),
'metavar': '[REGION]'
}
},
'rax_auth_v1': {
'os_auth_version': 'v1.0',
'os_auth_url': 'https://identity.api.rackspacecloud.com/v1.0',
'args': {
'commands': [
'--rax-auth-v1'
],
'action': 'store_true',
'help': 'Authentication Plugin for Rackspace Cloud V1'
}
},
'os_rax_auth_lon': {
'os_auth_url': 'https://lon.identity.api.rackspacecloud.com/'
'v2.0/tokens',
'os_prefix': {
'os_apikey': 'RAX-KSKEY:apiKeyCredentials',
'os_password': 'passwordCredentials'
},
'args': {
'commands': [
'--os-rax-auth-lon'
],
'choices': [
'lon'
],
'help': 'Authentication Plugin for Rackspace Cloud'
' env[OS_RAX_AUTH_LON]',
'default': os.environ.get('OS_RAX_AUTH_LON', None),
'metavar': '[REGION]'
}
},
'os_hp_auth': {
'os_auth_url': 'https://%(region)s.identity.hpcloudsvc.com:35357/'
'v2.0/tokens',
'os_prefix': {
'os_password': 'passwordCredentials'
},
'args': {
'commands': [
'--os-hp-auth'
],
'choices': [
'region-b.geo-1',
'region-a.geo-1'
],
'help': 'Authentication Plugin for HP Cloud'
' env[OS_HP_AUTH]',
'default': os.environ.get('OS_HP_AUTH', None),
'metavar': '[REGION]'
}
}
}
if auth_plugins:
__auth_plugins__.update(auth_plugins)
return __auth_plugins__ |
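A short sketch of merging a caller-supplied plugin into the defaults, following the pattern from the docstring; every value below is illustrative:
extra = {
    'os_demo_auth': {
        'os_auth_url': 'https://keystone.example.com/v2.0/tokens',
        'os_prefix': {'os_password': 'passwordCredentials'},
        'args': {
            'commands': ['--os-demo-auth'],
            'choices': ['RegionOne'],
            'help': 'Authentication plugin for the demo cloud',
            'default': os.environ.get('OS_DEMO_AUTH', None),
            'metavar': '[REGION]',
        },
    },
}
plugins = auth_plugins(auth_plugins=extra)
assert 'os_rax_auth' in plugins and 'os_demo_auth' in plugins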
java | public Session startSshSessionAndObtainSession() {
Session session = null;
try {
JSch jsch = new JSch();
if (sshMeta.getSshLoginType() == SshLoginType.KEY) {
String workingDir = System.getProperty("user.dir");
String privKeyAbsPath = workingDir + "/"
+ sshMeta.getPrivKeyRelativePath();
logger.debug("use privkey: path: " + privKeyAbsPath);
if (!PcFileNetworkIoUtils.isFileExist(privKeyAbsPath)) {
throw new RuntimeException("file not found at "
+ privKeyAbsPath);
}
if (sshMeta.isPrivKeyUsePassphrase()
&& sshMeta.getPassphrase() != null) {
jsch.addIdentity(privKeyAbsPath, sshMeta.getPassphrase());
} else {
jsch.addIdentity(privKeyAbsPath);
}
}
session = jsch.getSession(sshMeta.getUserName(), targetHost,
sshMeta.getSshPort());
if (sshMeta.getSshLoginType() == SshLoginType.PASSWORD) {
session.setPassword(sshMeta.getPassword());
}
session.setConfig("StrictHostKeyChecking", "no");
} catch (Exception t) {
throw new RuntimeException(t);
}
return session;
} |
python | def send(self, agent_id, user_ids, party_ids='',
tag_ids='', msg=None):
"""
General message sending interface. The msg dict must specify msgtype and the fields required by that message type.
If some recipients lack permission or do not exist, the message is still sent, but the invalid parts (invaliduser, invalidparty or invalidtag) are returned; a common cause is that the recipient is outside the application's visible scope.
user_ids, party_ids and tag_ids must not all be empty at the same time; this is not repeated for the methods below.
:param agent_id: Required. The id of the enterprise application, an integer. It can be found on the application's settings page.
:param user_ids: List of member IDs.
:param party_ids: List of department IDs.
:param tag_ids: List of tag IDs.
:param msg: dict object of the message to send
:type msg: dict | None
:return: Result of the API call
"""
msg = msg or {}
if isinstance(user_ids, (tuple, list)):
user_ids = '|'.join(user_ids)
if isinstance(party_ids, (tuple, list)):
party_ids = '|'.join(party_ids)
if isinstance(tag_ids, (tuple, list)):
tag_ids = '|'.join(tag_ids)
data = {
'touser': user_ids,
'toparty': party_ids,
'totag': tag_ids,
'agentid': agent_id
}
data.update(msg)
return self._post('message/send', data=data) |
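A hedged usage sketch of the method above, using the text message shape from the WeChat Work API; the client object, agent id and user ids are placeholders, and it is assumed the method is exposed as client.message.send:
msg = {
    'msgtype': 'text',
    'text': {'content': 'Nightly build finished successfully'},
}
result = client.message.send(agent_id=1000002,
                             user_ids=['zhangsan', 'lisi'],
                             msg=msg)
print(result.get('invaliduser'))   # recipients that could not be reached, if any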
java | public static java.util.List<com.liferay.commerce.model.CommerceCountry> getCommerceCountriesByUuidAndCompanyId(
String uuid, long companyId) {
return getService()
.getCommerceCountriesByUuidAndCompanyId(uuid, companyId);
} |
java | public static <T, R1, R> CompletableFuture<R> forEach2(CompletableFuture<? extends T> value1, Function<? super T, CompletableFuture<R1>> value2,
BiFunction<? super T, ? super R1, ? extends R> yieldingFunction) {
return value1.thenCompose(in -> {
CompletableFuture<R1> a = value2.apply(in);
return a.thenApply(ina -> yieldingFunction.apply(in, ina));
});
} |
java | public double continueToMargin(double[] origin, double[] delta) {
assert (delta.length == 2 && origin.length == 2);
double factor = Double.POSITIVE_INFINITY;
if(delta[0] > 0) {
factor = Math.min(factor, (maxx - origin[0]) / delta[0]);
}
else if(delta[0] < 0) {
factor = Math.min(factor, (origin[0] - minx) / -delta[0]);
}
if(delta[1] > 0) {
factor = Math.min(factor, (maxy - origin[1]) / delta[1]);
}
else if(delta[1] < 0) {
factor = Math.min(factor, (origin[1] - miny) / -delta[1]);
}
return factor;
} |
python | def _save_stats(self, epoch_data: EpochData) -> None:
"""
Extend ``epoch_data`` with stream:variable:aggregation data.
:param epoch_data: data source from which the statistics are computed
"""
for stream_name in epoch_data.keys():
for variable, aggregations in self._variable_aggregations.items():
# variables are already checked in the AccumulatingHook; hence, we do not check them here
epoch_data[stream_name][variable] = OrderedDict(
{aggr: ComputeStats._compute_aggregation(aggr, self._accumulator[stream_name][variable])
for aggr in aggregations}) |
java | public final void entryRuleOpSingleAssign() throws RecognitionException {
try {
// InternalXbaseWithAnnotations.g:234:1: ( ruleOpSingleAssign EOF )
// InternalXbaseWithAnnotations.g:235:1: ruleOpSingleAssign EOF
{
if ( state.backtracking==0 ) {
before(grammarAccess.getOpSingleAssignRule());
}
pushFollow(FOLLOW_1);
ruleOpSingleAssign();
state._fsp--;
if (state.failed) return ;
if ( state.backtracking==0 ) {
after(grammarAccess.getOpSingleAssignRule());
}
match(input,EOF,FOLLOW_2); if (state.failed) return ;
}
}
catch (RecognitionException re) {
reportError(re);
recover(input,re);
}
finally {
}
return ;
} |
java | public static Observable<Intent> fromBroadcast(Context context, IntentFilter intentFilter) {
return fromBroadcast(context, intentFilter, NO_OP_ORDERED_BROADCAST_STRATEGY);
} |
java | public static Number toNumber(Object value, Number defaultValue) {
return convert(Number.class, value, defaultValue);
} |
python | def get_macs(vm_, **kwargs):
'''
Return a list off MAC addresses from the named vm
:param vm_: name of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.get_macs <domain>
'''
doc = ElementTree.fromstring(get_xml(vm_, **kwargs))
return [node.get('address') for node in doc.findall('devices/interface/mac')] |
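A sketch of calling it directly from Python rather than via the salt CLI example above; the domain name and connection URI are placeholders:
macs = get_macs('ubuntu18-vm', connection='qemu:///system')
print(macs)    # e.g. ['52:54:00:12:34:56']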
python | def clear_cache(self):
"""
Clears any cache associated with the serial model and the engines
seen by the direct view.
"""
self.underlying_model.clear_cache()
try:
logger.info('DirectView results has {} items. Clearing.'.format(
len(self._dv.results)
))
self._dv.purge_results('all')
if self._purge_client:
self._dv.client.purge_everything()
except:
pass |
java | @NotNull
public static byte[] decode(byte[] source, int off, int len)
throws Base64DecoderException {
return decode(source, off, len, DECODABET);
} |
java | public String getAvatarHash() {
byte[] bytes = getAvatar();
if (bytes == null) {
return null;
}
MessageDigest digest;
try {
digest = MessageDigest.getInstance("SHA-1");
}
catch (NoSuchAlgorithmException e) {
LOGGER.log(Level.SEVERE, "Failed to get message digest", e);
return null;
}
digest.update(bytes);
return StringUtils.encodeHex(digest.digest());
} |
python | def create_assign_context_menu(self):
"""
Create a context menu, then set the created QMenu as the context menu.
This builds the menu with all required actions and signal-slot connections.
"""
menu = QMenu("AutoKey")
self._build_menu(menu)
self.setContextMenu(menu) |
java | public Request delete(String roleId) {
Asserts.assertNotNull(roleId, "role id");
final String url = baseUrl
.newBuilder()
.addEncodedPathSegments("api/v2/roles")
.addEncodedPathSegments(roleId)
.build()
.toString();
VoidRequest request = new VoidRequest(this.client, url, "DELETE");
request.addHeader("Authorization", "Bearer " + apiToken);
return request;
} |
java | @Override
public Double getLastInstanceHourDiskWrite(String instanceId) {
Dimension instanceDimension = new Dimension().withName("InstanceId")
.withValue(instanceId);
GetMetricStatisticsRequest request = new GetMetricStatisticsRequest()
.withMetricName("DiskWriteBytes")
.withNamespace("AWS/EC2")
.withPeriod(60 * 60)
// one hour
.withDimensions(instanceDimension)
// to get metrics a specific
// instance
.withStatistics("Average")
.withStartTime(DateTime.now().minusHours(1).toDate())
.withEndTime(new Date());
GetMetricStatisticsResult result = cloudWatchClient
.getMetricStatistics(request);
// to read data
List<Datapoint> datapoints = result.getDatapoints();
if (CollectionUtils.isEmpty(datapoints)) return 0.0;
Datapoint datapoint = datapoints.get(0);
return datapoint.getAverage();
} |
python | def register_jvm_tool(cls,
register,
key,
classpath_spec=None,
main=None,
custom_rules=None,
fingerprint=True,
classpath=None,
help=None,
removal_version=None,
removal_hint=None):
"""Registers a jvm tool under `key` for lazy classpath resolution.
Classpaths can be retrieved in `execute` scope via `tool_classpath_from_products`.
NB: If the tool's `main` class name is supplied the tool classpath will be shaded.
:param register: A function that can register options with the option system.
:param unicode key: The key the tool configuration should be registered under.
:param unicode classpath_spec: The tool classpath target address spec that can be used to
override this tool's classpath; by default, `//:[key]`.
:param unicode main: The fully qualified class name of the tool's main class if shading of the
tool classpath is desired.
:param list custom_rules: An optional list of `Shader.Rule`s to apply before the automatically
generated binary jar shading rules. This is useful for excluding
classes shared between the tool and the code it runs over. The
canonical example is the `org.junit.Test` annotation read by junit
runner tools from user code. In this sort of case the shared code must
have a uniform name between the tool and the user code and so the
shared code must be excluded from shading.
:param bool fingerprint: Indicates whether to include the jvm tool in the task's fingerprint.
Note that unlike for other options, fingerprinting is enabled for tools
by default.
:param list classpath: A list of one or more `JarDependency` objects that form this tool's
default classpath. If the classpath is optional, supply an empty list;
otherwise the default classpath of `None` indicates the `classpath_spec`
must point to a target defined in a BUILD file that provides the tool
classpath.
:param unicode help: An optional custom help string; otherwise a reasonable one is generated.
:param string removal_version: A semver at which this tool will be removed.
:param unicode removal_hint: A hint on how to migrate away from this tool.
"""
def formulate_help():
if classpath:
return ('Target address spec for overriding the classpath of the {} jvm tool which is, '
'by default: {}'.format(key, classpath))
else:
return 'Target address spec for specifying the classpath of the {} jvm tool.'.format(key)
help = help or formulate_help()
register('--{}'.format(key),
advanced=True,
type=target_option,
default='//:{}'.format(key) if classpath_spec is None else classpath_spec,
help=help,
fingerprint=fingerprint,
removal_version=removal_version,
removal_hint=removal_hint)
# TODO(John Sirois): Move towards requiring tool specs point to jvm_binary targets.
# These already have a main and are a natural place to house any custom shading rules. That
# would eliminate the need to pass main and custom_rules here.
# It is awkward that jars can no longer be inlined as dependencies - this will require 2 targets
# for every tool - the jvm_binary, and a jar_library for its dependencies to point to. It may
# be worth creating a JarLibrary subclass - say JarBinary, or else mixing in a Binary interface
# to JarLibrary to endow it with main and shade_rules attributes to allow for single-target
# definition of resolvable jvm binaries.
jvm_tool = cls.JvmTool(register.scope, key, classpath, main, custom_rules)
JvmToolMixin._jvm_tools.append(jvm_tool) |
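A sketch of how a task might use this hook inside register_options, assuming a pants v1-style Task subclass mixing in JvmToolMixin; the tool name and jar coordinates are illustrative:
class CheckstyleRun(JvmToolMixin, Task):
    @classmethod
    def register_options(cls, register):
        super(CheckstyleRun, cls).register_options(register)
        cls.register_jvm_tool(
            register,
            'checkstyle',
            classpath=[
                JarDependency(org='com.puppycrawl.tools', name='checkstyle', rev='6.19'),
            ],
            main='com.puppycrawl.tools.checkstyle.Main')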
java | public DescribeElasticLoadBalancersResult withElasticLoadBalancers(ElasticLoadBalancer... elasticLoadBalancers) {
if (this.elasticLoadBalancers == null) {
setElasticLoadBalancers(new com.amazonaws.internal.SdkInternalList<ElasticLoadBalancer>(elasticLoadBalancers.length));
}
for (ElasticLoadBalancer ele : elasticLoadBalancers) {
this.elasticLoadBalancers.add(ele);
}
return this;
} |