language | func_code_string |
---|---|
java | @XmlElement(name = "pageRequest", required = true)
@JsonProperty(value = "pageRequest", required = true)
@ApiModelProperty(value = "The page request.", position = 1, required = true)
@Override
public PageRequestDto getPageRequest() {
if (this.pageRequest == null) {
this.pageRequest = new PageRequestDto();
}
return pageRequest;
} |
python | def memory_map(self):
"""! @brief MemoryMap object."""
# Lazily construct the memory map.
if self._memory_map is None:
self._build_memory_regions()
self._build_flash_regions()
# Warn if there was no boot memory.
if not self._saw_startup:
LOG.warning("CMSIS-Pack device %s has no identifiable boot memory", self.part_number)
self._memory_map = MemoryMap(self._regions)
return self._memory_map |
python | def _debug_mode_responses(self, request, response):
"""Extra functionality available in debug mode.
- If pretty printed output was requested, force the content type to text. This
causes the browser to not try to format the output in any way.
- If SQL profiling is turned on, return a page with SQL query timing
information instead of the actual response.
"""
if django.conf.settings.DEBUG_GMN:
if 'pretty' in request.GET:
response['Content-Type'] = d1_common.const.CONTENT_TYPE_TEXT
if (
'HTTP_VENDOR_PROFILE_SQL' in request.META
or django.conf.settings.DEBUG_PROFILE_SQL
):
response_list = []
for query in django.db.connection.queries:
response_list.append('{}\n{}'.format(query['time'], query['sql']))
return django.http.HttpResponse(
'\n\n'.join(response_list), d1_common.const.CONTENT_TYPE_TEXT
)
return response |
python | def standard_settings(self):
"""Sets up standard settings for a nice visualization."""
cmd.set('bg_rgb', [1.0, 1.0, 1.0]) # White background
cmd.set('depth_cue', 0) # Turn off depth cueing (no fog)
cmd.set('cartoon_side_chain_helper', 1) # Improve combined visualization of sticks and cartoon
cmd.set('cartoon_fancy_helices', 1) # Nicer visualization of helices (using tapered ends)
cmd.set('transparency_mode', 1) # Turn on multilayer transparency
cmd.set('dash_radius', 0.05)
self.set_custom_colorset() |
java | @Override
protected void prepareActions() throws QTasteTestFailException {
mIndex = Integer.parseInt(mData[0].toString());
if (component instanceof ComboBox) {
ComboBox<?> combo = (ComboBox<?>) component;
if (combo.getItems().size() <= mIndex) {
throw new QTasteTestFailException("Specified index is out of bounds");
}
} else if (component instanceof ListView) {
ListView<?> list = (ListView<?>) component;
if (list.getItems().size() <= mIndex) {
throw new QTasteTestFailException("Specified index is out of bounds");
}
} else {
throw new QTasteTestFailException("Unsupported component");
}
} |
java | public JsonObject toJson() {
JsonObject body = new JsonObject();
body.add(JsonRpcProtocol.ID, id());
if (isError()) {
body.add(JsonRpcProtocol.ERROR, error().toJson());
} else {
body.add(JsonRpcProtocol.RESULT, result());
}
return body;
} |
java | public static int searchLast(long[] longArray, long value, int occurrence) {
if(occurrence <= 0 || occurrence > longArray.length) {
throw new IllegalArgumentException("Occurrence must be greater or equal to 1 and less than "
+ "the array length: " + occurrence);
}
int valuesSeen = 0;
for(int i = longArray.length-1; i >=0; i--) {
if(longArray[i] == value) {
valuesSeen++;
if(valuesSeen == occurrence) {
return i;
}
}
}
return -1;
} |
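
A short Python sketch (illustrative, not from the source) that replays the reverse scan above and doubles as a worked example:

```python
def search_last(values, target, occurrence=1):
    """Index of the `occurrence`-th match of `target`, counting from the
    end of `values`; -1 if there are not enough matches."""
    if occurrence <= 0 or occurrence > len(values):
        raise ValueError("occurrence must be between 1 and len(values)")
    seen = 0
    for i in range(len(values) - 1, -1, -1):
        if values[i] == target:
            seen += 1
            if seen == occurrence:
                return i
    return -1

assert search_last([7, 3, 7, 7], 7) == 3                 # last 7
assert search_last([7, 3, 7, 7], 7, occurrence=3) == 0   # third 7 from the end
assert search_last([7, 3, 7, 7], 3, occurrence=2) == -1  # only one 3 present
```
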
python | def event_handlers(self):
"""
The list of handlers registered for this node.
If the node is not a `Flow` and does not have its own list of
`handlers` the handlers registered at the level of the flow are returned.
This trick allows one to register different handlers at the level of the Task
for testing purposes. By default, we have a common list of handlers for all the nodes in the flow.
This choice facilitates the automatic installation of the handlers when we use callbacks to generate
new Works and Tasks!
"""
if self.is_flow:
return self._event_handlers
try:
return self._event_handlers
except AttributeError:
return self.flow._event_handlers |
java | static Path resolvePath(final Path base, final String... paths) {
return Paths.get(base.toString(), paths);
} |
python | def load_model(file_path):
"""
Loads an ONNX model to a ProtoBuf object.
:param file_path: ONNX file (full file name)
:return: ONNX model.
Example:
::
from onnxmltools.utils import load_model
onnx_model = load_model("SqueezeNet.onnx")
"""
if not path.exists(file_path):
raise FileNotFoundError("{0} was not found.".format(file_path))
model = onnx_proto.ModelProto()
with open(file_path, 'rb') as f:
model.ParseFromString(f.read())
return model |
python | def format(self):
"""Handles the actual behaviour involved with formatting.
To change the behaviour, this method should be overridden.
Returns
--------
list
A paginated output of the help command.
"""
values = {}
title = "Description"
description = self.command.description + "\n\n" + self.get_ending_note() if not self.is_cog() else inspect.getdoc(self.command)
sections = []
if isinstance(self.command, Command):
description = self.command.short_doc
sections = [{"name": "Usage", "value": self.get_command_signature()},
{"name": "More Info", "value": self.command.help.replace(self.command.short_doc, "").format(prefix=self.clean_prefix),
"inline": False}]
def category(tup):
cog = tup[1].cog_name
return cog + ':' if cog is not None else '\u200bNo Category:'
if self.is_bot():
title = self.bot.user.display_name + " Help"
data = sorted(self.filter_command_list(), key=category)
for category, commands in itertools.groupby(data, key=category):
section = {}
commands = list(commands)
if len(commands) > 0:
section['name'] = category
section['value'] = self.add_commands(commands)
section['inline'] = False
sections.append(section)
elif not sections or self.has_subcommands():
section = {"name": "Commands:", "inline": False, "value": self.add_commands(self.filter_command_list())}
sections.append(section)
values['title'] = title
values['description'] = description
values['sections'] = sections
return values |
python | def get_filters(self, request, **resources):
""" Make filters from GET variables.
:return dict: filters
"""
filters = dict()
if not self._meta.fields:
return filters
for field in request.GET.iterkeys():
tokens = field.split(LOOKUP_SEP)
field_name = tokens[0]
if field_name not in self._meta.fields:
continue
exclude = False
if tokens[-1] == 'not':
exclude = True
tokens.pop()
value = request.GET.getlist(field)
if len(tokens) == 1:
value = map(self._meta.model._meta.get_field(field_name).to_python, value)
if len(value) > 1:
tokens.append('in')
else:
value = value.pop()
filters[LOOKUP_SEP.join(tokens)] = (value, exclude)
return filters |
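
A simplified, framework-free sketch of the lookup-token handling above (illustrative only; it omits the model-field `to_python` coercion and assumes Django's `__` lookup separator):

```python
LOOKUP_SEP = '__'  # assumed: Django's lookup separator

def parse_filter(field, values, allowed_fields):
    """Mirror the token handling of get_filters() for one GET parameter."""
    tokens = field.split(LOOKUP_SEP)
    if tokens[0] not in allowed_fields:
        return None                      # unknown field: skipped upstream
    exclude = tokens[-1] == 'not'
    if exclude:
        tokens.pop()
    if len(tokens) == 1 and len(values) > 1:
        tokens.append('in')              # several values -> IN lookup
    value = values if len(values) > 1 else values[0]
    return LOOKUP_SEP.join(tokens), (value, exclude)

assert parse_filter('age', ['7'], {'age'}) == ('age', ('7', False))
assert parse_filter('age__not', ['7'], {'age'}) == ('age', ('7', True))
assert parse_filter('age', ['7', '9'], {'age'}) == ('age__in', (['7', '9'], False))
```
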
python | def is_try(task, source_env_prefix):
"""Determine if a task is a 'try' task (restricted privs).
This goes further than get_repo. We may or may not want
to keep this.
This checks for the following things::
* ``task.payload.env.GECKO_HEAD_REPOSITORY`` == "https://hg.mozilla.org/try/"
* ``task.payload.env.MH_BRANCH`` == "try"
* ``task.metadata.source`` == "https://hg.mozilla.org/try/..."
* ``task.schedulerId`` in ("gecko-level-1", )
Args:
task (dict): the task definition to check
source_env_prefix (str): The environment variable prefix that is used
to get repository information.
Returns:
bool: True if it's try
"""
# If get_repo() returns None, then _is_try_url() doesn't manage to process the URL
repo = get_repo(task, source_env_prefix) or ''
return any((
task['schedulerId'] in ('gecko-level-1', ),
'try' in _extract_from_env_in_payload(task, 'MH_BRANCH', default=''),
_is_try_url(repo),
_is_try_url(task['metadata'].get('source', '')),
)) |
java | public static String[] expandArguments(String[] args) throws Exception {
List<String> options = new ArrayList<String>();
for (int i = 0; i < args.length; i++) {
if (args[i].equals(OPTIONS_FILE)) {
if (i == args.length - 1) {
throw new Exception("Missing options file");
}
String fileName = args[++i];
File optionsFile = new File(fileName);
BufferedReader reader = null;
StringBuilder buffer = new StringBuilder();
try {
reader = new BufferedReader(new FileReader(optionsFile));
String nextLine = null;
while ((nextLine = reader.readLine()) != null) {
nextLine = nextLine.trim();
if (nextLine.length() == 0 || nextLine.startsWith("#")) {
// empty line or comment
continue;
}
buffer.append(nextLine);
if (nextLine.endsWith("\\")) {
if (buffer.charAt(0) == '\'' || buffer.charAt(0) == '"') {
throw new Exception(
"Multiline quoted strings not supported in file("
+ fileName + "): " + buffer.toString());
}
// Remove the trailing back-slash and continue
buffer.deleteCharAt(buffer.length() - 1);
} else {
// The buffer contains a full option
options.add(
removeQuotesEncolosingOption(fileName, buffer.toString()));
buffer.delete(0, buffer.length());
}
}
// Assert that the buffer is empty
if (buffer.length() != 0) {
throw new Exception("Malformed option in options file("
+ fileName + "): " + buffer.toString());
}
} catch (IOException ex) {
throw new Exception("Unable to read options file: " + fileName, ex);
} finally {
if (reader != null) {
try {
reader.close();
} catch (IOException ex) {
LOG.info("Exception while closing reader", ex);
}
}
}
} else {
// Regular option. Parse it and put it on the appropriate list
options.add(args[i]);
}
}
return options.toArray(new String[options.size()]);
} |
python | def add_developer_certificate(self, name, **kwargs):
"""Add a new developer certificate.
:param str name: name of the certificate (Required)
:param str description: Human readable description of this certificate,
not longer than 500 characters.
:returns: Certificate object
:rtype: Certificate
"""
kwargs['name'] = name
api = self._get_api(cert.DeveloperCertificateApi)
certificate = Certificate._create_request_map(kwargs)
# just pull the fields we care about
subset = cert.DeveloperCertificateRequestData.attribute_map
certificate = {k: v for k, v in certificate.items() if k in subset}
body = cert.DeveloperCertificateRequestData(**certificate)
dev_cert = api.create_developer_certificate(self.auth, body)
return self.get_certificate(dev_cert.id) |
python | def apply_translation(self, translation):
"""
Translate the current mesh.
Parameters
----------
translation : (3,) float
Translation in XYZ
"""
translation = np.asanyarray(translation, dtype=np.float64)
if translation.shape != (3,):
raise ValueError('Translation must be (3,)!')
matrix = np.eye(4)
matrix[:3, 3] = translation
self.apply_transform(matrix) |
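
The translation is applied through a standard homogeneous 4x4 matrix; a minimal check of the matrix this method builds:

```python
import numpy as np

translation = np.array([1.0, -2.0, 0.5])
matrix = np.eye(4)              # [[1,0,0,tx],[0,1,0,ty],[0,0,1,tz],[0,0,0,1]]
matrix[:3, 3] = translation

point = np.array([1.0, 1.0, 1.0, 1.0])  # a vertex in homogeneous coordinates
assert np.allclose(matrix @ point, [2.0, -1.0, 1.5, 1.0])
```
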
python | def noun_phrases_as_tokens(text):
'''Generate a bag of lists of unnormalized tokens representing noun
phrases from ``text``.
This is built around python's nltk library for getting Noun
Phrases (NPs). This is all documented in the NLTK Book
http://www.nltk.org/book/ch03.html and blog posts that cite the
book.
:rtype: list of lists of strings
'''
## from NLTK Book:
sentence_re = r'''(?x) # set flag to allow verbose regexps
([A-Z])(\.[A-Z])+\.? # abbreviations, e.g. U.S.A.
| \w+(-\w+)* # words with optional internal hyphens
| \$?\d+(\.\d+)?%? # currency and percentages, e.g. $12.40, 82%
| \.\.\. # ellipsis
| [][.,;"'?():-_`] # these are separate tokens
'''
## From Su Nam Kim paper:
## http://www.comp.nus.edu.sg/~kanmy/papers/10.1007_s10579-012-9210-3.pdf
grammar = r'''
NBAR:
{<NN.*|JJ>*<NN.*>} # Nouns and Adjectives, terminated with Nouns
NP:
{<NBAR>}
{<NBAR><IN><NBAR>} # Above, connected with in/of/etc...
'''
if len(text.strip()) == 0:
return []
chunker = nltk.RegexpParser(grammar)
toks = nltk.regexp_tokenize(text, sentence_re)
postoks = nltk.tag.pos_tag(toks)
#print postoks
tree = chunker.parse(postoks)
stops = stopwords.words('english')
stops += dossier_stopwords()
## These next four functions are standard uses of NLTK illustrated by
## http://alexbowe.com/au-naturale/
## https://gist.github.com/alexbowe/879414
def leaves(tree):
'''Finds NP (nounphrase) leaf nodes of a chunk tree.'''
for subtree in tree.subtrees(filter = lambda t: t.label()=='NP'):
yield subtree.leaves()
def acceptable_word(word):
'''Checks conditions for acceptable word: length, stopword.'''
return 2 <= len(word) <= 40 and word.lower() not in stops
def get_terms(tree):
for leaf in leaves(tree):
yield [w for w,t in leaf if acceptable_word(w)]
return list(get_terms(tree)) |
python | def load_last_visible_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbarslist:
dic[toolbar.objectName()] = toolbar
toolbars = []
for name in toolbars_names:
if name in dic:
toolbars.append(dic[name])
self.visible_toolbars = toolbars
else:
self.get_visible_toolbars()
self._update_show_toolbars_action() |
java | @XmlElementDecl(namespace = "urn:oasis:names:tc:xacml:2.0:policy:schema:os", name = "Policy")
public JAXBElement<PolicyType> createPolicy(PolicyType value) {
return new JAXBElement<PolicyType>(_Policy_QNAME, PolicyType.class, null, value);
} |
java | public void marshall(ResourceInventory resourceInventory, ProtocolMarshaller protocolMarshaller) {
if (resourceInventory == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(resourceInventory.getResourceId(), RESOURCEID_BINDING);
protocolMarshaller.marshall(resourceInventory.getResourceType(), RESOURCETYPE_BINDING);
protocolMarshaller.marshall(resourceInventory.getResourceArn(), RESOURCEARN_BINDING);
protocolMarshaller.marshall(resourceInventory.getPlatform(), PLATFORM_BINDING);
protocolMarshaller.marshall(resourceInventory.getPlatformVersion(), PLATFORMVERSION_BINDING);
protocolMarshaller.marshall(resourceInventory.getResourceOwningAccountId(), RESOURCEOWNINGACCOUNTID_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
python | def submit(self, command_line, name = None, array = None, dependencies = [], exec_dir = None, log_dir = "logs", dry_run = False, verbosity = 0, stop_on_failure = False, **kwargs):
"""Submits a job that will be executed in the grid."""
# add job to database
self.lock()
job = add_job(self.session, command_line, name, dependencies, array, exec_dir=exec_dir, log_dir=log_dir, stop_on_failure=stop_on_failure, context=self.context, **kwargs)
logger.info("Added job '%s' to the database." % job)
if dry_run:
print("Would have added the Job")
print(job)
print("to the database to be executed in the grid with options:", str(kwargs))
self.session.delete(job)
logger.info("Deleted job '%s' from the database due to dry-run option" % job)
job_id = None
else:
job_id = self._submit_to_grid(job, name, array, dependencies, log_dir, verbosity, **kwargs)
self.session.commit()
self.unlock()
return job_id |
python | def choose_database_name(metadata, config):
"""
Choose the database name to use.
As a default, databases should be named after the service that uses them. In addition,
database names should be different between unit testing and runtime so that there is
no chance of a unit test dropping a real database by accident.
"""
if config.database_name is not None:
# we allow -- but do not encourage -- database name configuration
return config.database_name
if metadata.testing:
# by convention, we provision different databases for unit testing and runtime
return f"{metadata.name}_test_db"
return f"{metadata.name}_db" |
python | def FromStream(cls, stream):
"""Create a DataStreamSelector from a DataStream.
Args:
stream (DataStream): The data stream that we want to convert.
"""
if stream.system:
specifier = DataStreamSelector.MatchSystemOnly
else:
specifier = DataStreamSelector.MatchUserOnly
return DataStreamSelector(stream.stream_type, stream.stream_id, specifier) |
java | Collection<LifecycleQueryInstalledChaincodesProposalResponse> lifecycleQueryInstalledChaincodes(LifecycleQueryInstalledChaincodesRequest lifecycleQueryInstalledChaincodesRequest, Collection<Peer> peers) throws InvalidArgumentException, ProposalException {
logger.trace("LifecycleQueryInstalledChaincodes");
if (null == lifecycleQueryInstalledChaincodesRequest) {
throw new InvalidArgumentException("The lifecycleQueryInstalledChaincodesRequest parameter can not be null.");
}
checkPeers(peers);
if (!isSystemChannel()) {
throw new InvalidArgumentException("LifecycleQueryInstalledChaincodes should only be invoked on system channel.");
}
try {
TransactionContext context = getTransactionContext(lifecycleQueryInstalledChaincodesRequest);
FabricProposal.Proposal proposalBuilder = LifecycleQueryInstalledChaincodesBuilder.newBuilder().context(context).build();
SignedProposal qProposal = getSignedProposal(context, proposalBuilder);
return sendProposalToPeers(peers, qProposal, context, LifecycleQueryInstalledChaincodesProposalResponse.class);
} catch (ProposalException e) {
throw e;
} catch (Exception e) {
throw new ProposalException(format("Query for peer %s channels failed. " + e.getMessage(), name), e);
}
} |
python | def user_stats(request):
"""
JSON of user stats of the user
GET parameters:
html (bool):
turn on the HTML version of the API, defaults to false
user (int):
identifier of the user, defaults to logged user
concepts (list):
list of identifiers of concepts, defaults to all concepts
lang (str):
language of requested concepts, defaults to language from django
"""
user = get_user_id(request)
language = get_language(request)
concepts = None # meaning all concepts
if "concepts" in request.GET:
concepts = Concept.objects.filter(lang=language, active=True,
identifier__in=load_query_json(request.GET, "concepts"))
data = UserStat.objects.get_user_stats(user, language, concepts)
return render_json(request, data, template='concepts_json.html', help_text=user_stats.__doc__) |
java | @Override
public EClass getIfcColourSpecification() {
if (ifcColourSpecificationEClass == null) {
ifcColourSpecificationEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc4Package.eNS_URI)
.getEClassifiers().get(103);
}
return ifcColourSpecificationEClass;
} |
python | def add_upsert(self, action, meta_action, doc_source, update_spec):
"""
Store document sources for "insert" actions and decide whether an
"update" action needs to fetch the source before it can be applied
"""
# Whenever update_spec is provided to this method
# it means that doc source needs to be retrieved
# from Elasticsearch. It means also that source
# is not stored in local buffer
if update_spec:
self.bulk_index(action, meta_action)
# -1 -> to get latest index number
# -1 -> to get action instead of meta_action
# Update document based on source retrieved from ES
self.add_doc_to_update(action, update_spec, len(self.action_buffer) - 2)
else:
# Insert and update operations provide source
# Store it in local buffer and use it for upcoming updates
# inside same buffer
# add_to_sources will not be called for delete operation
# as it does not provide doc_source
if doc_source:
self.add_to_sources(action, doc_source)
self.bulk_index(action, meta_action) |
java | @ShellMethod(key = "generate-anonymous-user", value = "Generate an anonymous (persistent) username identifier")
public void generateUsername(
@ShellOption(value = {"username"},
help = "Authenticated username") final String username,
@ShellOption(value = {"service"},
help = "Service application URL for which CAS may generate the identifier") final String service,
@ShellOption(value = {"salt"},
help = "Salt used to generate and encode the anonymous identifier") final String salt) {
val generator = new ShibbolethCompatiblePersistentIdGenerator(salt);
val id = generator.generate(username, service);
LOGGER.info("Generated identifier:\n[{}]", id);
} |
java | private void confirmMatchForDsTypeModel(ValidationResult result,
DsTypeModel typeModel,
String contentModelPid,
ObjectInfo object) {
String id = typeModel.getId();
DatastreamInfo dsInfo = object.getDatastreamInfo(id);
if (dsInfo == null) {
// If there is no datastream by that name, nothing to check.
result.addNote(ValidationResultNotation
.noMatchingDatastreamId(contentModelPid, id));
return;
}
Collection<Form> forms = typeModel.getForms();
if (forms.isEmpty()) {
// If the type model has no forms, it's an automatic match.
return;
}
// Otherwise, the datastream must meet the constraints of at least one form.
for (Form form : forms) {
if (meetsConstraint(dsInfo.getMimeType(), form.getMimeType())
&& meetsConstraint(dsInfo.getFormatUri(), form
.getFormatUri())) {
return;
}
}
result.addNote(ValidationResultNotation
.datastreamDoesNotMatchForms(contentModelPid, id));
} |
python | def write(s, path, encoding="utf-8"):
"""Write string to text file.
"""
is_gzip = is_gzip_file(path)
with open(path, "wb") as f:
if is_gzip:
f.write(zlib.compress(s.encode(encoding)))
else:
f.write(s.encode(encoding)) |
python | def response_type_is_in_registered_response_types(provider, authentication_request):
"""
Verifies that the requested response type is allowed for the client making the request.
:param provider: provider instance
:param authentication_request: authentication request to verify
:raise InvalidAuthenticationRequest: if the response type is not allowed
"""
error = InvalidAuthenticationRequest('Response type is not registered',
authentication_request,
oauth_error='invalid_request')
try:
allowed_response_types = provider.clients[authentication_request['client_id']]['response_types']
except KeyError as e:
logger.error('client metadata is missing response_types')
raise error
if not is_allowed_response_type(authentication_request['response_type'], allowed_response_types):
logger.error('Response type \'{}\' is not registered'.format(' '.join(authentication_request['response_type'])))
raise error |
python | def origin(self):
"""Return an URL with scheme, host and port parts only.
user, password, path, query and fragment are removed.
"""
# TODO: add a keyword-only option for keeping user/pass maybe?
if not self.is_absolute():
raise ValueError("URL should be absolute")
if not self._val.scheme:
raise ValueError("URL should have scheme")
v = self._val
netloc = self._make_netloc(None, None, v.hostname, v.port, encode=False)
val = v._replace(netloc=netloc, path="", query="", fragment="")
return URL(val, encoded=True) |
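
This matches the `origin()` method of yarl's `URL` class; assuming yarl, the behaviour can be checked as follows:

```python
from yarl import URL

url = URL("http://user:pass@example.com:8080/path?q=1#frag")
assert url.origin() == URL("http://example.com:8080")  # user/pass/path/query/fragment dropped
```
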
python | def extend(self, other):
"""
Return a new HyperparameterDefaults instance containing the
hyperparameters from the current instance combined with
those from other.
It is an error if self and other have any hyperparameters in
common.
"""
overlap = [key for key in other.defaults if key in self.defaults]
if overlap:
raise ValueError(
"Duplicate hyperparameter(s): %s" % " ".join(overlap))
new = dict(self.defaults)
new.update(other.defaults)
return HyperparameterDefaults(**new) |
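
The merge rule above is a disjoint dict union; a standalone sketch of the same semantics (hypothetical hyperparameter names):

```python
def merge_disjoint(a, b):
    """Combine two hyperparameter dicts; overlapping keys are an error."""
    overlap = [key for key in b if key in a]
    if overlap:
        raise ValueError("Duplicate hyperparameter(s): %s" % " ".join(overlap))
    merged = dict(a)
    merged.update(b)
    return merged

assert merge_disjoint({"lr": 0.1}, {"epochs": 5}) == {"lr": 0.1, "epochs": 5}
try:
    merge_disjoint({"lr": 0.1}, {"lr": 0.01})
except ValueError as exc:
    assert "lr" in str(exc)
```
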
python | def stringToTextValues(s, listSeparator=',', charList=None, strict=False):
"""
Returns list of strings.
"""
if charList is None:
charList = escapableCharList
def escapableChar (c):
return c in charList
def error(msg):
if strict:
raise ParseError(msg)
else:
logging.error(msg)
# vars which control state machine
charIterator = enumerate(s)
state = "read normal"
current = []
results = []
while True:
try:
charIndex, char = next(charIterator)
except:
char = "eof"
if state == "read normal":
if char == '\\':
state = "read escaped char"
elif char == listSeparator:
state = "read normal"
current = "".join(current)
results.append(current)
current = []
elif char == "eof":
state = "end"
else:
state = "read normal"
current.append(char)
elif state == "read escaped char":
if escapableChar(char):
state = "read normal"
if char in 'nN':
current.append('\n')
else:
current.append(char)
else:
state = "read normal"
# leave unrecognized escaped characters for later passes
current.append('\\' + char)
elif state == "end": # an end state
if len(current) or len(results) == 0:
current = "".join(current)
results.append(current)
return results
elif state == "error": # an end state
return results
else:
state = "error"
error("unknown state: '{0!s}' reached in {1!s}".format(state, s)) |
python | def matrix_transpose(m):
""" Transposes the input matrix.
The input matrix :math:`m` is a 2-dimensional array.
:param m: input matrix with dimensions :math:`(n \\times m)`
:type m: list, tuple
:return: transpose matrix with dimensions :math:`(m \\times n)`
:rtype: list
"""
num_cols = len(m)
num_rows = len(m[0])
m_t = []
for i in range(num_rows):
temp = []
for j in range(num_cols):
temp.append(m[j][i])
m_t.append(temp)
return m_t |
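
A quick check of the row/column swap:

```python
m = [[1, 2, 3],
     [4, 5, 6]]                      # 2 x 3
assert matrix_transpose(m) == [[1, 4],
                               [2, 5],
                               [3, 6]]  # 3 x 2
```
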
java | @SuppressWarnings("unchecked")
private void prepareListeners() {
if (preparedListeners != null && preparedUnspecifiedListeners != null) {
// Already created, skip
return;
}
preparedListeners = new ConcurrentHashMap<>();
Stream<Class<? extends GloballyAttachableListener>> eventTypes = Stream.concat(
listeners.keySet().stream(),
Stream.concat(listenerSuppliers.keySet().stream(),
listenerFunctions.keySet().stream())
).distinct();
eventTypes.forEach(type -> {
ArrayList<Function<DiscordApi, GloballyAttachableListener>> typeListenerFunctions = new ArrayList<>();
listeners.getOrDefault(type, Collections.emptyList()).forEach(
listener -> typeListenerFunctions.add(api -> listener)
);
listenerSuppliers.getOrDefault(type, Collections.emptyList()).forEach(
supplier -> typeListenerFunctions.add(api -> supplier.get())
);
listenerFunctions.getOrDefault(type, Collections.emptyList()).forEach(
function -> typeListenerFunctions.add((Function<DiscordApi, GloballyAttachableListener>) function)
);
preparedListeners.put(type, typeListenerFunctions);
});
// Unspecified Listeners
preparedUnspecifiedListeners = new CopyOnWriteArrayList<>(unspecifiedListenerFunctions);
unspecifiedListenerSuppliers.forEach(supplier -> preparedUnspecifiedListeners.add((api) -> supplier.get()));
unspecifiedListeners.forEach(listener -> preparedUnspecifiedListeners.add((api) -> listener));
} |
python | def githubWebHookConsumer(self, *args, **kwargs):
"""
Consume GitHub WebHook
Capture a GitHub event and publish it via pulse, if it's a push,
release or pull request.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["githubWebHookConsumer"], *args, **kwargs) |
python | def do_exec(self, filename):
"""
::
Usage:
exec FILENAME
executes the commands in the file. See also the script command.
Arguments:
FILENAME The name of the file
"""
if not filename:
Console.error("the command requires a filename as parameter")
return
if os.path.exists(filename):
with open(filename, "r") as f:
for line in f:
Console.ok("> {:}".format(str(line)))
self.onecmd(line)
else:
Console.error('file "{:}" does not exist.'.format(filename))
sys.exit() |
python | def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
# replace - with _ as a local version separator
version = self.version.replace('-', '_')
return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
pyver, abi, arch) |
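
The formatting rules can be replayed by hand with a hypothetical stand-in object (attribute values invented for illustration):

```python
from types import SimpleNamespace

wheel = SimpleNamespace(name='demo', version='1.0-rc1', buildver='',
                        pyver=['py3'], abi=['none'], arch=['any'])
version = wheel.version.replace('-', '_')   # '-' becomes the local version separator '_'
assert '%s-%s%s-%s-%s-%s.whl' % (wheel.name, version, '',
                                 '.'.join(wheel.pyver), '.'.join(wheel.abi),
                                 '.'.join(wheel.arch)) == 'demo-1.0_rc1-py3-none-any.whl'
```
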
java | @Override
EntityManager getEMInvocationInfo(boolean requireTx, LockModeType mode)
{
final boolean isTraceOn = TraceComponent.isAnyTracingEnabled();
if (isTraceOn && tc.isEntryEnabled())
{
Tr.entry(tc, "getEMInvocationInfo : " + requireTx + " : " + ((mode == null) ? "null" : mode));
}
if (requireTx || (mode != null && !LockModeType.NONE.equals(mode)))
{
throw new UnsupportedOperationException("This entity manager cannot perform operations that require a transactional context");
}
if (!ivEnlisted)
{
UOWCoordinator uowCoord = ivAbstractJPAComponent.getUOWCurrent().getUOWCoord();
// Enlist in either the global tran or LTC
if (uowCoord.isGlobal())
{
// Register invocation object to transaction manager for clean up
registerEmInvocation(uowCoord, this); //d638095.2
}
else
{
// Register invocation object to LTC for clean up
LocalTransactionCoordinator ltCoord = (LocalTransactionCoordinator) uowCoord;
ltCoord.enlistSynchronization(this);
}
// Mark this em as enlisted for clean up
ivEnlisted = true;
}
if (isTraceOn && tc.isEntryEnabled())
Tr.exit(tc, "getEMInvocationInfo : " + ivEm);
return ivEm;
} |
java | static private void collectStats(double elapsedSec1,double elapsedSec2,List saved,List result,double[] stats)
{
stats[0] += elapsedSec1;
stats[1] += elapsedSec2;
Set fastVals = new HashSet();
Set slowVals = new HashSet();
for (int i=0; i<saved.size(); i++) {
fastVals.add( ((LookupResult)saved.get(i)).value );
}
stats[2] += fastVals.size();
for (int i=0; i<result.size(); i++) {
slowVals.add( ((LookupResult)result.get(i)).value );
}
stats[3] += slowVals.size();
if (fastVals.size()==slowVals.size()) stats[4]++;
} |
java | static ZoneRules generateZoneRules(String zoneId) {
TimeZone timeZone = TimeZone.getFrozenTimeZone(zoneId);
// Assumption #0
verify(timeZone instanceof OlsonTimeZone, zoneId,
"Unexpected time zone class " + timeZone.getClass());
OlsonTimeZone tz = (OlsonTimeZone) timeZone;
TimeZoneRule[] rules = tz.getTimeZoneRules();
// Assumption #1
InitialTimeZoneRule initial = (InitialTimeZoneRule) rules[0];
ZoneOffset baseStandardOffset = millisToOffset(initial.getRawOffset());
ZoneOffset baseWallOffset =
millisToOffset((initial.getRawOffset() + initial.getDSTSavings()));
List<ZoneOffsetTransition> standardOffsetTransitionList = new ArrayList<>();
List<ZoneOffsetTransition> transitionList = new ArrayList<>();
List<ZoneOffsetTransitionRule> lastRules = new ArrayList<>();
int preLastDstSavings = 0;
AnnualTimeZoneRule last1 = null;
AnnualTimeZoneRule last2 = null;
TimeZoneTransition transition = tz.getNextTransition(Long.MIN_VALUE, false);
int transitionCount = 1;
// This loop has two possible exit conditions (in normal operation):
// 1. for zones that end with a static value and have no ongoing DST changes, it will exit
// via the normal condition (transition != null)
// 2. for zones with ongoing DST changes (represented by a "final zone" in ICU4J, and by
// "last rules" in java.time) the "break transitionLoop" will be used to exit the loop.
transitionLoop:
while (transition != null) {
TimeZoneRule from = transition.getFrom();
TimeZoneRule to = transition.getTo();
boolean hadEffect = false;
if (from.getRawOffset() != to.getRawOffset()) {
standardOffsetTransitionList.add(new ZoneOffsetTransition(
TimeUnit.MILLISECONDS.toSeconds(transition.getTime()),
millisToOffset(from.getRawOffset()),
millisToOffset(to.getRawOffset())));
hadEffect = true;
}
int fromTotalOffset = from.getRawOffset() + from.getDSTSavings();
int toTotalOffset = to.getRawOffset() + to.getDSTSavings();
if (fromTotalOffset != toTotalOffset) {
transitionList.add(new ZoneOffsetTransition(
TimeUnit.MILLISECONDS.toSeconds(transition.getTime()),
millisToOffset(fromTotalOffset),
millisToOffset(toTotalOffset)));
hadEffect = true;
}
// Assumption #5
verify(hadEffect, zoneId, "Transition changed neither total nor raw offset.");
if (to instanceof AnnualTimeZoneRule) {
// The presence of an AnnualTimeZoneRule is taken as an indication of a final rule.
if (last1 == null) {
preLastDstSavings = from.getDSTSavings();
last1 = (AnnualTimeZoneRule) to;
// Assumption #4
verify(last1.getEndYear() == AnnualTimeZoneRule.MAX_YEAR, zoneId,
"AnnualTimeZoneRule is not permanent.");
} else {
last2 = (AnnualTimeZoneRule) to;
// Assumption #4
verify(last2.getEndYear() == AnnualTimeZoneRule.MAX_YEAR, zoneId,
"AnnualTimeZoneRule is not permanent.");
// Assumption #3
transition = tz.getNextTransition(transition.getTime(), false);
verify(transition.getTo() == last1, zoneId,
"Unexpected rule after 2 AnnualTimeZoneRules.");
break transitionLoop;
}
} else {
// Assumption #2
verify(last1 == null, zoneId, "Unexpected rule after AnnualTimeZoneRule.");
}
verify(transitionCount <= MAX_TRANSITIONS, zoneId,
"More than " + MAX_TRANSITIONS + " transitions.");
transition = tz.getNextTransition(transition.getTime(), false);
transitionCount++;
}
if (last1 != null) {
// Assumption #3
verify(last2 != null, zoneId, "Only one AnnualTimeZoneRule.");
lastRules.add(toZoneOffsetTransitionRule(last1, preLastDstSavings));
lastRules.add(toZoneOffsetTransitionRule(last2, last1.getDSTSavings()));
}
return ZoneRules.of(baseStandardOffset, baseWallOffset, standardOffsetTransitionList,
transitionList, lastRules);
} |
java | boolean addContext(boolean isRoot, Vector names, Mode mode) {
if (modeMap == null)
modeMap = new ContextMap();
return modeMap.put(isRoot, names, mode);
} |
java | public static <T extends MethodDescription> ElementMatcher.Junction<T> isOverriddenFromGeneric(ElementMatcher<? super TypeDescription.Generic> matcher) {
return new MethodOverrideMatcher<T>(matcher);
} |
java | @SuppressWarnings("PMD.AvoidDeeplyNestedIfStmts") // agreed PMD, fixme
private void clean(RallyCollector collector, List<RallyProject> existingProjects) {
Set<ObjectId> uniqueIDs = new HashSet<>();
for (com.capitalone.dashboard.model.Component comp : dbComponentRepository.findAll()) {
if (comp.getCollectorItems() != null && !comp.getCollectorItems().isEmpty()) {
List<CollectorItem> itemList = comp.getCollectorItems().get(CollectorType.AgileTool);
if (itemList != null) {
for (CollectorItem ci : itemList) {
if (ci != null && ci.getCollectorId().equals(collector.getId())) {
uniqueIDs.add(ci.getId());
}
}
}
}
}
List<RallyProject> stateChangeJobList = new ArrayList<>();
Set<ObjectId> udId = new HashSet<>();
udId.add(collector.getId());
for (RallyProject job : existingProjects) {
// collect the jobs that need to change state : enabled vs disabled.
if ((job.isEnabled() && !uniqueIDs.contains(job.getId())) // it was enabled but not on a dashboard
|| (!job.isEnabled() && uniqueIDs.contains(job.getId()))) { // OR it was disabled and now on a dashboard
job.setEnabled(uniqueIDs.contains(job.getId()));
stateChangeJobList.add(job);
}
}
if (!CollectionUtils.isEmpty(stateChangeJobList)) {
rallyProjectRepository.save(stateChangeJobList);
}
} |
python | def raw_chroma_accuracy(ref_voicing, ref_cent, est_voicing, est_cent,
cent_tolerance=50):
"""Compute the raw chroma accuracy given two pitch (frequency) sequences
in cents and matching voicing indicator sequences. The first pitch and
voicing arrays are treated as the reference (truth), and the second two as
the estimate (prediction). All 4 sequences must be of the same length.
Examples
--------
>>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')
>>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')
>>> (ref_v, ref_c,
... est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,
... ref_freq,
... est_time,
... est_freq)
>>> raw_chroma = mir_eval.melody.raw_chroma_accuracy(ref_v, ref_c,
... est_v, est_c)
Parameters
----------
ref_voicing : np.ndarray
Reference boolean voicing array
ref_cent : np.ndarray
Reference pitch sequence in cents
est_voicing : np.ndarray
Estimated boolean voicing array
est_cent : np.ndarray
Estimate pitch sequence in cents
cent_tolerance : float
Maximum absolute deviation for a cent value to be considered correct
(Default value = 50)
Returns
-------
raw_chroma : float
Raw chroma accuracy, the fraction of voiced frames in ref_cent for
which est_cent provides a correct frequency values (within
cent_tolerance cents), ignoring octave errors
References
----------
.. [#] J. Salamon, E. Gomez, D. P. W. Ellis and G. Richard, "Melody
Extraction from Polyphonic Music Signals: Approaches, Applications
and Challenges", IEEE Signal Processing Magazine, 31(2):118-134,
Mar. 2014.
.. [#] G. E. Poliner, D. P. W. Ellis, A. F. Ehmann, E. Gomez, S.
Streich, and B. Ong. "Melody transcription from music audio:
Approaches and evaluation", IEEE Transactions on Audio, Speech, and
Language Processing, 15(4):1247-1256, 2007.
"""
validate_voicing(ref_voicing, est_voicing)
validate(ref_voicing, ref_cent, est_voicing, est_cent)
ref_voicing = ref_voicing.astype(bool)
est_voicing = est_voicing.astype(bool)
# When input arrays are empty, return 0 by special case
if ref_voicing.size == 0 or est_voicing.size == 0 \
or ref_cent.size == 0 or est_cent.size == 0:
return 0.
# If there are no voiced frames in reference, metric is 0
if ref_voicing.sum() == 0:
return 0.
# Raw chroma = same as raw pitch except that octave errors are ignored.
cent_diff = np.abs(ref_cent - est_cent)
octave = 1200*np.floor(cent_diff/1200.0 + 0.5)
matching_voicing = ref_voicing * (est_cent > 0)
cent_diff = np.abs(cent_diff - octave)[matching_voicing]
frame_correct = (cent_diff < cent_tolerance)
n_voiced = float(ref_voicing.sum())
raw_chroma = (frame_correct).sum()/n_voiced
return raw_chroma |
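
The octave folding is the only step that distinguishes raw chroma from raw pitch accuracy; a worked example of that arithmetic:

```python
import numpy as np

cent_diff = np.abs(np.array([6000.0]) - np.array([7230.0]))  # 1230 cents apart
octave = 1200 * np.floor(cent_diff / 1200.0 + 0.5)           # nearest whole octave: 1200
folded = np.abs(cent_diff - octave)                          # 30 cents after folding
assert folded[0] == 30.0  # within the default 50-cent tolerance -> counted as correct
```
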
java | public void execute() {
try {
final IntermediateModel intermediateModel =
new IntermediateModelBuilder(models, codeGenBinDirectory).build();
// Dump the intermediate model to a file
writeIntermediateModel(intermediateModel);
emitCode(intermediateModel);
} catch (Exception e) {
throw new RuntimeException(
"Failed to generate code. Exception message : "
+ e.getMessage(), e);
}
} |
java | @SuppressWarnings("unchecked")
public void commitValue(CmsWidgetDialog dialog) throws CmsException {
if (m_baseCollection == null) {
PropertyUtilsBean bean = new PropertyUtilsBean();
ConvertUtilsBean converter = new ConvertUtilsBean();
Object value = null;
try {
Class<?> type = bean.getPropertyType(m_baseObject, m_baseObjectProperty);
value = converter.convert(m_value, type);
bean.setNestedProperty(m_baseObject, m_baseObjectProperty, value);
setError(null);
} catch (InvocationTargetException e) {
setError(e.getTargetException());
throw new CmsWidgetException(
Messages.get().container(
Messages.ERR_PROPERTY_WRITE_3,
value,
dialog.keyDefault(A_CmsWidget.getLabelKey(this), getKey()),
m_baseObject.getClass().getName()),
e.getTargetException(),
this);
} catch (Exception e) {
setError(e);
throw new CmsWidgetException(
Messages.get().container(
Messages.ERR_PROPERTY_WRITE_3,
value,
dialog.keyDefault(A_CmsWidget.getLabelKey(this), getKey()),
m_baseObject.getClass().getName()),
e,
this);
}
} else if (m_baseCollection instanceof SortedMap) {
if (CmsStringUtil.isNotEmptyOrWhitespaceOnly(m_value)) {
int pos = m_value.indexOf('=');
if ((pos > 0) && (pos < (m_value.length() - 1))) {
String key = m_value.substring(0, pos);
// it is assumed strings starting or ending with white-spaces are faulty inputs
String value = m_value.substring(pos + 1).trim();
@SuppressWarnings("rawtypes")
SortedMap map = (SortedMap)m_baseCollection;
if (map.containsKey(key)) {
Object val = map.get(key);
CmsWidgetException error = new CmsWidgetException(
Messages.get().container(
Messages.ERR_MAP_DUPLICATE_KEY_3,
dialog.keyDefault(A_CmsWidget.getLabelKey(this), getKey()),
key,
val),
this);
setError(error);
throw error;
}
map.put(key, value);
} else {
CmsWidgetException error = new CmsWidgetException(
Messages.get().container(
Messages.ERR_MAP_PARAMETER_FORM_1,
dialog.keyDefault(A_CmsWidget.getLabelKey(this), getKey())),
this);
setError(error);
throw error;
}
}
} else if (m_baseCollection instanceof List) {
if (CmsStringUtil.isNotEmptyOrWhitespaceOnly(m_value)) {
@SuppressWarnings("rawtypes")
List list = (List)m_baseCollection;
list.add(m_value);
}
}
} |
java | synchronized void startLogSegment(final long segmentTxId,
boolean writeHeaderTxn) throws IOException {
LOG.info("Starting log segment at " + segmentTxId);
if (segmentTxId < 0) {
throw new IOException("Bad txid: " + segmentTxId);
}
if (state != State.BETWEEN_LOG_SEGMENTS) {
throw new IOException("Bad state: " + state);
}
if (segmentTxId <= curSegmentTxId) {
throw new IOException("Cannot start writing to log segment "
+ segmentTxId + " when previous log segment started at "
+ curSegmentTxId);
}
if (segmentTxId != txid + 1) {
throw new IOException("Cannot start log segment at txid " + segmentTxId
+ " when next expected " + (txid + 1));
}
numTransactions = totalTimeTransactions = numTransactionsBatchedInSync = 0;
// TODO no need to link this back to storage anymore!
// See HDFS-2174.
storage.attemptRestoreRemovedStorage();
try {
editLogStream = journalSet.startLogSegment(segmentTxId);
} catch (IOException ex) {
throw new IOException("Unable to start log segment " + segmentTxId
+ ": no journals successfully started.");
}
curSegmentTxId = segmentTxId;
state = State.IN_SEGMENT;
if (writeHeaderTxn) {
logEdit(LogSegmentOp.getInstance(FSEditLogOpCodes.OP_START_LOG_SEGMENT));
logSync();
}
// force update of journal and image metrics
journalSet.updateJournalMetrics();
// If it is configured, we want to schedule an automatic edits roll
if (timeoutRollEdits > 0) {
FSNamesystem fsn = this.journalSet.getImage().getFSNamesystem();
if (fsn != null) {
// In some test cases fsn is NULL in images. Simply skip the feature.
AutomaticEditsRoller aer = fsn.automaticEditsRoller;
if (aer != null) {
aer.setNextRollTime(System.currentTimeMillis() + timeoutRollEdits);
} else {
LOG.warn("Automatic edits roll is enabled but the roller thread "
+ "is not enabled. Should only happen in unit tests.");
}
} else {
LOG.warn("FSNamesystem is NULL in FSEditLog.");
}
}
} |
java | public void visitDefault(GroovySourceAST t,int visit) {
if (visit == OPENING_VISIT) {
print(t,visit,"<" + tokenNames[t.getType()] + ">");
//out.print("<" + t.getType() + ">");
} else {
print(t,visit,"</" + tokenNames[t.getType()] + ">");
//out.print("</" + t.getType() + ">");
}
} |
java | protected void add(CmsResource resource, boolean check) throws IllegalArgumentException {
if (check) {
// it is essential that this method is only visible within the db package!
if (resource.getState().isUnchanged()) {
throw new CmsIllegalArgumentException(
Messages.get().container(Messages.ERR_PUBLISH_UNCHANGED_RESOURCE_1, resource.getRootPath()));
}
}
if (resource.isFolder()) {
if (resource.getState().isDeleted()) {
if (!m_deletedFolderList.contains(resource)) {
// only add files not already contained in the list
m_deletedFolderList.add(resource);
}
} else {
if (!m_folderList.contains(resource)) {
// only add files not already contained in the list
m_folderList.add(resource);
}
}
} else {
if (!m_fileList.contains(resource)) {
// only add files not already contained in the list
// this is required to make sure no siblings are duplicated
m_fileList.add(resource);
}
}
} |
java | public EClass getIfcStructuredDimensionCallout() {
if (ifcStructuredDimensionCalloutEClass == null) {
ifcStructuredDimensionCalloutEClass = (EClass) EPackage.Registry.INSTANCE
.getEPackage(Ifc2x3tc1Package.eNS_URI).getEClassifiers().get(568);
}
return ifcStructuredDimensionCalloutEClass;
} |
java | public OvhFirewallNetworkRule ip_firewall_ipOnFirewall_rule_POST(String ip, String ipOnFirewall, OvhFirewallActionEnum action, Long destinationPort, OvhFirewallProtocolEnum protocol, OvhFirewallSequenceRangeEnum sequence, String source, Long sourcePort, OvhFirewallOptionTCP tcpOption) throws IOException {
String qPath = "/ip/{ip}/firewall/{ipOnFirewall}/rule";
StringBuilder sb = path(qPath, ip, ipOnFirewall);
HashMap<String, Object>o = new HashMap<String, Object>();
addBody(o, "action", action);
addBody(o, "destinationPort", destinationPort);
addBody(o, "protocol", protocol);
addBody(o, "sequence", sequence);
addBody(o, "source", source);
addBody(o, "sourcePort", sourcePort);
addBody(o, "tcpOption", tcpOption);
String resp = exec(qPath, "POST", sb.toString(), o);
return convertTo(resp, OvhFirewallNetworkRule.class);
} |
java | synchronized private JaspiConfig readConfigFile(final File configFile) throws PrivilegedActionException {
if (tc.isEntryEnabled())
Tr.entry(tc, "readConfigFile", new Object[] { configFile });
if (configFile == null) {
// TODO handle persistence
// String msg = MessageFormatHelper.getFormattedMessage(msgBundle, AdminConstants.MSG_JASPI_PERSISTENT_FILE, new Object[] { PersistenceManager.JASPI_CONFIG });
// throw new RuntimeException(msg);
}
PrivilegedExceptionAction<JaspiConfig> unmarshalFile = new PrivilegedExceptionAction<JaspiConfig>() {
@Override
public JaspiConfig run() throws Exception {
JaspiConfig cfg = null;
JAXBContext jc = JAXBContext.newInstance(JaspiConfig.class);
Object obj = jc.createUnmarshaller().unmarshal(configFile);
if (obj instanceof JaspiConfig) {
cfg = (JaspiConfig) obj;
}
return cfg;
}
};
JaspiConfig jaspi = AccessController.doPrivileged(unmarshalFile);
if (tc.isEntryEnabled())
Tr.exit(tc, "readConfigFile", jaspi);
return jaspi;
} |
java | public static void removeSimilaritiesAndSaveFiles(List<String> filepathes,
Log logging, Boolean isWindows) throws IOException {
List<File> files = new LinkedList<File>();
for (String path : filepathes) {
files.add(new File(path));
}
FileComparer fcomparer;
for (int i = 0; i < files.size(); i++) {
for (int y = i + 1; y < files.size(); y++) {
fcomparer = new FileComparer(files.get(i), files.get(y),
logging, isWindows);
fcomparer.removeSimilarClassesFromFile1();
}
}
} |
java | boolean _close() throws SIResourceException
{
if (tc.isEntryEnabled()) SibTr.entry(tc, "_close");
boolean closedNow = false;
// Indicate that this connection is closed.
synchronized (this)
{
if (!_closed)
closedNow = true;
_closed = true;
}
// Ensure that we inform the parent consumer that this
// bifurcated consumer is closed, so that any messages read
// by this consumer can be unlocked.
if (closedNow) {
try
{
_consumerSession.removeBifurcatedConsumer(this);
}
catch (SISessionDroppedException e)
{
// RMQSessionDroppedException shouldn't occur so FFDC.
FFDCFilter.processException(
e,
"com.ibm.ws.sib.processor.impl.mqproxy.MQLocalization.getCursor",
"1:280:1.27",
this);
SibTr.exception(tc, e);
SIResourceException newE =
new SIResourceException(
nls.getFormattedMessage(
"CONSUMER_CLOSED_ERROR_CWSIP0177",
new Object[] { _localConsumerPoint.getConsumerManager().getDestination().getName(),
_localConsumerPoint.getConsumerManager().getMessageProcessor().getMessagingEngineName()},
null));
if (tc.isEntryEnabled())
SibTr.exit(tc, "getCursor", newE);
throw newE;
}
}
if (tc.isEntryEnabled()) SibTr.exit(tc, "_close", new Boolean(closedNow));
return closedNow;
} |
python | def AddContract(self, contract):
"""
Add a contract to the database.
Args:
contract(neo.SmartContract.Contract): a Contract instance.
"""
super(UserWallet, self).AddContract(contract)
try:
db_contract = Contract.get(ScriptHash=contract.ScriptHash.ToBytes())
db_contract.delete_instance()
except Exception as e:
logger.debug("contract does not exist yet")
sh = bytes(contract.ScriptHash.ToArray())
address, created = Address.get_or_create(ScriptHash=sh)
address.IsWatchOnly = False
address.save()
db_contract = Contract.create(RawData=contract.ToArray(),
ScriptHash=contract.ScriptHash.ToBytes(),
PublicKeyHash=contract.PublicKeyHash.ToBytes(),
Address=address,
Account=self.__dbaccount)
logger.debug("Creating db contract %s " % db_contract)
db_contract.save() |
java | protected boolean parentExists(String path) throws IOException {
// Assume root always has a parent
if (isRoot(path)) {
return true;
}
String parentKey = getParentPath(path);
return parentKey != null && isDirectory(parentKey);
} |
java | public Class<?> getPropertyType() {
Class<?> result = null;
if (getter != null) {
result = getter.getReturnType();
} else if (setter != null) {
Class<?>[] parameterTypes = setter.getParameterTypes();
result = parameterTypes[0];
}
return result;
} |
python | def getTemplate(self, uri, meta=None):
"""Return the template for an action. Cache the result. Can use an optional meta parameter with meta information"""
if not meta:
metaKey = self.cacheKey + '_templatesmeta_cache_' + uri
meta = cache.get(metaKey, None)
if not meta:
meta = self.getMeta(uri)
cache.set(metaKey, meta, 15)
if not meta: # No meta, can't return a template
return None
# Let's find the template in the cache
action = urlparse(uri).path
templateKey = self.cacheKey + '_templates_' + action + '_' + meta['template_tag']
template = cache.get(templateKey, None)
# Nothing found -> Retrieve it from the server and cache it
if not template:
r = self.doQuery('template/' + uri)
if r.status_code == 200: # Get the content if there is no problem; otherwise template stays None
template = r.content
cache.set(templateKey, template, None) # None = Cache forever
return template |
java | @Override
public void onDestroyView() {
mHandler.removeCallbacks(mRequestFocus);
mList = null;
mListShown = false;
mEmptyView = mProgressContainer = mListContainer = null;
mStandardEmptyView = null;
super.onDestroyView();
} |
java | public Block getBlock(String sqlquery, Collection queryParams, int startIndex, int count) {
return blockStrategy.getBlock(sqlquery, queryParams, startIndex, count);
} |
java | void error(int e) {
String s = "Error " + e + " at line " + fLineNum + " column "
+ fCharNum;
IllegalArgumentException ex = new IllegalArgumentException(s);
throw ex;
} |
python | def client(self, id):
"""Returns the client object in the database given a certain id. Raises
an error if that does not exist."""
return self.query(Client).filter(Client.id==id).one() |
python | def get_size(vm_):
'''
Return the VM's size. Used by create_node().
'''
sizes = avail_sizes()
vm_size = six.text_type(config.get_cloud_config_value(
'size', vm_, __opts__, search_global=False
))
for size in sizes:
if vm_size.lower() == sizes[size]['slug']:
return sizes[size]['slug']
raise SaltCloudNotFound(
'The specified size, \'{0}\', could not be found.'.format(vm_size)
) |
java | public IfcWindowTypePartitioningEnum createIfcWindowTypePartitioningEnumFromString(EDataType eDataType,
String initialValue) {
IfcWindowTypePartitioningEnum result = IfcWindowTypePartitioningEnum.get(initialValue);
if (result == null)
throw new IllegalArgumentException(
"The value '" + initialValue + "' is not a valid enumerator of '" + eDataType.getName() + "'");
return result;
} |
python | def isNewerThan(self, other):
""" Compare if the version of this app is newer that the other """
if self.getValue("name") == other.getValue("name"):
if other.getValue("version"):
if not self.getValue("version"):
return False
else:
return LooseVersion(self.getValue("version")) > LooseVersion(other.getValue("version"))
else:
return True
else:
return False |
java | protected void registerLabelAttributes() {
addAttributeProcessor(new EllipsisLmlAttribute(), "ellipsis");
addAttributeProcessor(new LabelAlignmentLmlAttribute(), "labelAlign", "labelAlignment");
addAttributeProcessor(new LineAlignmentLmlAttribute(), "lineAlign", "lineAlignment");
addAttributeProcessor(new TextAlignmentLmlAttribute(), "textAlign", "textAlignment");
addAttributeProcessor(new WrapLmlAttribute(), "wrap");
} |
python | def _set_overlay_service_policy_brief_state(self, v, load=False):
"""
Setter method for overlay_service_policy_brief_state, mapped from YANG variable /overlay_service_policy_brief_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_overlay_service_policy_brief_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_overlay_service_policy_brief_state() directly.
YANG Description: Overlay Service Policy Brief
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=overlay_service_policy_brief_state.overlay_service_policy_brief_state, is_container='container', presence=False, yang_name="overlay-service-policy-brief-state", rest_name="overlay-service-policy-brief-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ssm-overlay-service-policy-brief', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """overlay_service_policy_brief_state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=overlay_service_policy_brief_state.overlay_service_policy_brief_state, is_container='container', presence=False, yang_name="overlay-service-policy-brief-state", rest_name="overlay-service-policy-brief-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ssm-overlay-service-policy-brief', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='container', is_config=True)""",
})
self.__overlay_service_policy_brief_state = t
if hasattr(self, '_set'):
self._set() |
python | def write_file(self, filename, contents):
"""
Write a text file and provide feedback to the user.
:param filename: The pathname of the file to write (a string).
:param contents: The new contents of the file (a byte string).
"""
logger.info("Writing file: %s", format_path(filename))
contents = contents.rstrip() + b"\n"
self.context.write_file(filename, contents)
logger.debug("Wrote %s to %s.",
pluralize(len(contents.splitlines()), "line"),
format_path(filename)) |
java | protected boolean shouldAuthorize(Authentication authentication, Authorizable controlledObject)
{
Assert.state(getAccessDecisionManager() != null, "The AccessDecisionManager can not be null!");
boolean authorize = false;
try
{
if (authentication != null)
{
List<ConfigAttribute> cad = getConfigAttributeDefinition(controlledObject);
if (cad != null)
{
getAccessDecisionManager().decide(authentication, null, cad);
}
authorize = true;
} else {
// authentication must be disabled, going through
authorize = true;
}
}
catch (AccessDeniedException e)
{
authorize = false;
// This means the secured objects should not be authorized
}
return authorize;
} |
python | def updateFeatureService(self, efs_config):
"""Updates a feature service.
Args:
efs_config (list): A list of JSON configuration feature service details to update.
Returns:
dict: A dictionary of results objects.
"""
if self.securityhandler is None:
print ("Security handler required")
return
fsRes = None
fst = None
fURL = None
resItm= None
try:
fsRes = []
fst = featureservicetools.featureservicetools(securityinfo=self)
if isinstance(efs_config, list):
for ext_service in efs_config:
fURL = None
cs = 0
try:
if 'ChunkSize' in ext_service:
if common.is_number(ext_service['ChunkSize']):
cs = ext_service['ChunkSize']
except Exception as e:
pass
resItm={"DeleteDetails": None,"AddDetails":None}
if 'ItemId' in ext_service and 'LayerName' in ext_service:
fs = fst.GetFeatureService(itemId=ext_service['ItemId'],returnURLOnly=False)
if not fs is None:
fURL = fst.GetLayerFromFeatureService(fs=fs,layerName=ext_service['LayerName'],returnURLOnly=True)
if fURL is None and 'URL' in ext_service:
fURL = ext_service['URL']
if fURL is None:
print("Item and layer not found or URL not in config")
continue
if 'DeleteInfo' in ext_service:
if str(ext_service['DeleteInfo']['Delete']).upper() == "TRUE":
resItm['DeleteDetails'] = fst.DeleteFeaturesFromFeatureLayer(url=fURL, sql=ext_service['DeleteInfo']['DeleteSQL'],chunksize=cs)
if not 'error' in resItm['DeleteDetails'] :
print ("Delete Successful: %s" % fURL)
else:
print (str(resItm['DeleteDetails']))
resItm['AddDetails'] = fst.AddFeaturesToFeatureLayer(url=fURL, pathToFeatureClass = ext_service['FeatureClass'],chunksize=cs)
fsRes.append(resItm)
if not 'error' in resItm['AddDetails']:
print ("Add Successful: %s " % fURL)
else:
print (str(resItm['AddDetails']))
else:
resItm={"DeleteDetails": None,"AddDetails":None}
fURL = efs_config['URL']
cs = 0
try:
if 'ChunkSize' in efs_config:
if common.is_number(efs_config['ChunkSize']):
cs = efs_config['ChunkSize']
except Exception as e:
pass
if 'ItemId' in efs_config and 'LayerName' in efs_config:
fs = fst.GetFeatureService(itemId=efs_config['ItemId'],returnURLOnly=False)
if not fs is None:
fURL = fst.GetLayerFromFeatureService(fs=fs,layerName=efs_config['LayerName'],returnURLOnly=True)
if fURL is None and 'URL' in efs_config:
fURL = efs_config['URL']
if fURL is None:
print("Item and layer not found or URL not in config")
return None
if 'DeleteInfo' in efs_config:
if str(efs_config['DeleteInfo']['Delete']).upper() == "TRUE":
resItm['DeleteDetails'] = fst.DeleteFeaturesFromFeatureLayer(url=fURL, sql=efs_config['DeleteInfo']['DeleteSQL'],chunksize=cs)
if not 'error' in resItm['DeleteDetails'] :
print (" Delete Successful: %s" % fURL)
else:
print (" " + str(resItm['DeleteDetails']))
resItm['AddDetails'] = fst.AddFeaturesToFeatureLayer(url=fURL, pathToFeatureClass = efs_config['FeatureClass'],chunksize=cs)
fsRes.append(resItm)
if not 'error' in resItm['AddDetails']:
print (" Add Successful: %s " % fURL)
else:
print (" " + str(resItm['AddDetails']))
return fsRes
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "updateFeatureService",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
fst = None
fURL = None
resItm= None
del fst
del fURL
del resItm
gc.collect() |
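A minimal usage sketch for the list form of `efs_config`, built only from the keys the method reads above; the item id, layer name, and feature-class path are hypothetical placeholders:

efs_config = [{
    "ItemId": "0123456789abcdef0123456789abcdef",  # hypothetical portal item id
    "LayerName": "Parcels",
    "ChunkSize": 500,
    "DeleteInfo": {"Delete": "True", "DeleteSQL": "1=1"},  # clear existing rows first
    "FeatureClass": r"C:\data\staging.gdb\Parcels",  # features to append
}]
results = helper.updateFeatureService(efs_config)  # 'helper': an instance of this class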
python | def cfht_megacam_tap_query(ra_deg=180.0, dec_deg=0.0, width=1, height=1, date=None):
"""Do a query of the CADC Megacam table.
Get all observations inside the box (right now it turns width/height into a radius, should not do this).
@rtype : Table
@param ra_deg: center of search region, in degrees
@param dec_deg: center of search region in degrees
@param width: width of search region in degrees
@param height: height of search region in degrees
@param date: ISO format date string. Query will be +/- 0.5 days from date given.
"""
radius = min(90, max(width, height) / 2.0)
query = ("SELECT "
"COORD1(CENTROID(Plane.position_bounds)) AS RAJ2000,"
"COORD2(CENTROID(Plane.position_bounds)) AS DEJ2000,"
"target_name "
"FROM "
"caom2.Observation as o "
"JOIN caom2.Plane as Plane on o.obsID=Plane.obsID "
"WHERE o.collection = 'CFHT' "
"AND o.instrument_name = 'MegaPrime' "
"AND INTERSECTS( CIRCLE('ICRS', %f, %f, %f), Plane.position_bounds ) = 1")
query = query % (ra_deg, dec_deg, radius)
if date is not None:
mjd = Time(date, scale='utc').mjd
query += " AND Plane.time_bounds_lower <= {} AND {} <= Plane.time_bounds_upper ".format(mjd+0.5, mjd-0.5)
data = {"QUERY": query,
"REQUEST": "doQuery",
"LANG": "ADQL",
"FORMAT": "votable"}
url = "http://www.cadc.hia.nrc.gc.ca/tap/sync"
warnings.simplefilter('ignore')
    ff = StringIO(requests.get(url, params=data).text)
ff.seek(0)
table = votable.parse(ff).get_first_table().to_table()
assert isinstance(table, Table)
return table |
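A hedged usage sketch; the pointing and date below are illustrative values only:

result = cfht_megacam_tap_query(ra_deg=185.0, dec_deg=10.0, width=2, height=1,
                                date="2013-04-09")
print(len(result), result.colnames)  # columns: RAJ2000, DEJ2000, target_name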
java | protected EndpointInfo createEndpointInfo() throws BusException {
String transportId = getTransportId();
if (transportId == null && getAddress() != null) {
DestinationFactory df = getDestinationFactory();
if (df == null) {
DestinationFactoryManager dfm = getBus().getExtension(DestinationFactoryManager.class);
df = dfm.getDestinationFactoryForUri(getAddress());
}
if (df != null) {
transportId = df.getTransportIds().get(0);
}
}
// default to http transport
if (transportId == null) {
transportId = "http://schemas.xmlsoap.org/wsdl/soap/http";
}
setTransportId(transportId);
EndpointInfo ei = new EndpointInfo();
ei.setTransportId(transportId);
ei.setName(serviceFactory.getService().getName());
ei.setAddress(getAddress());
ei.setProperty(PROTOBUF_MESSAGE_CLASS, messageClass);
BindingInfo bindingInfo = createBindingInfo();
ei.setBinding(bindingInfo);
return ei;
} |
python | def always_iterable(obj, base_type=(str, bytes)):
"""If *obj* is iterable, return an iterator over its items::
>>> obj = (1, 2, 3)
>>> list(always_iterable(obj))
[1, 2, 3]
If *obj* is not iterable, return a one-item iterable containing *obj*::
>>> obj = 1
>>> list(always_iterable(obj))
[1]
If *obj* is ``None``, return an empty iterable:
>>> obj = None
>>> list(always_iterable(None))
[]
By default, binary and text strings are not considered iterable::
>>> obj = 'foo'
>>> list(always_iterable(obj))
['foo']
If *base_type* is set, objects for which ``isinstance(obj, base_type)``
returns ``True`` won't be considered iterable.
>>> obj = {'a': 1}
>>> list(always_iterable(obj)) # Iterate over the dict's keys
['a']
>>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
[{'a': 1}]
Set *base_type* to ``None`` to avoid any special handling and treat objects
Python considers iterable as iterable:
>>> obj = 'foo'
>>> list(always_iterable(obj, base_type=None))
['f', 'o', 'o']
"""
if obj is None:
return iter(())
if (base_type is not None) and isinstance(obj, base_type):
return iter((obj,))
try:
return iter(obj)
except TypeError:
return iter((obj,)) |
java | private static HeaderMap wrapMultiMap(MultiMap headers) {
return new HeaderMap() {
private static final long serialVersionUID = -1406124274678587935L;
            @Override
public String get(String key) {
return headers.get(key);
}
};
} |
java | public static <E, R extends Collection<E>> R all(Iterable<E> iterable, Supplier<R> supplier) {
dbc.precondition(iterable != null, "cannot call all with a null iterable");
final Function<Iterator<E>, R> consumer = new ConsumeIntoCollection<>(supplier);
return consumer.apply(iterable.iterator());
} |
python | def find_seq_id(block, name, case_sensitive=True):
"""Given part of a sequence ID, find the first actual ID that contains it.
Example::
>>> find_seq_id(block, '2QG5')
'gi|158430190|pdb|2QG5|A'
Raise a ValueError if no matching key is found.
"""
# logging.warn("DEPRECATED: Try to use cma.find_seq_rec instead")
rec = find_seq_rec(block, name, case_sensitive)
return rec['id'] |
python | def find_deadlocks(self):
"""
This function detects deadlocks
Return:
            named tuple with the tasks grouped in: deadlocked, runnables, running
"""
        # Find the jobs that can be submitted and the jobs that are already in the queue.
runnables = []
for work in self:
runnables.extend(work.fetch_alltasks_to_run())
runnables.extend(list(self.iflat_tasks(status=self.S_SUB)))
# Running jobs.
running = list(self.iflat_tasks(status=self.S_RUN))
# Find deadlocks.
err_tasks = self.errored_tasks
deadlocked = []
if err_tasks:
for task in self.iflat_tasks():
if any(task.depends_on(err_task) for err_task in err_tasks):
deadlocked.append(task)
return dict2namedtuple(deadlocked=deadlocked, runnables=runnables, running=running) |
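A short usage sketch (`flow` stands in for an instance of this class):

status = flow.find_deadlocks()
if status.deadlocked and not (status.runnables or status.running):
    print("No tasks can run and none are queued: the flow is deadlocked.")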
java | private static IAction createAction(String action) {
IAction actn = null;
if (!StringUtils.isEmpty(action)) {
if ((actn = ActionRegistry.getRegisteredAction(action)) == null) {
actn = ActionUtil.createAction(null, action);
}
}
return actn;
} |
java | public Observable<String> getMetadataAsync(String scope) {
return getMetadataWithServiceResponseAsync(scope).map(new Func1<ServiceResponse<String>, String>() {
@Override
public String call(ServiceResponse<String> response) {
return response.body();
}
});
} |
python | def _build_predict(self, Xnew, full_cov=False):
"""
Compute the mean and variance of the latent function at some new points
Xnew.
"""
_, _, Luu, L, _, _, gamma = self._build_common_terms()
Kus = features.Kuf(self.feature, self.kern, Xnew) # size M x Xnew
w = tf.matrix_triangular_solve(Luu, Kus, lower=True) # size M x Xnew
tmp = tf.matrix_triangular_solve(tf.transpose(L), gamma, lower=False)
mean = tf.matmul(w, tmp, transpose_a=True) + self.mean_function(Xnew)
intermediateA = tf.matrix_triangular_solve(L, w, lower=True)
if full_cov:
var = self.kern.K(Xnew) - tf.matmul(w, w, transpose_a=True) \
+ tf.matmul(intermediateA, intermediateA, transpose_a=True)
var = tf.tile(var[None, ...], [self.num_latent, 1, 1]) # P x N x N
else:
var = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(w), 0) \
+ tf.reduce_sum(tf.square(intermediateA), 0) # size Xnew,
var = tf.tile(var[:, None], [1, self.num_latent])
return mean, var |
python | def execute(self, triple_map, output, **kwargs):
"""Method executes mapping between CSV source and
output RDF
        args:
            triple_map(SimpleNamespace): Triple Map
            output: graph that receives the generated triples
        """
subject = self.generate_term(term_map=triple_map.subjectMap,
**kwargs)
start_size = len(output)
all_subjects = []
for pred_obj_map in triple_map.predicateObjectMap:
predicate = pred_obj_map.predicate
if pred_obj_map.template is not None:
object_ = self.generate_term(term_map=pred_obj_map, **kwargs)
                if object_ and len(str(object_)) > 0:
output.add((
subject,
predicate,
object_))
if pred_obj_map.parentTriplesMap is not None:
self.__handle_parents__(
parent_map=pred_obj_map.parentTriplesMap,
subject=subject,
predicate=predicate,
**kwargs)
if pred_obj_map.reference is not None:
object_ = self.generate_term(term_map=pred_obj_map,
**kwargs)
if object_ and len(str(object_)) > 0:
output.add((subject, predicate, object_))
if pred_obj_map.constant is not None:
output.add((subject, predicate, pred_obj_map.constant))
finish_size = len(output)
if finish_size > start_size:
output.add((subject,
NS_MGR.rdf.type.rdflib,
triple_map.subjectMap.class_))
all_subjects.append(subject)
return all_subjects |
python | def from_dict(cls, copula_dict):
"""Set attributes with provided values."""
instance = cls()
instance.fitted = copula_dict['fitted']
instance.constant_value = copula_dict['constant_value']
if instance.fitted and not instance.constant_value:
instance.model = scipy.stats.gaussian_kde([-1, 0, 0])
for key in ['dataset', 'covariance', 'inv_cov']:
copula_dict[key] = np.array(copula_dict[key])
attributes = ['d', 'n', 'dataset', 'covariance', 'factor', 'inv_cov']
for name in attributes:
setattr(instance.model, name, copula_dict[name])
return instance |
python | def put(self, measurementId):
"""
Initiates a new measurement. Accepts a json payload with the following attributes;
* duration: in seconds
* startTime OR delay: a date in YMD_HMS format or a delay in seconds
* description: some free text information about the measurement
:return:
"""
json = request.get_json()
try:
start = self._calculateStartTime(json)
except ValueError:
return 'invalid date format in request', 400
duration = json['duration'] if 'duration' in json else 10
if start is None:
# should never happen but just in case
return 'no start time', 400
else:
scheduled, message = self._measurementController.schedule(measurementId, duration, start,
description=json.get('description'))
return message, 200 if scheduled else 400 |
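A hedged example of the JSON body this handler accepts; the host, route, and measurement id are placeholders, and callers supply either startTime or delay, not both:

import requests
payload = {"duration": 30, "delay": 5, "description": "ambient noise sweep"}
requests.put("http://localhost:8080/measurements/living_room", json=payload)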
java | public Observable<ContentKeyPolicyInner> createOrUpdateAsync(String resourceGroupName, String accountName, String contentKeyPolicyName, ContentKeyPolicyInner parameters) {
return createOrUpdateWithServiceResponseAsync(resourceGroupName, accountName, contentKeyPolicyName, parameters).map(new Func1<ServiceResponse<ContentKeyPolicyInner>, ContentKeyPolicyInner>() {
@Override
public ContentKeyPolicyInner call(ServiceResponse<ContentKeyPolicyInner> response) {
return response.body();
}
});
} |
python | def put_bug(self, bugid, bug_update):
'''http://bugzilla.readthedocs.org/en/latest/api/core/v1/bug.html#update-bug'''
assert type(bug_update) is DotDict
        if 'ids' not in bug_update:
bug_update.ids = [bugid]
return self._put('bug/{bugid}'.format(bugid=bugid),
json.dumps(bug_update)) |
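A usage sketch; attribute-style assignment on DotDict mirrors how the method itself sets bug_update.ids above, and the bug id and fields are illustrative:

update = DotDict()
update.status = "RESOLVED"
update.resolution = "FIXED"
client.put_bug(1234567, update)  # 'ids' is filled in from bugid automatically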
python | def set(self, **kwargs):
"""Sets an internal setting for acquistion, using keywords.
Available parameters to set:
:param acqtime: duration of recording (input) window (seconds)
:type acqtime: float
:param aifs: sample rate of the recording (input) operation (Hz)
:type aifs: int
:param aochan: AO (generation) channel name
:type aochan: str
:param aichan: AI (recording) channel name
:type aichan: str
:param nreps: number of repetitions for each unique stimulus
:type nreps: int
:param binsz: time bin duration for spike sorting (seconds)
:type binsz: float
:param caldb: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>`
:type caldb: float
:param calv: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>`
:type calv: float
:param datafile: a reference to an open file to save data to
:type datafile: :class:`AcquisitionData<sparkle.data.dataobjects.AcquisitionData>`
:param average: whether to average repetitions of a trace, saving only the averaged signal
:type average: bool
:param reject: whether to reject values higher than a defined threshold. Only used while average is true
:type reject: bool
:param rejectrate: the value to base artifact rejection on
:type rejectrate: float
"""
self.player_lock.acquire()
if 'acqtime' in kwargs:
self.player.set_aidur(kwargs['acqtime'])
if 'aifs' in kwargs:
self.player.set_aifs(kwargs['aifs'])
self.aifs = kwargs['aifs']
if 'aifs' in kwargs or 'acqtime' in kwargs:
t = kwargs.get('acqtime', self.player.get_aidur())
            npoints = int(t * float(kwargs.get('aifs', self.player.get_aifs())))
self.aitimes = np.linspace(0, t, npoints)
if 'trigger' in kwargs:
self.player.set_trigger(kwargs['trigger'])
self.player_lock.release()
if 'aochan' in kwargs:
self.aochan = kwargs['aochan']
if 'aichan' in kwargs:
self.aichan = kwargs['aichan']
if 'binsz' in kwargs:
self.binsz = kwargs['binsz']
if 'save' in kwargs:
self.save_data = kwargs['save']
if 'caldb' in kwargs:
self.caldb = kwargs['caldb']
if 'calv' in kwargs:
self.calv = kwargs['calv']
if 'calf' in kwargs:
self.calf = kwargs['calf']
if 'caldb' in kwargs or 'calv' in kwargs:
self.update_reference_voltage()
if 'datafile' in kwargs:
self.datafile = kwargs['datafile']
if 'reprate' in kwargs:
self.reprate = kwargs['reprate']
if 'average' in kwargs:
self.average = kwargs['average']
if 'reject' in kwargs:
self.reject = kwargs['reject']
if 'rejectrate' in kwargs:
self.rejectrate = kwargs['rejectrate'] |
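For example, a caller might configure a 200 ms acquisition window sampled at 100 kHz ('acqmodel' and the NI channel routes are hypothetical):

acqmodel.set(acqtime=0.2, aifs=100000,
             aochan="PCI-6259/ao0", aichan="PCI-6259/ai0",
             binsz=0.005, average=True, reject=True, rejectrate=1.5)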
python | def install_plugin(pkgpath, plugin_type, install_path, register_func):
"""Install specified plugin.
:param pkgpath: Name of plugin to be downloaded from online repo or path to plugin folder or zip file.
:param install_path: Path where plugin will be installed.
:param register_func: Method used to register and validate plugin.
"""
service_name = os.path.basename(pkgpath)
if os.path.exists(os.path.join(install_path, service_name)):
raise exceptions.PluginAlreadyInstalled(pkgpath)
if os.path.exists(pkgpath):
logger.debug("%s exists in filesystem", pkgpath)
if os.path.isdir(pkgpath):
pip_status = install_dir(pkgpath, install_path, register_func)
else: # pkgpath is file
pip_status = install_from_zip(pkgpath, install_path, register_func)
else:
logger.debug("cannot find %s locally, checking github repo", pkgpath)
click.secho("Collecting {}..".format(pkgpath))
pip_status = install_from_repo(pkgpath, plugin_type, install_path, register_func)
if pip_status == 0:
click.secho("[+] Great success!")
else:
# TODO: rephrase
click.secho("[-] Service installed but something was odd with dependency install, please review debug logs") |
java | private void addParent(MavenPomDescriptor pomDescriptor, Model model, ScannerContext context) {
Parent parent = model.getParent();
if (null != parent) {
ArtifactResolver resolver = getArtifactResolver(context);
MavenArtifactDescriptor parentDescriptor = resolver.resolve(new ParentCoordinates(parent), context);
pomDescriptor.setParent(parentDescriptor);
}
} |
python | def from_spcm(filepath, name=None, *, delimiter=",", parent=None, verbose=True) -> Data:
"""Create a ``Data`` object from a Becker & Hickl spcm file (ASCII-exported, ``.asc``).
If provided, setup parameters are stored in the ``attrs`` dictionary of the ``Data`` object.
See the `spcm`__ software hompage for more info.
__ http://www.becker-hickl.com/software/spcm.htm
Parameters
----------
filepath : path-like
Path to SPC-xxx .asc file.
Can be either a local or remote file (http/ftp).
Can be compressed with gz/bz2, decompression based on file name.
name : string (optional)
Name to give to the created data object. If None, filename is used.
Default is None.
delimiter : string (optional)
The string used to separate values. Default is ','.
parent : WrightTools.Collection (optional)
Collection to place new data object within. Default is None.
verbose : boolean (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.data.Data object
"""
filestr = os.fspath(filepath)
filepath = pathlib.Path(filepath)
# check filepath
if not ".asc" in filepath.suffixes:
wt_exceptions.WrongFileTypeWarning.warn(filepath, ".asc")
# parse name
if not name:
name = filepath.name.split(".")[0]
# create headers dictionary
headers = collections.OrderedDict()
header_lines = 0
ds = np.DataSource(None)
f = ds.open(filestr, "rt")
while True:
line = f.readline().strip()
header_lines += 1
if len(line) == 0:
break
else:
key, value = line.split(":", 1)
if key.strip() == "Revision":
headers["resolution"] = int(value.strip(" bits ADC"))
else:
headers[key.strip()] = value.strip()
line = f.readline().strip()
while "_BEGIN" in line:
header_lines += 1
section = line.split("_BEGIN")[0]
while True:
line = f.readline().strip()
header_lines += 1
if section + "_END" in line:
break
if section == "SYS_PARA":
use_type = {
"B": lambda b: int(b) == 1,
"C": str, # e.g. #SP [SP_OVERFL,C,N]
"F": float,
"I": int,
"L": int, # e.g. #DI [DI_MAXCNT,L,128]
"S": str,
"U": int, # unsigned int?
}
item = line[line.find("[") + 1 : line.find("]")].split(",")
key = item[0]
value = use_type[item[1]](item[2])
headers[key] = value
else:
splitted = line.split()
value = splitted[-1][1:-1].split(",")
key = " ".join(splitted[:-1])
headers[key] = value
line = f.readline().strip()
if "END" in line:
header_lines += 1
break
if "Date" in headers.keys() and "Time" in headers.keys():
# NOTE: reports created in local time, no-way to calculate absolute time
created = " ".join([headers["Date"], headers["Time"]])
created = time.strptime(created, "%Y-%m-%d %H:%M:%S")
created = timestamp.TimeStamp(time.mktime(created)).RFC3339
headers["created"] = created
# initialize data object
kwargs = {"name": name, "kind": "spcm", "source": filestr, **headers}
if parent:
data = parent.create_data(**kwargs)
else:
data = Data(**kwargs)
# import data
f.seek(0)
arr = np.genfromtxt(
f, skip_header=(header_lines + 1), skip_footer=1, delimiter=delimiter, unpack=True
)
f.close()
# construct data
data.create_variable(name="time", values=arr[0], units="ns")
data.create_channel(name="counts", values=arr[1])
data.transform("time")
# finish
if verbose:
print("data created at {0}".format(data.fullpath))
print(" kind: {0}".format(data.kind))
print(" range: {0} to {1} (ns)".format(data.time[0], data.time[-1]))
print(" size: {0}".format(data.size))
if "SP_COL_T" in data.attrs.keys():
print(" collection time: {0} sec".format(data.attrs["SP_COL_T"]))
return data |
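A hedged usage sketch, assuming the loader is exposed on the package's data namespace; the filename is a placeholder:

import WrightTools as wt
data = wt.data.from_spcm("decay_curve.asc", name="decay")
print(data.time[0], data.time[-1])  # time axis bounds, in ns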
python | def _post(url: str, params: dict, headers: dict) -> dict:
"""
Make a POST call.
"""
response = requests.post(url, params=params, headers=headers)
data = response.json()
if response.status_code != 200 or "error" in data:
raise GoogleApiError({"status_code": response.status_code,
"error": data.get("error", "")})
return data |
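A usage sketch; the URL, parameters, and token are placeholders rather than a specific Google endpoint:

try:
    data = _post("https://www.googleapis.com/example/v1/items",
                 params={"key": "<api-key>"},
                 headers={"Authorization": "Bearer <access-token>"})
except GoogleApiError as err:
    print("API call failed:", err)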
java | private static int compressionLevel(int blockSize) {
int compressionLevel = 32 - Integer.numberOfLeadingZeros(blockSize - 1); // ceil of log2
assert (1 << compressionLevel) >= blockSize;
assert blockSize * 2 > (1 << compressionLevel);
compressionLevel = Math.max(0, compressionLevel - COMPRESSION_LEVEL_BASE);
assert compressionLevel >= 0 && compressionLevel <= 0x0F;
return compressionLevel;
} |
python | def is_writer(self, check_pending=True):
"""Returns if the caller is the active writer or a pending writer."""
me = self._current_thread()
if self._writer == me:
return True
if check_pending:
return me in self._pending_writers
else:
return False |
java | private static Stream<Class<?>> getSuperclassesAsStream(Class<?> clazz, boolean includeArgument) {
return Stream.concat(includeArgument ? Stream.of(clazz) : Stream.empty(),
Optional.ofNullable(clazz.getSuperclass())
.map(Stream::of)
.orElseGet(Stream::empty)
.flatMap(superclass -> getSuperclassesAsStream(superclass, true)));
} |
python | def apply_dependencies(self, hosts):
"""Wrapper to loop over services and call Service.fill_daddy_dependency()
:return: None
"""
for service in self:
if service.host and service.host_dependency_enabled:
host = hosts[service.host]
if host.active_checks_enabled:
service.act_depend_of.append(
(service.host, ['d', 'x', 's', 'f'], '', True)
)
host.act_depend_of_me.append(
(service.uuid, ['d', 'x', 's', 'f'], '', True)
)
host.child_dependencies.add(service.uuid)
service.parent_dependencies.add(service.host) |
java | public int getIDFromCode(String strCode)
{
int iID = 0;
try {
iID = Integer.parseInt(strCode); // Special case - if an integer, just convert it.
} catch (NumberFormatException ex) {
iID = 0;
}
if (iID == 0)
{
Record record = this.getReferenceRecord();
if (record != null)
iID = record.getIDFromCode(strCode);
}
return iID;
} |
java | @Override
public byte[] getBinaryValue(Base64Variant b64variant) throws IOException {
Object n = currentNode();
if (n instanceof byte[]) {
return (byte[]) n;
} else if (n instanceof org.bson.types.ObjectId) {
return ((org.bson.types.ObjectId) n).toByteArray();
}
return null;
}
@Override
public Object getEmbeddedObject() throws IOException, JsonParseException {
return currentNode();
}
/*
/**********************************************************
/* Internal methods
/**********************************************************
*/
protected Object currentNode() {
if (closed || nodeCursor == null) {
return null;
}
return nodeCursor.currentNode();
} |
python | def _parse_content(self, text):
'''Try to parse as HAL, but on failure use an empty dict'''
try:
return super(OrphanHALNavigator, self)._parse_content(text)
except exc.UnexpectedlyNotJSON:
return {} |
python | def add_idle(self, callback, *args, **kwds):
"""Add an idle callback.
An idle callback can return True, False or None. These mean:
- None: remove the callback (don't reschedule)
- False: the callback did no work; reschedule later
- True: the callback did some work; reschedule soon
If the callback raises an exception, the traceback is logged and
the callback is removed.
"""
self.idlers.append((callback, args, kwds)) |
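For example, an idler that drains a work queue follows the protocol above ('loop' and 'pending_items' are placeholders):

def flush(batch):
    if not batch:
        return None  # nothing left: remove this idler
    batch.pop()
    return True      # did some work: reschedule soon

loop.add_idle(flush, pending_items)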