language (stringclasses, 2 values) | func_code_string (stringlengths, 63 to 466k)
---|---|
python | def referrer_uri(self, value):
"""The referrer_uri property.
Args:
value (string): The property value.
"""
if value == self._defaults['referrerUri'] and 'referrerUri' in self._values:
del self._values['referrerUri']
else:
self._values['referrerUri'] = value |
java | public static int[] validate2(int[] data, boolean allowSz1, String paramName){
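        // Validates that the array holds two (or, when allowSz1 is true, one) entries;
        // a single value is duplicated into a length-2 array.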
if(data == null) {
return null;
}
if(allowSz1){
Preconditions.checkArgument(data.length == 1 || data.length == 2,
"Need either 1 or 2 %s values, got %s values: %s",
paramName, data.length, data);
} else {
Preconditions.checkArgument(data.length == 2,"Need 2 %s values, got %s values: %s",
paramName, data.length, data);
}
if(data.length == 1){
return new int[]{data[0], data[0]};
} else {
return data;
}
} |
python | def _id_for_pc(self, name):
""" Given the name of the PC, return the database identifier. """
if name not in self.pc2id_lut:
self.c.execute("INSERT INTO pcs (name) VALUES ( ? )", (name,))
self.pc2id_lut[name] = self.c.lastrowid
self.id2pc_lut[self.c.lastrowid] = name
return self.pc2id_lut[name] |
java | public ModifyVpcEndpointServiceConfigurationRequest withAddNetworkLoadBalancerArns(String... addNetworkLoadBalancerArns) {
if (this.addNetworkLoadBalancerArns == null) {
setAddNetworkLoadBalancerArns(new com.amazonaws.internal.SdkInternalList<String>(addNetworkLoadBalancerArns.length));
}
for (String ele : addNetworkLoadBalancerArns) {
this.addNetworkLoadBalancerArns.add(ele);
}
return this;
} |
java | @Override
public Collection<PersonDocument> findByFileAndSurname(
final String filename, final String surname) {
if (filename == null || surname == null) {
return Collections.emptyList();
}
final Query searchQuery =
new Query(Criteria.where("surname").is(surname)
.and("filename").is(filename));
final List<PersonDocumentMongo> personDocuments =
mongoTemplate.find(searchQuery, PersonDocumentMongo.class);
createGedObjects(personDocuments);
return copy(personDocuments);
} |
java | private boolean checkAppPathsForVia(Set<String> keySet, List<String> errors) {
for (String key : keySet) {
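            // Only the first key that is not "defaultKey" is validated; the method returns based on it.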
if (!key.equals("defaultKey")) {
File file = new File(key);
if (!file.exists()) {
errors.add("Effective Usage Analysis will not run if the -appPath parameter references an invalid file path. Check that the -appPath parameter specifies a valid path");
return false;
} else if (!file.isFile()) {
errors.add("Effective Usage Analysis will not run if the -appPath parameter references an invalid file path. Check that the -appPath parameter specifies a valid path");
return false;
} else {
return true;
}
}
}
return false;
} |
python | def after_reject(analysis):
"""Function triggered after the "reject" transition for the analysis passed
in is performed."""
# Remove from the worksheet
remove_analysis_from_worksheet(analysis)
# Reject our dependents (analyses that depend on this analysis)
cascade_to_dependents(analysis, "reject")
if IRequestAnalysis.providedBy(analysis):
# Try verify (for when remaining analyses are in 'verified')
doActionFor(analysis.getRequest(), "verify")
# Try submit (remaining analyses are in 'to_be_verified')
doActionFor(analysis.getRequest(), "submit")
# Try rollback (no remaining analyses or some not submitted)
doActionFor(analysis.getRequest(), "rollback_to_receive")
reindex_request(analysis) |
python | def exists(method,
ip,
port=None,
proto='tcp',
direction='in',
port_origin='d',
ip_origin='d',
ttl=None,
comment=''):
'''
Returns true if a rule for the ip already exists
based on the method supplied. Returns false if
not found.
CLI Example:
.. code-block:: bash
salt '*' csf.exists allow 1.2.3.4
salt '*' csf.exists tempdeny 1.2.3.4
'''
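    # temporary allow/deny rules are tracked separately and need their own lookup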
if method.startswith('temp'):
return _temp_exists(method, ip)
if port:
rule = _build_port_rule(ip, port, proto, direction, port_origin, ip_origin, comment)
return _exists_with_port(method, rule)
exists = __salt__['cmd.run_all']("egrep ^'{0} +' /etc/csf/csf.{1}".format(ip, method))
return not bool(exists['retcode']) |
python | def get_file(cls, filename=None):
"""
Load settings from an rtv configuration file.
"""
if filename is None:
filename = CONFIG
config = configparser.ConfigParser()
if os.path.exists(filename):
with codecs.open(filename, encoding='utf-8') as fp:
config.readfp(fp)
return cls._parse_rtv_file(config) |
java | public boolean removeBusItinerary(BusItinerary itinerary) {
final int index = this.itineraries.indexOf(itinerary);
if (index >= 0) {
return removeBusItinerary(index);
}
return false;
} |
python | def transform(source):
'''Used to convert the source code, making use of known transformers.
"transformers" are modules which must contain a function
transform_source(source)
which returns a transformed source.
Some transformers (for example, those found in the standard library
module lib2to3) cannot cope with non-standard syntax; as a result, they
may fail during a first attempt. We keep track of all failing
transformers and keep retrying them until either they all succeed
or a fixed set of them fails twice in a row.
'''
source = extract_transformers_from_source(source)
# Some transformers fail when multiple non-Python constructs
# are present. So, we loop multiple times keeping track of
# which transformations have been unsuccessfully performed.
not_done = transformers
while True:
failed = {}
for name in not_done:
tr_module = import_transformer(name)
try:
source = tr_module.transform_source(source)
except Exception as e:
failed[name] = tr_module
# from traceback import print_exc
# print("Unexpected exception in transforms.transform",
# e.__class__.__name__)
# print_exc()
if not failed:
break
# Insanity is doing the same thing over and over again and
# expecting different results ...
# If the exact same set of transformations are not performed
# twice in a row, there is no point in trying out a third time.
if failed == not_done:
print("Warning: the following transforms could not be done:")
for key in failed:
print(key)
break
not_done = failed # attempt another pass
return source |
python | def n_feature_hash(feature, dims, seeds):
"""N-hot-encoded feature hashing.
Args:
feature (str): Target feature represented as string.
dims (list of int): Number of dimensions for each hash value.
seeds (list of float): Seed of each hash function (mmh3).
Returns:
numpy 1d array: n-hot-encoded feature vector for `feature`.
"""
vec = np.zeros(sum(dims))
offset = 0
for seed, dim in zip(seeds, dims):
vec[offset:(offset + dim)] = feature_hash(feature, dim, seed)
offset += dim
return vec |
java | public void setProperty(Class aClass, Object object, String property, Object newValue, boolean b, boolean b1) {
if (null == interceptor) {
    // no interceptor registered: set the property once and return; falling
    // through would hit the else branch below and set it a second time
    super.setProperty(aClass, object, property, newValue, b, b1);
    return;
}
if (interceptor instanceof PropertyAccessInterceptor) {
PropertyAccessInterceptor pae = (PropertyAccessInterceptor) interceptor;
pae.beforeSet(object, property, newValue);
if (interceptor.doInvoke()) {
super.setProperty(aClass, object, property, newValue, b, b1);
}
} else {
super.setProperty(aClass, object, property, newValue, b, b1);
}
} |
java | public ServiceFuture<List<PatternRuleInfo>> getIntentPatternsAsync(UUID appId, String versionId, UUID intentId, GetIntentPatternsOptionalParameter getIntentPatternsOptionalParameter, final ServiceCallback<List<PatternRuleInfo>> serviceCallback) {
return ServiceFuture.fromResponse(getIntentPatternsWithServiceResponseAsync(appId, versionId, intentId, getIntentPatternsOptionalParameter), serviceCallback);
} |
java | protected List<String> loadDeps(Scriptable cfg) {
final String methodName = "loadDeps"; //$NON-NLS-1$
Object depsList = cfg.get(DEPS_CONFIGPARAM, cfg);
List<String> deps = new LinkedList<String>();
if (depsList instanceof Scriptable) {
for (Object id : ((Scriptable)depsList).getIds()) {
if (id instanceof Number) {
Number i = (Number)id;
Object entry = ((Scriptable)depsList).get((Integer)i, (Scriptable)depsList);
deps.add(toString(entry));
}
}
log.logp(Level.WARNING, ConfigImpl.class.getName(), methodName, Messages.ConfigImpl_0);
}
return deps;
} |
java | public final Cache2kBuilder<K, V> expiryPolicy(ExpiryPolicy<K, V> c) {
config().setExpiryPolicy(wrapCustomizationInstance(c));
return this;
} |
java | public boolean isToggleButtonChecked(String text)
{
if(config.commandLogging){
Log.d(config.commandLoggingTag, "isToggleButtonChecked(\""+text+"\")");
}
return checker.isButtonChecked(ToggleButton.class, text);
} |
java | public ExecutorCommand<V> withBreakerRetryTimeout(long breakerRetryTimeout, TimeUnit breakerRetryTimeUnit) {
config.setBreakerRetryTimeout(breakerRetryTimeUnit.toMillis(breakerRetryTimeout));
return this;
} |
python | def p_program_line_label(p):
""" label_line : LABEL statements
| LABEL co_statements
"""
lbl = make_label(p[1], p.lineno(1))
p[0] = make_block(lbl, p[2]) if len(p) == 3 else lbl |
java | @Override
public boolean hasTileToRetrieve(int x, int y, int zoom) {
return retriever.hasTile(x, y, zoom);
} |
java | @Override
protected Environment createEnvironment() {
HttpServletRequest request = getBackingRequest();
String postPath = getResponseUrl(request);
String baseUrl = getBaseUrl(request);
String userAgent = request.getHeader("user-agent");
/**
* Careful - this won't be serializable
*/
HttpServletEnvironment env = new HttpServletEnvironment(postPath, baseUrl, userAgent);
if (request instanceof SubSessionHttpServletRequestWrapper) {
env.setSubsessionId(((SubSessionHttpServletRequestWrapper) request).getSessionId());
}
return env;
} |
python | def formfield(self, **kwargs):
"""
Get choices from plugins, if necessary.
"""
if self.plugin_class:
self._choices = self.plugin_class.get_all_choices(field=self)
return super(TemplateNameField, self).formfield(**kwargs) |
python | def insert_request_to_batch(self, request):
'''
Adds request to batch operation.
request:
the request to insert, update or delete an entity
'''
self.validate_request_table(request)
self.validate_request_partition_key(request)
self.validate_request_row_key(request)
self.batch_requests.append(request) |
python | def run(self, react=True, strict=True, roles=1):
"""Select a cast and perform the next scene.
:param bool react: If `True`, then Property directives are executed
at the point they are encountered. Pass `False` to skip them
so they can be enacted later on.
:param bool strict: If `True`, only fully-cast scripts are performed.
:param int roles: Maximum number of roles permitted each character.
This method is a generator. It yields events from the performance.
If a :py:class:`~turberfield.dialogue.model.Model.Condition` is
encountered, it is evaluated. No events are generated while the most recent
condition is False.
A new :py:class:`~turberfield.dialogue.model.Model.Shot` resets the
current condition.
"""
try:
folder, index, self.script, self.selection, interlude = self.next(
self.folders, self.ensemble,
strict=strict, roles=roles
)
except TypeError:
raise GeneratorExit
with self.script as dialogue:
model = dialogue.cast(self.selection).run()
for shot, item in model:
if self.condition is not False:
yield shot
yield item
if not self.shots or self.shots[-1][:2] != shot[:2]:
self.shots.append(shot._replace(items=self.script.fP))
self.condition = None
if isinstance(item, Model.Condition):
self.condition = self.allows(item)
if react:
self.react(item)
for key, value in model.metadata:
if value not in self.metadata[key]:
self.metadata[key].append(value) |
java | public int[] toArray() {
int[] arr = new int[length()];
for (int i = 0; i < arr.length; ++i)
arr[i] = get(i);
return arr;
} |
java | Path dumpXmlReport(Report report) {
if (!settings.getBoolean(PROPERTY_GENERATE_XML).orElse(false)) {
return null;
}
try {
final String reportAsString = reportToString(report);
final Path reportFile = writeToWorkingDirectory(reportAsString, PMD_RESULT_XML);
LOG.info("PMD output report: " + reportFile.toString());
return reportFile;
} catch (IOException e) {
throw new IllegalStateException("Fail to save the PMD report", e);
}
} |
java | @Override
public void beforeFirst(SearchRange searchRange) {
if (!searchRange.isValid())
return;
search(searchRange, SearchPurpose.READ);
} |
java | public Builder andCondition(Class<? extends Annotation> condition,
Object... details) {
Assert.notNull(condition, "Condition must not be null");
return andCondition("@" + ClassUtils.getShortName(condition), details);
} |
java | public DescribeTapesResult withTapes(Tape... tapes) {
if (this.tapes == null) {
setTapes(new com.amazonaws.internal.SdkInternalList<Tape>(tapes.length));
}
for (Tape ele : tapes) {
this.tapes.add(ele);
}
return this;
} |
python | def update_preview(self, index=None, scheme_name=None):
"""
Update the color scheme of the preview editor and adds text.
Note
----
'index' is needed, because this is triggered by a signal that sends
the selected index.
"""
text = ('"""A string"""\n\n'
'# A comment\n\n'
'# %% A cell\n\n'
'class Foo(object):\n'
' def __init__(self):\n'
' bar = 42\n'
' print(bar)\n'
)
show_blanks = CONF.get('editor', 'blank_spaces')
update_scrollbar = CONF.get('editor', 'scroll_past_end')
if scheme_name is None:
scheme_name = self.current_scheme
self.preview_editor.setup_editor(linenumbers=True,
markers=True,
tab_mode=False,
font=get_font(),
show_blanks=show_blanks,
color_scheme=scheme_name,
scroll_past_end=update_scrollbar)
self.preview_editor.set_text(text)
self.preview_editor.set_language('Python') |
java | protected List<GroovyRowResult> asList(String sql, ResultSet rs,
@ClosureParams(value=SimpleType.class, options="java.sql.ResultSetMetaData") Closure metaClosure) throws SQLException {
return asList(sql, rs, 0, 0, metaClosure);
} |
java | @SuppressWarnings("unchecked")
public EList<IfcProperty> getApprovedProperties() {
return (EList<IfcProperty>) eGet(
Ifc2x3tc1Package.Literals.IFC_APPROVAL_PROPERTY_RELATIONSHIP__APPROVED_PROPERTIES, true);
} |
java | public String getHtmlForDbSelection() {
StringBuffer buf = new StringBuffer(2048);
buf.append(
"<select name=\"fullDatabaseKey\" style=\"width: 250px;\" size=\"1\" onchange=\"location.href='../../step_3_database_selection.jsp?fullDatabaseKey='+this.options[this.selectedIndex].value;\">");
buf.append("<!-- --------------------- JSP CODE --------------------------- -->");
// get all available databases
List<String> databases = getSortedDatabases();
// List all databases found in the dbsetup.properties
if ((databases != null) && (databases.size() > 0)) {
List<String> sqlDbs = new ArrayList<String>();
for (String dbKey : databases) {
sqlDbs.add(dbKey);
}
// show the sql dbs first
for (String dbKey : sqlDbs) {
String dn = getDatabaseName(dbKey);
String selected = "";
if (getFullDatabaseKey().equals(dbKey + "_sql")) {
selected = "selected";
}
buf.append("<option value='" + dbKey + "_sql' " + selected + ">" + dn);
}
} else {
buf.append("<option value='null'>no database found");
}
buf.append("<!-- --------------------------------------------------------- -->");
buf.append("</select>");
return buf.toString();
} |
python | def run_forever(self, start_at='once'):
"""
Starts the scheduling engine
@param start_at: 'once' -> start immediately
'next_minute' -> start at the first second of the next minute
'next_hour' -> start at 00:00 (min:sec) of the next hour
'tomorrow' -> start at 0h tomorrow
"""
if start_at not in ('once', 'next_minute', 'next_hour', 'tomorrow'):
raise ValueError("start_at parameter must be one of these values: 'once', 'next_minute', 'next_hour', 'tomorrow'")
if start_at != 'once':
wait_until(start_at)
try:
task_pool = self.run_tasks()
while self.running:
gevent.sleep(seconds=1)
task_pool.join(timeout=30)
task_pool.kill()
except KeyboardInterrupt:
# https://github.com/surfly/gevent/issues/85
task_pool.closed = True
task_pool.kill()
logging.getLogger(self.logger_name).info('Time scheduler quits') |
java | public Stream<T> onNext(final Action1<? super T> action) {
return new Stream<T>() {
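        // Lazily wraps the source iterator; the action fires once per element as it is consumed.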
@Override
public Iterator<T> iterator() {
return new ReadOnlyIterator<T>() {
Iterator<T> iterator = Stream.this.iterator();
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public T next() {
final T next = iterator.next();
action.call(next);
return next;
}
};
}
};
} |
java | public void markNeighbor( DelaunayTriangle t )
{
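        // neighbors[i] holds the triangle sharing the edge opposite points[i].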
if( t.contains( points[1], points[2] ) )
{
neighbors[0] = t;
t.markNeighbor( points[1], points[2], this );
}
else if( t.contains( points[0], points[2] ) )
{
neighbors[1] = t;
t.markNeighbor( points[0], points[2], this );
}
else if( t.contains( points[0], points[1] ) )
{
neighbors[2] = t;
t.markNeighbor( points[0], points[1], this );
}
else
{
logger.error( "markNeighbor failed" );
}
} |
python | def _pop_params(cls, kwargs):
"""
Pop entries from the `kwargs` passed to cls.__new__ based on the values
in `cls.params`.
Parameters
----------
kwargs : dict
The kwargs passed to cls.__new__.
Returns
-------
params : list[(str, object)]
A list of string, value pairs containing the entries in cls.params.
Raises
------
TypeError
Raised if any parameter values are not passed or not hashable.
"""
params = cls.params
if not isinstance(params, Mapping):
params = {k: NotSpecified for k in params}
param_values = []
for key, default_value in params.items():
try:
value = kwargs.pop(key, default_value)
if value is NotSpecified:
raise KeyError(key)
# Check here that the value is hashable so that we fail here
# instead of trying to hash the param values tuple later.
hash(value)
except KeyError:
raise TypeError(
"{typename} expected a keyword parameter {name!r}.".format(
typename=cls.__name__,
name=key
)
)
except TypeError:
# Value wasn't hashable.
raise TypeError(
"{typename} expected a hashable value for parameter "
"{name!r}, but got {value!r} instead.".format(
typename=cls.__name__,
name=key,
value=value,
)
)
param_values.append((key, value))
return tuple(param_values) |
java | public static String getType(String fileStreamHexHead) {
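        // Match the file's hex header against the known magic-number prefixes.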
for (Entry<String, String> fileTypeEntry : fileTypeMap.entrySet()) {
if(StrUtil.startWithIgnoreCase(fileStreamHexHead, fileTypeEntry.getKey())) {
return fileTypeEntry.getValue();
}
}
return null;
} |
java | private void generateNextPermutationIndices()
{
if (remainingPermutations == 0)
{
throw new IllegalStateException("There are no permutations remaining. " +
"Generator must be reset to continue using.");
}
else if (remainingPermutations < totalPermutations)
{
// Find largest index j with permutationIndices[j] < permutationIndices[j + 1]
int j = permutationIndices.length - 2;
while (permutationIndices[j] > permutationIndices[j + 1])
{
j--;
}
// Find index k such that permutationIndices[k] is smallest integer greater than
// permutationIndices[j] to the right of permutationIndices[j].
int k = permutationIndices.length - 1;
while (permutationIndices[j] > permutationIndices[k])
{
k--;
}
// Interchange permutation indices.
int temp = permutationIndices[k];
permutationIndices[k] = permutationIndices[j];
permutationIndices[j] = temp;
// Put tail end of permutation after jth position in increasing order.
int r = permutationIndices.length - 1;
int s = j + 1;
while (r > s)
{
temp = permutationIndices[s];
permutationIndices[s] = permutationIndices[r];
permutationIndices[r] = temp;
r--;
s++;
}
}
--remainingPermutations;
} |
python | def on_task_done(self, task):
'''Called when a task is done and success, called by `on_task_status`'''
task['status'] = self.taskdb.SUCCESS
task['lastcrawltime'] = time.time()
if 'schedule' in task:
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
task['status'] = self.taskdb.ACTIVE
next_exetime = task['schedule'].get('age')
task['schedule']['exetime'] = time.time() + next_exetime
self.put_task(task)
else:
del task['schedule']
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'success'), +1)
self._cnt['1h'].event((project, 'success'), +1)
self._cnt['1d'].event((project, 'success'), +1)
self._cnt['all'].event((project, 'success'), +1).event((project, 'pending'), -1)
logger.info('task done %(project)s:%(taskid)s %(url)s', task)
return task |
java | public final void entryRuleXSwitchExpression() throws RecognitionException {
try {
// InternalXbaseWithAnnotations.g:1034:1: ( ruleXSwitchExpression EOF )
// InternalXbaseWithAnnotations.g:1035:1: ruleXSwitchExpression EOF
{
if ( state.backtracking==0 ) {
before(grammarAccess.getXSwitchExpressionRule());
}
pushFollow(FOLLOW_1);
ruleXSwitchExpression();
state._fsp--;
if (state.failed) return ;
if ( state.backtracking==0 ) {
after(grammarAccess.getXSwitchExpressionRule());
}
match(input,EOF,FOLLOW_2); if (state.failed) return ;
}
}
catch (RecognitionException re) {
reportError(re);
recover(input,re);
}
finally {
}
return ;
} |
java | protected String getMappingString() {
StringBuilder sb = new StringBuilder();
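        // Summarize up to three schemes and up to three domain patterns, appending "n more" for the rest.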
final int schemaSize = uriMap.size();
final int domainSize = patternMap.size();
int s = 0, d = 0;
for (String schema: uriMap.keySet()) {
if (s > 0) sb.append(", ");
sb.append(schema);
s++;
if (s > 2 && schemaSize > 3) {
sb.append(", ");
sb.append(schemaSize - s);
sb.append(" more");
break;
}
}
if (schemaSize > 0 && domainSize > 0) sb.append("; ");
for (Pattern domain: patternMap.keySet()) {
if (d > 0) sb.append(", ");
sb.append(domain.pattern());
d++;
if (d > 2 && domainSize > 3) {
sb.append(", ");
sb.append(domainSize - d);
sb.append(" more");
break;
}
}
return sb.toString();
} |
java | public void firstScan(List<String> packageNameList) throws Exception {
LOGGER.debug("start to scan package: " + packageNameList.toString());
        // obtain the scan results and analyze/consolidate them
        scanModel = scanStaticStrategy.scan(packageNameList);
        // add the non-annotation based configuration
        scanModel.setJustHostFiles(DisconfCenterHostFilesStore.getInstance().getJustHostFiles());
        // put the data into the stores
        for (StaticScannerMgr scannerMgr : staticScannerMgrList) {
            // scan the data into the store
            scannerMgr.scanData2Store(scanModel);
            // exclude the keys that should be ignored
            scannerMgr.exclude(DisClientConfig.getInstance().getIgnoreDisconfKeySet());
}
} |
python | def as_dict(self):
""" Return the URI object as a dictionary"""
return {k: v for (k, v) in self.__dict__.items()} |
java | @Override
public GetStreamingDistributionResult getStreamingDistribution(GetStreamingDistributionRequest request) {
request = beforeClientExecution(request);
return executeGetStreamingDistribution(request);
} |
java | public ChannelBuffer formatThreadStatsV1(final List<Map<String, Object>> stats) {
throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED,
"The requested API endpoint has not been implemented",
this.getClass().getCanonicalName() +
" has not implemented formatThreadStatsV1");
} |
java | public static void copyStreamUnsafelyUseWithCaution( InputStream in, OutputStream os ) throws IOException {
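        // Neither stream is closed here; the caller owns (and must close) both resources.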
byte[] buf = new byte[ 1024 ];
int len;
while((len = in.read( buf )) > 0) {
os.write( buf, 0, len );
}
} |
java | public void contextInitialized(ServletContextEvent servletContextEvent)
{
ServletContext ctx = servletContextEvent.getServletContext();
ctx.log("******* Mounting up GreenPepper-Server");
try
{
URL url = GPServletContextListener.class.getClassLoader().getResource(GREENPEPPER_CONFIG);
Properties sProperties = ServerConfiguration.load(url).getProperties();
injectAdditionalProperties(ctx, sProperties);
HibernateSessionService service = new HibernateSessionService(sProperties);
ctx.setAttribute(ServletContextKeys.SESSION_SERVICE, service);
ctx.log("Boostrapping datas");
new BootstrapData(service, sProperties).execute();
}
catch (Exception e)
{
throw new RuntimeException(e);
}
} |
python | def is_valid(email):
"""Email address validation method.
:param email: Email address to be saved.
:type email: basestring
:returns: True if email address is correct, False otherwise.
:rtype: bool
"""
if isinstance(email, basestring) and EMAIL_RE.match(email):
return True
return False |
java | public void marshall(DescribeSecurityProfileRequest describeSecurityProfileRequest, ProtocolMarshaller protocolMarshaller) {
if (describeSecurityProfileRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(describeSecurityProfileRequest.getSecurityProfileName(), SECURITYPROFILENAME_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
python | def on_dn_d_mode_change(self, dnd_mode):
"""Notification when the drag'n drop mode changes.
in dnd_mode of type :class:`DnDMode`
The new mode for drag'n drop.
"""
if not isinstance(dnd_mode, DnDMode):
raise TypeError("dnd_mode can only be an instance of type DnDMode")
self._call("onDnDModeChange",
in_p=[dnd_mode]) |
python | def find_closest_weather(weathers_list, unixtime):
"""
Extracts from the provided list of Weather objects the item which is
closest in time to the provided UNIXtime.
:param weathers_list: a list of *Weather* objects
:type weathers_list: list
:param unixtime: a UNIX time
:type unixtime: int
:returns: the *Weather* object which is closest in time or ``None`` if the
list is empty
"""
if not weathers_list:
return None
if not is_in_coverage(unixtime, weathers_list):
raise api_response_error.NotFoundError('Error: the specified time is ' + \
'not included in the weather coverage range')
closest_weather = weathers_list[0]
time_distance = abs(closest_weather.get_reference_time() - unixtime)
for weather in weathers_list:
if abs(weather.get_reference_time() - unixtime) < time_distance:
time_distance = abs(weather.get_reference_time() - unixtime)
closest_weather = weather
return closest_weather |
python | def listen(url, prefix=None, **kwargs):
"""
bind and return a connection instance from url
arguments:
- url (str): xbahn connection url
"""
return listener(url, prefix=get_prefix(prefix), **kwargs) |
python | def resolve_out(self, ins):
"""
Determine which stream the output is synchronised with. If the incoming streams have different sync values, then
it is unknown what synchronisation the outgoing stream should have.
:param ins: dictionary of the incoming streams' sync values
:return:
"""
values = set()
for value in ins.values():
values.update(value)
if len(values) > 1:
msg = 'Unable to resolve sync stream. Consider adding a custom resolver to {}.'
raise ValueError(msg.format(self.step.name))
return {key: values for key in self.step.outs} |
java | public float getSphereBound(float[] sphere)
{
if ((sphere == null) || (sphere.length != 4) ||
((NativeVertexBuffer.getBoundingVolume(getNative(), sphere)) < 0))
{
throw new IllegalArgumentException("Cannot copy sphere bound into array provided");
}
return sphere[0];
} |
python | def Task(entry): # pylint: disable=invalid-name
"""
Decorator with which classes, who act as tasks in a `Lane`, must be decorated. When a class is
being decorated, it becomes a child of `LaneTask`.
Parameters
----------
entry: The name of the task's "main" method, i.e. the method which is executed when task is run
Returns
-------
wrapper (function): The actual decorator function
"""
if not isinstance(entry, string_types):
# In the event that no argument is supplied to the decorator, python passes the decorated
# class itself as an argument. That way, we can detect if no argument (or an argument of
# invalid type) was supplied. This allows passing of `entry` as both a named kwarg, and
# as an arg. Isn't neat, but for now it suffices.
raise TypeError('When decorating a class with `Task`, a single string argument must be '
'supplied, which specifies the "main" task method, i.e. the class\'s entry '
'point to the task.')
else:
def wrapper(cls):
"""The actual decorator function"""
if isclass(cls):
if not hasattr(cls, entry): # Check if cls has the specified entry method
raise TypeError('Method `%s` not found in class `%s`.' % (entry, cls.__name__))
# We will have to inspect the task class's `__init__` method later (by inspecting
# the arg signature, before it is instantiated). In various circumstances, classes
# will not have an unbound `__init__` method. Let's deal with that now already, by
# assigning an empty, unbound `__init__` method manually, in order to prevent
# errors later on during method inspection (not an issue in Python 3):
# - Whenever a class is not defined as a new-style class in Python 2.7, i.e. a
# sub-class of object, and it does not have a `__init__` method definition, the
# class will not have an attribute `__init__`
# - If a class misses a `__init__` method definition, but is defined as a
# new-style class, attribute `__init__` will be of type `slot wrapper`, which
# cannot be inspected (and it also doesn't seem possible to check if a method is of
# type `slot wrapper`, which is why we manually define one).
if not hasattr(cls, '__init__') or cls.__init__ == object.__init__:
init = MethodType(lambda self: None, None, cls) \
if PY2 else MethodType(lambda self: None, cls)
setattr(cls, '__init__', init)
# Check for attributes that will be overwritten, in order to warn the user
reserved_attributes = ('__getattr__', '__call__', '_entry_mtd', 'cache', 'uncache',
'clear_cache', '_log_lock')
for attr in dir(cls):
if attr in reserved_attributes:
make_default_logger(INTERNAL_LOGGER_NAME).warning(
'Attribute `%s` of class `%s` will be overwritten when decorated with '
'`sparklanes.Task`! Avoid assigning any of the following attributes '
'`%s`', attr, cls.__name__, str(reserved_attributes)
)
assignments = {'_entry_mtd': entry,
'__getattr__': lambda self, name: TaskCache.get(name),
'__init__': cls.__init__,
'_log_lock': Lock()}
for attr in WRAPPER_ASSIGNMENTS:
try:
assignments[attr] = getattr(cls, attr)
except AttributeError:
pass
# Build task as a subclass of LaneTask
return type('Task_%s' % cls.__name__, (LaneTask, cls, object), assignments)
else:
raise TypeError('Only classes can be decorated with `Task`')
return wrapper |
python | def use_plenary_hierarchy_view(self):
"""Pass through to provider HierarchyLookupSession.use_plenary_hierarchy_view"""
self._hierarchy_view = PLENARY
# self._get_provider_session('hierarchy_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_hierarchy_view()
except AttributeError:
pass |
java | public static <T, M extends BytesReader<T> & BytesWriter<? super T>> SetMarshaller<T> of(
M elementMarshaller) {
return of(elementMarshaller, elementMarshaller);
} |
java | public void validateCreate(AppNewForm appNewForm) {
// trim
if (appNewForm.getApp() != null) {
appNewForm.setApp(appNewForm.getApp().trim());
}
if (appNewForm.getDesc() != null) {
appNewForm.setDesc(appNewForm.getDesc().trim());
}
App app = appMgr.getByName(appNewForm.getApp());
if (app != null) {
throw new FieldException(AppNewForm.APP, "app.exist", null);
}
} |
java | public static void checkNode(Node node, Node.Type... validTypes) {
for (Node.Type type : validTypes) {
if (node.getNodeType() == type) {
return;
}
}
StringJoiner sj = new StringJoiner(", ");
for (Node.Type type : validTypes) {
sj.add(type.toString());
}
String nodeName = node.getNodeType().name();
if (node instanceof NameBearer) {
nodeName = ((NameBearer) node).getName();
}
throw new FuzzerException(nodeName + ": invalid, must be one of " + sj.toString());
} |
python | def build_swagger_12_api_declaration_view(api_declaration_json):
"""Thanks to the magic of closures, this means we gracefully return JSON
without file IO at request time.
"""
def view_for_api_declaration(request):
# Note that we rewrite basePath to always point at this server's root.
return dict(
api_declaration_json,
basePath=str(request.application_url),
)
return view_for_api_declaration |
java | public List<CmsBrokenLinkBean> renderBrokenLinkInheritanceGroup(CmsResource target, CmsResource source)
throws CmsException {
List<CmsBrokenLinkBean> result = new ArrayList<CmsBrokenLinkBean>();
try {
Set<String> names = CmsInheritanceGroupUtils.getNamesOfGroupsContainingResource(m_cms, source, target);
if (!names.isEmpty()) {
for (String name : names) {
String title = null;
String path = null;
String extraTitle = null;
String extraPath = null;
CmsResource group = CmsInheritanceGroupUtils.getInheritanceGroupContentByName(m_cms, name);
String groupParent = CmsResource.getParentFolder(source.getRootPath());
CmsProperty titleProp = m_cms.readPropertyObject(
group,
CmsPropertyDefinition.PROPERTY_TITLE,
false);
title = CmsResource.getName(group.getRootPath());
if (!titleProp.isNullProperty()) {
title = titleProp.getValue();
}
path = m_cms.getRequestContext().removeSiteRoot(source.getRootPath());
List<CmsRelation> relations = m_cms.readRelations(
CmsRelationFilter.relationsToStructureId(group.getStructureId()));
List<CmsResource> referencingPages = new ArrayList<CmsResource>();
for (CmsRelation relation : relations) {
CmsResource relSource = relation.getSource(m_cms, CmsResourceFilter.ALL);
String pageParent = CmsResource.getParentFolder(relSource.getRootPath());
if (CmsResourceTypeXmlContainerPage.isContainerPage(relSource)
&& pageParent.equals(groupParent)) {
referencingPages.add(relSource);
}
}
if (!referencingPages.isEmpty()) {
CmsResource firstPage = referencingPages.get(0);
extraPath = m_cms.getRequestContext().removeSiteRoot(firstPage.getRootPath());
extraTitle = m_cms.readPropertyObject(
firstPage,
CmsPropertyDefinition.PROPERTY_TITLE,
true).getValue();
}
result.add(
createBrokenLinkBean(
group.getStructureId(),
CmsResourceTypeXmlContainerPage.INHERIT_CONTAINER_CONFIG_TYPE_NAME,
title,
path,
extraTitle,
extraPath));
}
} else {
result.add(createSitemapBrokenLinkBean(source));
}
} catch (CmsException e) {
result.add(createSitemapBrokenLinkBean(source));
}
return result;
} |
java | private void probe(ImmutableMember member) {
LOGGER.trace("{} - Probing {}", localMember.id(), member);
bootstrapService.getMessagingService().sendAndReceive(
member.address(), MEMBERSHIP_PROBE, SERIALIZER.encode(Pair.of(localMember.copy(), member)), false, config.getProbeTimeout())
.whenCompleteAsync((response, error) -> {
if (error == null) {
updateState(SERIALIZER.decode(response));
} else {
LOGGER.debug("{} - Failed to probe {}", this.localMember.id(), member, error);
// Verify that the local member term has not changed and request probes from peers.
SwimMember swimMember = members.get(member.id());
if (swimMember != null && swimMember.getIncarnationNumber() == member.incarnationNumber()) {
requestProbes(swimMember.copy());
}
}
}, swimScheduler);
} |
python | def origin_id_to_name(self, origin):
""" Returns a localized origin name for a given ID """
try:
oid = int(origin)
except (ValueError, TypeError):
return None
return self.origins.get(oid) |
java | @SuppressWarnings({"unchecked", "rawtypes"})
public static <T extends Describable<T>,D extends Descriptor<T>>
DescriptorExtensionList<T,D> createDescriptorList(Jenkins jenkins, Class<T> describableType) {
if (describableType == (Class) Publisher.class) {
return (DescriptorExtensionList) new Publisher.DescriptorExtensionListImpl(jenkins);
}
return new DescriptorExtensionList<>(jenkins, describableType);
} |
python | def get_marshaller_for_type(self, tp):
""" Gets the appropriate marshaller for a type.
Retrieves the marshaller, if any, that can be used to read/write
a Python object with type 'tp'. The modules it requires, if
available, will be loaded.
Parameters
----------
tp : type or str
Python object ``type`` (which would be the class reference)
or its string representation like ``'collections.deque'``.
Returns
-------
marshaller : marshaller or None
The marshaller that can read/write the type to
file. ``None`` if no appropriate marshaller is found.
has_required_modules : bool
Whether the required modules for reading the type are
present or not.
See Also
--------
hdf5storage.Marshallers.TypeMarshaller.types
"""
if not isinstance(tp, str):
tp = tp.__module__ + '.' + tp.__name__
if tp in self._types:
index = self._types[tp]
else:
return None, False
m = self._marshallers[index]
if self._imported_required_modules[index]:
return m, True
if not self._has_required_modules[index]:
return m, False
success = self._import_marshaller_modules(m)
self._has_required_modules[index] = success
self._imported_required_modules[index] = success
return m, success |
python | async def StatusHistory(self, requests):
'''
requests : typing.Sequence[~StatusHistoryRequest]
Returns -> typing.Sequence[~StatusHistoryResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Client',
request='StatusHistory',
version=2,
params=_params)
_params['requests'] = requests
reply = await self.rpc(msg)
return reply |
python | def delete(self, record_key):
''' a method to delete a file
:param record_key: string with name of file
:return: string reporting outcome
'''
title = '%s.delete' % self.__class__.__name__
key_arg = '%s(record_key="%s")' % (title, record_key)
# validate inputs
record_key = self.fields.validate(record_key, '.record_key')
# construct path to file
file_path = os.path.join(self.collection_folder, record_key)
# validate existence of file
if not os.path.exists(file_path):
exit_msg = '%s does not exist.' % record_key
return exit_msg
# delete file asynchronously
# if non_blocking:
# self._delete(file_path, title, record_key)
# exit_msg = '%s will be deleted.' % record_key
# return exit_msg
# remove file
current_dir = os.path.split(file_path)[0]
try:
os.remove(file_path)
except:
raise Exception('%s failed to delete %s' % (key_arg, record_key))
# # remove empty directories in path to file
# if not os.listdir(current_dir):
# os.removedirs(current_dir)
# remove empty directories in path to file
while current_dir != self.collection_folder:
if not os.listdir(current_dir):
os.rmdir(current_dir)
current_dir = os.path.split(current_dir)[0]
else:
break
exit_msg = '%s has been deleted.' % record_key
return exit_msg |
python | def wrap_expr(self, src: str, dfltChaining: bool) -> str:
"""Wrap `src` in parentheses if neccessary."""
diff_binding = self.op_man.diff_binding()
if diff_binding < 0 or diff_binding == 0 and not dfltChaining:
return self.parenthesize(src)
else:
return src |
java | public BillingPlanUpdateResponse updatePlan(String accountId, BillingPlanInformation billingPlanInformation) throws ApiException {
return updatePlan(accountId, billingPlanInformation, null);
} |
java | public static Map<String, Object> mapDescribedProperties(final PropertyResolver resolver,
final Description description, final PropertyScope defaultPropertyScope) {
final List<Property> properties = description.getProperties();
return mapProperties(resolver, properties, defaultPropertyScope);
} |
python | def setSr(self, fs):
"""Sets the samplerate of the input operation being plotted"""
self.tracePlot.setSr(fs)
self.stimPlot.setSr(fs) |
python | def unassign_hosting_device_from_cfg_agent(self, context, cfg_agent_id,
hosting_device_id):
"""Make config agent handle an (unassigned) hosting device."""
hd_db = self._get_hosting_device(context, hosting_device_id)
if hd_db.cfg_agent_id is None and cfg_agent_id is None:
return
elif hd_db.cfg_agent_id != cfg_agent_id:
LOG.debug('Hosting device %(hd_id)s is not assigned to Cisco '
'cfg agent %(agent_id)s',
{'hd_id': hosting_device_id,
'agent_id': cfg_agent_id})
raise ciscocfgagentscheduler.HostingDeviceNotAssignedToCfgAgent(
hosting_device_id=hosting_device_id, agent_id=cfg_agent_id)
cfg_agent_db = get_agent_db_obj(self._get_agent(context, cfg_agent_id))
cfg_notifier = self.agent_notifiers.get(c_constants.AGENT_TYPE_CFG)
if cfg_notifier:
cfg_notifier.hosting_devices_unassigned_from_cfg_agent(
context, [hosting_device_id], cfg_agent_db.host)
self._bind_hosting_device_to_cfg_agent(context, hd_db, None) |
python | def from_sites(cls, sites, charge=None, validate_proximity=False,
to_unit_cell=False):
"""
Convenience constructor to make a Structure from a list of sites.
Args:
sites: Sequence of PeriodicSites. Sites must have the same
lattice.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
to_unit_cell (bool): Whether to translate sites into the unit
cell.
Returns:
(Structure) Note that missing properties are set as None.
"""
if len(sites) < 1:
raise ValueError("You need at least one site to construct a %s" %
cls)
prop_keys = []
props = {}
lattice = None
for i, site in enumerate(sites):
if not lattice:
lattice = site.lattice
elif site.lattice != lattice:
raise ValueError("Sites must belong to the same lattice")
for k, v in site.properties.items():
if k not in prop_keys:
prop_keys.append(k)
props[k] = [None] * len(sites)
props[k][i] = v
for k, v in props.items():
if any((vv is None for vv in v)):
warnings.warn("Not all sites have property %s. Missing values "
"are set to None." % k)
return cls(lattice, [site.species for site in sites],
[site.frac_coords for site in sites],
charge=charge,
site_properties=props,
validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell) |
python | def p_if_stmt(p):
"""
if_stmt : IF expr sep stmt_list_opt elseif_stmt END_STMT
| IF LPAREN expr RPAREN stmt_list_opt elseif_stmt END_STMT
"""
if len(p) == 7:
p[0] = node.if_stmt(cond_expr=p[2], then_stmt=p[4], else_stmt=p[5])
elif len(p) == 8:
p[0] = node.if_stmt(cond_expr=p[3], then_stmt=p[5], else_stmt=p[6])
else:
assert 0 |
java | public DocSample processSample(DocSample sample) {
if (sample.isClearAdaptiveDataSet()) {
this.docClassifier.clearFeatureData();
}
String[] document = sample.getTokens();
String cat = docClassifier.classify(document);
if (sample.getLabel().equals(cat)) {
accuracy.add(1);
}
else {
accuracy.add(0);
}
return new DocSample(cat, sample.getTokens(), sample.isClearAdaptiveDataSet());
} |
java | public static Seconds secondsBetween(ReadablePartial start, ReadablePartial end) {
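        // Fast path: when both partials are LocalTime, compute the difference directly on their chronology.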
if (start instanceof LocalTime && end instanceof LocalTime) {
Chronology chrono = DateTimeUtils.getChronology(start.getChronology());
int seconds = chrono.seconds().getDifference(
((LocalTime) end).getLocalMillis(), ((LocalTime) start).getLocalMillis());
return Seconds.seconds(seconds);
}
int amount = BaseSingleFieldPeriod.between(start, end, ZERO);
return Seconds.seconds(amount);
} |
python | def discretize(self, method, *args, **kwargs):
"""
Discretizes the continuous distribution into discrete
probability masses using various methods.
Parameters
----------
method : A Discretizer Class from pgmpy.discretize
*args, **kwargs:
The parameters to be given to the Discretizer Class.
Returns
-------
An n-D array or a DiscreteFactor object according to the discretization
method used.
Examples
--------
>>> import numpy as np
>>> from scipy.special import beta
>>> from pgmpy.factors.continuous import ContinuousFactor
>>> from pgmpy.factors.continuous import RoundingDiscretizer
>>> def dirichlet_pdf(x, y):
... return (np.power(x, 1) * np.power(y, 2)) / beta(x, y)
>>> dirichlet_factor = ContinuousFactor(['x', 'y'], dirichlet_pdf)
>>> dirichlet_factor.discretize(RoundingDiscretizer, low=1, high=2, cardinality=5)
# TODO: finish this
"""
return method(self, *args, **kwargs).get_discrete_values() |
java | void recycle() {
flushed = false;
closed = false;
out = null;
nextChar = 0;
converterBuffer.clear(); //PM19500
response = null; //PM23029
} |
python | def commit(self):
"""Commit mutations to the database.
:rtype: datetime
:returns: timestamp of the committed changes.
"""
self._check_state()
database = self._session._database
api = database.spanner_api
metadata = _metadata_with_prefix(database.name)
txn_options = TransactionOptions(read_write=TransactionOptions.ReadWrite())
response = api.commit(
self._session.name,
self._mutations,
single_use_transaction=txn_options,
metadata=metadata,
)
self.committed = _pb_timestamp_to_datetime(response.commit_timestamp)
return self.committed |
python | def _multiplyThroughputs(self):
''' Overrides base class in order to deal with opaque components.
'''
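        # skip any leading opaque components (those with no throughput) before multiplying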
index = 0
for component in self.components:
if component.throughput is not None:
break
index += 1
return BaseObservationMode._multiplyThroughputs(self, index) |
python | def get_address(customer_id, data):
"""
Easier to fetch the addresses of customer and then check one by one.
You can get fancy by using some validation mechanism too
"""
Address = client.model('party.address')
addresses = Address.find(
[('party', '=', customer_id)],
fields=[
'name', 'street', 'street_bis', 'city', 'zip',
'subdivision.code', 'country.code'
]
)
for address in addresses:
if (
address['name'] == data['name'] and
address['street'] == data['street'] and
address['street_bis'] == data['street_bis'] and
address['city'] == data['city'] and
address['zip'] == data['zip'] and
address['subdivision.code'].endswith(data['state']) and
address['country.code'] == data['country']):
return address['id'] |
java | public Metric getMetricAsync(String metricName) throws ExecutionException, InterruptedException {
// [START getMetricAsync]
Future<Metric> future = logging.getMetricAsync(metricName);
// ...
Metric metric = future.get();
if (metric == null) {
// metric was not found
}
// [END getMetricAsync]
return metric;
} |
java | public List<ContactsResponse> getCharactersCharacterIdContacts(Integer characterId, String datasource,
String ifNoneMatch, Integer page, String token) throws ApiException {
ApiResponse<List<ContactsResponse>> resp = getCharactersCharacterIdContactsWithHttpInfo(characterId,
datasource, ifNoneMatch, page, token);
return resp.getData();
} |
python | async def _async_wait_for_process(
future_process: Any,
out: Optional[Union[TeeCapture, IO[str]]] = sys.stdout,
err: Optional[Union[TeeCapture, IO[str]]] = sys.stderr
) -> CommandOutput:
"""Awaits the creation and completion of an asynchronous process.
Args:
future_process: The eventually created process.
out: Where to write stuff emitted by the process' stdout.
err: Where to write stuff emitted by the process' stderr.
Returns:
A (captured output, captured error output, return code) triplet.
"""
process = await future_process
future_output = _async_forward(process.stdout, out)
future_err_output = _async_forward(process.stderr, err)
output, err_output = await asyncio.gather(future_output, future_err_output)
await process.wait()
return CommandOutput(output, err_output, process.returncode) |
java | public void addPrimaryKeysIfSourced() {
final List<Column> primaryKeyColumns = getTable().getPrimaryKeys();
if (primaryKeyColumns == null || primaryKeyColumns.isEmpty()) {
logger.info("No primary keys defined for table {}, not pre-selecting primary keys", getTable().getName());
return;
}
final Collection<InputColumn<?>> sourceInputColumns = getAnalysisJob().getSourceColumns();
final List<Column> sourceColumns = CollectionUtils.map(sourceInputColumns, InputColumn::getPhysicalColumn);
for (final Column primaryKeyColumn : primaryKeyColumns) {
if (!sourceColumns.contains(primaryKeyColumn)) {
logger.info("Primary key column {} not added to source columns, not pre-selecting primary keys");
return;
}
}
addPhysicalColumns(primaryKeyColumns);
} |
java | @Override
public String parse(JsonPullParser parser, OnJsonObjectAddListener listener)
throws IOException, JsonFormatException {
if (parser == null) {
throw new IllegalArgumentException();
}
State state = parser.getEventType();
switch (state) {
case VALUE_NULL:
return null;
case VALUE_STRING:
return parser.getValueString();
default:
throw new IllegalStateException();
}
} |
java | public Map<Integer, int[][]> getGridNodesPerMoneyness() {
//See if the map has already been instantiated.
if(keyMap != null) {
return Collections.unmodifiableMap(keyMap);
}
//Otherwise create the map and return it.
Map<Integer, List<Set<Integer>>> newMap = new HashMap<>();
for(DataKey key : entryMap.keySet()) {
if(! newMap.containsKey(key.moneyness)) {
newMap.put(key.moneyness, new ArrayList<Set<Integer>>());
newMap.get(key.moneyness).add(new HashSet<Integer>());
newMap.get(key.moneyness).add(new HashSet<Integer>());
}
newMap.get(key.moneyness).get(0).add(key.maturity);
newMap.get(key.moneyness).get(1).add(key.tenor);
}
Map<Integer, int[][]> keyMap = new TreeMap<>();
for(int moneyness : newMap.keySet()) {
int[][] values = new int[2][];
values[0] = newMap.get(moneyness).get(0).stream().sorted().mapToInt(Integer::intValue).toArray();
values[1] = newMap.get(moneyness).get(1).stream().sorted().mapToInt(Integer::intValue).toArray();
keyMap.put(moneyness, values);
}
this.keyMap = keyMap;
return Collections.unmodifiableMap(keyMap);
} |
python | def uri(self):
"""return the uri, which is everything but base (no scheme, host, etc)"""
uristring = self.path
if self.query:
uristring += "?{}".format(self.query)
if self.fragment:
uristring += "#{}".format(self.fragment)
return uristring |
python | def new_item(self, hash_key, range_key=None, attrs=None):
"""
Return an new, unsaved Item which can later be PUT to
Amazon DynamoDB.
"""
return Item(self, hash_key, range_key, attrs) |
python | def load_yaml(filename):
"""Read a YAML document from a file. If the file cannot be read or
parsed, a ConfigReadError is raised.
"""
try:
with open(filename, 'rb') as f:
return yaml.load(f, Loader=Loader)
except (IOError, yaml.error.YAMLError) as exc:
raise ConfigReadError(filename, exc) |
java | @Nullable
public static <ENUMTYPE extends Enum <ENUMTYPE> & IHasID <String>> ENUMTYPE getFromIDCaseInsensitiveOrNull (@Nonnull final Class <ENUMTYPE> aClass,
@Nullable final String sID)
{
return getFromIDCaseInsensitiveOrDefault (aClass, sID, null);
} |
python | def build_native_type_dictionary(fields, respect_required=False, wrap_field=True, name=''):
"""
This function takes a list of type summaries and builds a dictionary
with native representations of each entry. Useful for dynamically
building native class records from summaries.
"""
# a place to start when building the input field attributes
input_fields = {}
# go over every input in the summary
for field in fields:
field_name = name + field['name']
field_type = field['type']
# if the type field is a string
if isinstance(field_type, str):
# compute the native api type for the field
field_type = convert_typestring_to_api_native(field_type)(
# required=respect_required and field['required']
)
# add an entry in the attributes
input_fields[field['name']] = field_type
# we could also be looking at a dictionary
elif isinstance(field_type, dict):
object_fields = field_type['fields']
# add the dictionary to the parent as a graphql object type
input_fields[field['name']] = graphql_type_from_summary(
summary={
'name': field_name+"ArgType",
'fields': object_fields
}
)
# if we are supposed to wrap the object in a field
if wrap_field:
# then wrap the value we just added
input_fields[field['name']] = graphene.Field(input_fields[field['name']])
# we're done
return input_fields |
java | public void polyAddScaleB(GrowQueue_I8 polyA , GrowQueue_I8 polyB , int scaleB , GrowQueue_I8 output ) {
output.resize(Math.max(polyA.size,polyB.size));
// compute offset that would align the smaller polynomial with the larger polynomial
int offsetA = Math.max(0,polyB.size-polyA.size);
int offsetB = Math.max(0,polyA.size-polyB.size);
int N = output.size;
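        // Copy the leading coefficients that have no counterpart in the shorter polynomial.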
for (int i = 0; i < offsetB; i++) {
output.data[i] = polyA.data[i];
}
for (int i = 0; i < offsetA; i++) {
output.data[i] = (byte)multiply(polyB.data[i]&0xFF,scaleB);
}
for (int i = Math.max(offsetA,offsetB); i < N; i++) {
output.data[i] = (byte)((polyA.data[i-offsetA]&0xFF) ^ multiply(polyB.data[i-offsetB]&0xFF,scaleB));
}
} |
python | def validate_input_source_config_source_candidate_candidate(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
validate = ET.Element("validate")
config = validate
input = ET.SubElement(validate, "input")
source = ET.SubElement(input, "source")
config_source = ET.SubElement(source, "config-source")
candidate = ET.SubElement(config_source, "candidate")
candidate = ET.SubElement(candidate, "candidate")
callback = kwargs.pop('callback', self._callback)
return callback(config) |
python | def enforce_type(self, attr, val):
"""converts a value to the attribute's type"""
if attr not in self.types:
return utfstr(val)
elif self.types[attr] == 'int':
return int(float(val))
elif self.types[attr] == 'float':
return float(val)
else:
return utfstr(val) |
python | def fetch(self):
"""
Fetch a FunctionVersionInstance
:returns: Fetched FunctionVersionInstance
:rtype: twilio.rest.serverless.v1.service.function.function_version.FunctionVersionInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return FunctionVersionInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
function_sid=self._solution['function_sid'],
sid=self._solution['sid'],
) |
python | def _add_image_part(self, image):
"""
Return an |ImagePart| instance newly created from image and appended
to the collection.
"""
partname = self._next_image_partname(image.ext)
image_part = ImagePart.from_image(image, partname)
self.append(image_part)
return image_part |
python | def fbp_filter_op(ray_trafo, padding=True, filter_type='Ram-Lak',
frequency_scaling=1.0):
"""Create a filter operator for FBP from a `RayTransform`.
Parameters
----------
ray_trafo : `RayTransform`
The ray transform (forward operator) whose approximate inverse should
be computed. Its geometry has to be any of the following
`Parallel2dGeometry` : Exact reconstruction
`Parallel3dAxisGeometry` : Exact reconstruction
`FanBeamGeometry` : Approximate reconstruction, correct in limit of
fan angle = 0.
Only flat detectors are supported (det_curvature_radius is None).
`ConeFlatGeometry`, pitch = 0 (circular) : Approximate reconstruction,
correct in the limit of fan angle = 0 and cone angle = 0.
`ConeFlatGeometry`, pitch > 0 (helical) : Very approximate unless a
`tam_danielson_window` is used. Accurate with the window.
Other geometries: Not supported
padding : bool, optional
If the data space should be zero padded. Without padding, the data may
be corrupted due to the circular convolution used. Using padding makes
the algorithm slower.
filter_type : optional
The type of filter to be used.
The predefined options are, in approximate order from most noise
sensitive to least noise sensitive:
``'Ram-Lak'``, ``'Shepp-Logan'``, ``'Cosine'``, ``'Hamming'`` and
``'Hann'``.
A callable can also be provided. It must take an array of values in
[0, 1] and return the filter for these frequencies.
frequency_scaling : float, optional
Relative cutoff frequency for the filter.
The normalized frequencies are rescaled so that they fit into the range
[0, frequency_scaling]. Any frequency above ``frequency_scaling`` is
set to zero.
Returns
-------
filter_op : `Operator`
Filtering operator for FBP based on ``ray_trafo``.
See Also
--------
tam_danielson_window : Windowing for helical data
"""
impl = 'pyfftw' if PYFFTW_AVAILABLE else 'numpy'
alen = ray_trafo.geometry.motion_params.length
if ray_trafo.domain.ndim == 2:
# Define ramp filter
def fourier_filter(x):
abs_freq = np.abs(x[1])
norm_freq = abs_freq / np.max(abs_freq)
filt = _fbp_filter(norm_freq, filter_type, frequency_scaling)
scaling = 1 / (2 * alen)
return filt * np.max(abs_freq) * scaling
# Define (padded) fourier transform
if padding:
# Define padding operator
ran_shp = (ray_trafo.range.shape[0],
ray_trafo.range.shape[1] * 2 - 1)
resizing = ResizingOperator(ray_trafo.range, ran_shp=ran_shp)
fourier = FourierTransform(resizing.range, axes=1, impl=impl)
fourier = fourier * resizing
else:
fourier = FourierTransform(ray_trafo.range, axes=1, impl=impl)
elif ray_trafo.domain.ndim == 3:
# Find the direction that the filter should be taken in
rot_dir = _rotation_direction_in_detector(ray_trafo.geometry)
# Find what axes should be used in the fourier transform
used_axes = (rot_dir != 0)
if used_axes[0] and not used_axes[1]:
axes = [1]
elif not used_axes[0] and used_axes[1]:
axes = [2]
else:
axes = [1, 2]
# Add scaling for cone-beam case
if hasattr(ray_trafo.geometry, 'src_radius'):
scale = (ray_trafo.geometry.src_radius
/ (ray_trafo.geometry.src_radius
+ ray_trafo.geometry.det_radius))
if ray_trafo.geometry.pitch != 0:
# In helical geometry the whole volume is not in each
# projection and we need to use another weighting.
# Ideally each point in the volume effects only
# the projections in a half rotation, so we assume that that
# is the case.
scale *= alen / (np.pi)
else:
scale = 1.0
# Define ramp filter
def fourier_filter(x):
# If axis is aligned to a coordinate axis, save some memory and
# time by using broadcasting
if not used_axes[0]:
abs_freq = np.abs(rot_dir[1] * x[2])
elif not used_axes[1]:
abs_freq = np.abs(rot_dir[0] * x[1])
else:
abs_freq = np.abs(rot_dir[0] * x[1] + rot_dir[1] * x[2])
norm_freq = abs_freq / np.max(abs_freq)
filt = _fbp_filter(norm_freq, filter_type, frequency_scaling)
scaling = scale * np.max(abs_freq) / (2 * alen)
return filt * scaling
# Define (padded) fourier transform
if padding:
# Define padding operator
if used_axes[0]:
padded_shape_u = ray_trafo.range.shape[1] * 2 - 1
else:
padded_shape_u = ray_trafo.range.shape[1]
if used_axes[1]:
padded_shape_v = ray_trafo.range.shape[2] * 2 - 1
else:
padded_shape_v = ray_trafo.range.shape[2]
ran_shp = (ray_trafo.range.shape[0],
padded_shape_u,
padded_shape_v)
resizing = ResizingOperator(ray_trafo.range, ran_shp=ran_shp)
fourier = FourierTransform(resizing.range, axes=axes, impl=impl)
fourier = fourier * resizing
else:
fourier = FourierTransform(ray_trafo.range, axes=axes, impl=impl)
else:
raise NotImplementedError('FBP only implemented in 2d and 3d')
# Create ramp in the detector direction
ramp_function = fourier.range.element(fourier_filter)
weight = 1
if not ray_trafo.range.is_weighted:
# Compensate for potentially unweighted range of the ray transform
weight *= ray_trafo.range.cell_volume
if not ray_trafo.domain.is_weighted:
# Compensate for potentially unweighted domain of the ray transform
weight /= ray_trafo.domain.cell_volume
ramp_function *= weight
# Create ramp filter via the convolution formula with fourier transforms
return fourier.inverse * ramp_function * fourier |
java | public static <T extends ImageGray<T>>T average( ImageMultiBand input , T output ) {
if( input instanceof Planar ) {
return (T)average((Planar)input,output);
} else if( input instanceof ImageInterleaved ) {
return (T)average((ImageInterleaved)input,output);
} else {
throw new RuntimeException("Unknown multiband image");
}
} |