language (stringclasses, 2 values) | func_code_string (stringlengths, 63 to 466k)
---|---|
python
|
def question(self, n):
"""Choose what do to file by file
"""
print("")
prompt_ask = raw_input("{0} [K/O/R/D/M/Q]? ".format(n))
print("")
if prompt_ask in ("K", "k"):
self.keep()
elif prompt_ask in ("O", "o"):
self._overwrite(n)
elif prompt_ask in ("R", "r"):
self._remove(n)
elif prompt_ask in ("D", "d"):
self.diff(n)
self.i -= 1
elif prompt_ask in ("M", "m"):
self.merge(n)
elif prompt_ask in ("Q", "q", "quit"):
self.quit()
|
python
|
def add_mapping(agent, prefix, ip):
"""Adds a mapping with a contract.
It has high latency but gives some kind of guarantee."""
return _broadcast(agent, AddMappingManager, RecordType.record_A,
prefix, ip)
|
java
|
public void marshall(GetPipelineStateRequest getPipelineStateRequest, ProtocolMarshaller protocolMarshaller) {
if (getPipelineStateRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(getPipelineStateRequest.getName(), NAME_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
}
|
python
|
def _parse_integrator(int_method):
"""parse the integrator method to pass to C"""
#Pick integrator
if int_method.lower() == 'rk4_c':
int_method_c= 1
elif int_method.lower() == 'rk6_c':
int_method_c= 2
elif int_method.lower() == 'symplec4_c':
int_method_c= 3
elif int_method.lower() == 'symplec6_c':
int_method_c= 4
elif int_method.lower() == 'dopr54_c':
int_method_c= 5
elif int_method.lower() == 'dop853_c':
int_method_c= 6
else:
int_method_c= 0
return int_method_c
|
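A quick sanity-check sketch for the `_parse_integrator` helper above, assuming the function is in scope; it only exercises the name-to-code mapping already visible in the snippet.

```python
# _parse_integrator maps integrator names to integer codes for the C layer;
# matching is case-insensitive and unknown names fall back to 0.
assert _parse_integrator('dopr54_c') == 5
assert _parse_integrator('RK4_C') == 1
assert _parse_integrator('leapfrog') == 0
```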
python
|
def _parse_request(self, enc_request, request_cls, service, binding):
"""Parse a Request
:param enc_request: The request in its transport format
:param request_cls: The type of requests I expect
:param service:
:param binding: Which binding that was used to transport the message
to this entity.
:return: A request instance
"""
_log_info = logger.info
_log_debug = logger.debug
# The addresses I should receive messages like this on
receiver_addresses = self.config.endpoint(service, binding,
self.entity_type)
if not receiver_addresses and self.entity_type == "idp":
for typ in ["aa", "aq", "pdp"]:
receiver_addresses = self.config.endpoint(service, binding, typ)
if receiver_addresses:
break
_log_debug("receiver addresses: %s", receiver_addresses)
_log_debug("Binding: %s", binding)
try:
timeslack = self.config.accepted_time_diff
if not timeslack:
timeslack = 0
except AttributeError:
timeslack = 0
_request = request_cls(self.sec, receiver_addresses,
self.config.attribute_converters,
timeslack=timeslack)
xmlstr = self.unravel(enc_request, binding, request_cls.msgtype)
must = self.config.getattr("want_authn_requests_signed", "idp")
only_valid_cert = self.config.getattr(
"want_authn_requests_only_with_valid_cert", "idp")
if only_valid_cert is None:
only_valid_cert = False
if only_valid_cert:
must = True
_request = _request.loads(xmlstr, binding, origdoc=enc_request,
must=must, only_valid_cert=only_valid_cert)
_log_debug("Loaded request")
if _request:
_request = _request.verify()
_log_debug("Verified request")
if not _request:
return None
else:
return _request
|
python
|
def handle(self, dict):
'''
        Processes a valid stats request
@param dict: a valid dictionary object
'''
# format key
key = "statsrequest:{stats}:{appid}".format(
stats=dict['stats'],
appid=dict['appid'])
self.redis_conn.set(key, dict['uuid'])
dict['parsed'] = True
dict['valid'] = True
self.logger.info('Added stat request to Redis', extra=dict)
|
java
|
@Override
public LocalDateTime plus(long amountToAdd, TemporalUnit unit) {
if (unit instanceof ChronoUnit) {
ChronoUnit f = (ChronoUnit) unit;
switch (f) {
case NANOS: return plusNanos(amountToAdd);
case MICROS: return plusDays(amountToAdd / MICROS_PER_DAY).plusNanos((amountToAdd % MICROS_PER_DAY) * 1000);
case MILLIS: return plusDays(amountToAdd / MILLIS_PER_DAY).plusNanos((amountToAdd % MILLIS_PER_DAY) * 1000000);
case SECONDS: return plusSeconds(amountToAdd);
case MINUTES: return plusMinutes(amountToAdd);
case HOURS: return plusHours(amountToAdd);
case HALF_DAYS: return plusDays(amountToAdd / 256).plusHours((amountToAdd % 256) * 12); // no overflow (256 is multiple of 2)
}
return with(date.plus(amountToAdd, unit), time);
}
return unit.addTo(this, amountToAdd);
}
|
java
|
protected SSLEngineResult.HandshakeStatus doTasks() {
Runnable runnable;
// We could run this in a separate thread, but do in the current for
// now.
while ((runnable = sslEngine.getDelegatedTask()) != null) {
runnable.run();
}
return sslEngine.getHandshakeStatus();
}
|
java
|
private void addQueryParams(final Request request) {
if (identity != null) {
for (String prop : identity) {
request.addQueryParam("Identity", prop);
}
}
if (getPageSize() != null) {
request.addQueryParam("PageSize", Integer.toString(getPageSize()));
}
}
|
java
|
public static ORCSchemaProvider createORCSchemaProvider(
String className, SecorConfig config) throws Exception {
Class<?> clazz = Class.forName(className);
if (!ORCSchemaProvider.class.isAssignableFrom(clazz)) {
throw new IllegalArgumentException(String.format(
"The class '%s' is not assignable to '%s'.", className,
ORCSchemaProvider.class.getName()));
}
return (ORCSchemaProvider) clazz.getConstructor(SecorConfig.class)
.newInstance(config);
}
|
python
|
def detach_storage(self, server, address):
"""
        Detach a Storage object from a Server. Return a list of the server's storages.
"""
body = {'storage_device': {'address': address}}
url = '/server/{0}/storage/detach'.format(server)
res = self.post_request(url, body)
return Storage._create_storage_objs(res['server']['storage_devices'], cloud_manager=self)
|
java
|
public static FieldUpdater createFieldUpdater(ObjectUpdater objUpdater, DBObject dbObj, String fieldName) {
if (fieldName.charAt(0) == '_') {
if (fieldName.equals(CommonDefs.ID_FIELD)) {
return new IDFieldUpdater(objUpdater, dbObj);
} else {
// Allow but skip all other system fields (e.g., "_table")
return new NullFieldUpdater(objUpdater, dbObj, fieldName);
}
}
TableDefinition tableDef = objUpdater.getTableDef();
if (tableDef.isLinkField(fieldName)) {
return new LinkFieldUpdater(objUpdater, dbObj, fieldName);
} else {
Utils.require(FieldDefinition.isValidFieldName(fieldName), "Invalid field name: %s", fieldName);
return new ScalarFieldUpdater(objUpdater, dbObj, fieldName);
}
}
|
python
|
def rename_page(self, old_slug, new_title):
'''Load the page corresponding to the slug, and rename it.'''
#load page
p = s2page.Page(self, old_slug, isslug=True)
p.rename(new_title)
|
python
|
def empirical_rate(data, sigma=3.0):
"""
Smooth count data to get an empirical rate
"""
    from scipy.ndimage.filters import gaussian_filter1d
    return 0.001 + gaussian_filter1d(data.astype(float), sigma, axis=0)
|
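A minimal usage sketch for `empirical_rate` above, assuming NumPy and SciPy are installed and the function is in scope; the Poisson count array is purely illustrative.

```python
import numpy as np

# Toy spike-count data: 200 time bins for 3 units.
counts = np.random.poisson(lam=2.0, size=(200, 3))

# Gaussian smoothing along the time axis yields a strictly positive empirical rate.
rate = empirical_rate(counts, sigma=3.0)
print(rate.shape)  # (200, 3)
```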
java
|
public BaseGrid setAxisLabel(String... axisLabels) {
if (axisLabels.length != base.getDimension()) {
throw new IllegalArgumentException("Axis label size don't match base dimension.");
}
for (int i = 0; i < axisLabels.length; i++) {
axis[i].setAxisLabel(axisLabels[i]);
}
return this;
}
|
python
|
def set_heap(self, heap_dump, heap_base):
"""
Heap dump is a dump of the heap from gdb, i.e. the result of the
following gdb command:
        ``dump binary memory [heap_dump] [begin] [end]``
:param heap_dump: The dump file.
:param heap_base: The start address of the heap in the gdb session.
"""
# We set the heap at the same addresses as the gdb session to avoid pointer corruption.
data = self._read_data(heap_dump)
self.state.heap.heap_location = heap_base + len(data)
addr = heap_base
l.info("Set heap from 0x%x to %#x", addr, addr+len(data))
        #FIXME: we should probably make sure we don't overwrite other stuff loaded there
self._write(addr, data)
|
python
|
def sendFuture(self, future):
"""Send a Future to be executed remotely."""
try:
if shared.getConst(hash(future.callable),
timeout=0):
# Enforce name reference passing if already shared
future.callable = SharedElementEncapsulation(hash(future.callable))
self.socket.send_multipart([b"TASK",
pickle.dumps(future,
pickle.HIGHEST_PROTOCOL)])
except pickle.PicklingError as e:
# If element not picklable, pickle its name
# TODO: use its fully qualified name
scoop.logger.warn("Pickling Error: {0}".format(e))
previousCallable = future.callable
future.callable = hash(future.callable)
self.socket.send_multipart([b"TASK",
pickle.dumps(future,
pickle.HIGHEST_PROTOCOL)])
future.callable = previousCallable
|
java
|
@GET
public Response getStores() {
String msg = "getting stores.";
try {
return doGetStores(msg);
} catch (StorageException se) {
return responseBad(msg, se);
} catch (Exception e) {
return responseBad(msg, e);
}
}
|
java
|
public Response edit(String noteId, int noteX, int noteY, int noteWidth, int noteHeight, String noteText) throws JinxException {
JinxUtils.validateParams(noteId, noteText);
Map<String, String> params = new TreeMap<>();
params.put("method", "flickr.photos.notes.edit");
params.put("note_id", noteId);
params.put("note_x", Integer.toString(noteX));
params.put("note_y", Integer.toString(noteY));
params.put("note_w", Integer.toString(noteWidth));
params.put("note_h", Integer.toString(noteHeight));
params.put("note_text", noteText);
return jinx.flickrPost(params, Response.class);
}
|
java
|
@Override
public Config getOwnConfig(ConfigKeyPath configKey, String version) throws VersionDoesNotExistException {
Preconditions.checkNotNull(configKey, "configKey cannot be null!");
Preconditions.checkArgument(!Strings.isNullOrEmpty(version), "version cannot be null or empty!");
Path datasetDir = getDatasetDirForKey(configKey, version);
Path mainConfFile = new Path(datasetDir, MAIN_CONF_FILE_NAME);
try {
if (!this.fs.exists(mainConfFile)) {
return ConfigFactory.empty();
}
FileStatus configFileStatus = this.fs.getFileStatus(mainConfFile);
if (!configFileStatus.isDirectory()) {
try (InputStream mainConfInputStream = this.fs.open(configFileStatus.getPath())) {
return ConfigFactory.parseReader(new InputStreamReader(mainConfInputStream, Charsets.UTF_8));
}
}
return ConfigFactory.empty();
} catch (IOException e) {
throw new RuntimeException(String.format("Error while getting config for configKey: \"%s\"", configKey), e);
}
}
|
java
|
private static final int find(final Memory mem, final int lgArr,
final int coupon) {
final int arrMask = (1 << lgArr) - 1;
int probe = coupon & arrMask;
final int loopIndex = probe;
do {
final int couponAtIndex = extractInt(mem, HASH_SET_INT_ARR_START + (probe << 2));
if (couponAtIndex == EMPTY) { return ~probe; } //empty
else if (coupon == couponAtIndex) { return probe; } //duplicate
final int stride = ((coupon & KEY_MASK_26) >>> lgArr) | 1;
probe = (probe + stride) & arrMask;
} while (probe != loopIndex);
throw new SketchesArgumentException("Key not found and no empty slots!");
}
|
java
|
private static final int toIndex(List<?> base, Object property) {
int index = 0;
if (property instanceof Number) {
index = ((Number) property).intValue();
} else if (property instanceof String) {
try {
index = Integer.valueOf((String) property);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Cannot parse list index: " + property);
}
} else if (property instanceof Character) {
index = ((Character) property).charValue();
} else if (property instanceof Boolean) {
index = ((Boolean) property).booleanValue() ? 1 : 0;
} else {
throw new IllegalArgumentException("Cannot coerce property to list index: " + property);
}
if (base != null && (index < 0 || index >= base.size())) {
throw new PropertyNotFoundException("List index out of bounds: " + index);
}
return index;
}
|
python
|
def parse_from_json(json_str):
"""
Given a Unified Uploader message, parse the contents and return a
MarketOrderList or MarketHistoryList instance.
:param str json_str: A Unified Uploader message as a JSON string.
:rtype: MarketOrderList or MarketHistoryList
:raises: MalformedUploadError when invalid JSON is passed in.
"""
try:
message_dict = json.loads(json_str)
except ValueError:
raise ParseError("Mal-formed JSON input.")
upload_keys = message_dict.get('uploadKeys', False)
if upload_keys is False:
raise ParseError(
"uploadKeys does not exist. At minimum, an empty array is required."
)
elif not isinstance(upload_keys, list):
raise ParseError(
"uploadKeys must be an array object."
)
upload_type = message_dict['resultType']
try:
if upload_type == 'orders':
return orders.parse_from_dict(message_dict)
elif upload_type == 'history':
return history.parse_from_dict(message_dict)
else:
raise ParseError(
'Unified message has unknown upload_type: %s' % upload_type)
except TypeError as exc:
# MarketOrder and HistoryEntry both raise TypeError exceptions if
# invalid input is encountered.
raise ParseError(exc.message)
|
java
|
public final void synpred45_InternalPureXbase_fragment() throws RecognitionException {
// InternalPureXbase.g:5593:4: ( '!' | '-' | '+' | 'new' | '{' | 'switch' | 'synchronized' | '<' | 'super' | '#' | '[' | 'false' | 'true' | 'null' | 'typeof' | 'if' | 'for' | 'while' | 'do' | 'throw' | 'return' | 'try' | '(' | RULE_ID | RULE_HEX | RULE_INT | RULE_DECIMAL | RULE_STRING )
// InternalPureXbase.g:
{
if ( (input.LA(1)>=RULE_STRING && input.LA(1)<=RULE_ID)||(input.LA(1)>=14 && input.LA(1)<=15)||input.LA(1)==28||(input.LA(1)>=44 && input.LA(1)<=45)||input.LA(1)==50||(input.LA(1)>=58 && input.LA(1)<=59)||input.LA(1)==61||input.LA(1)==64||input.LA(1)==66||(input.LA(1)>=69 && input.LA(1)<=80) ) {
input.consume();
state.errorRecovery=false;state.failed=false;
}
else {
if (state.backtracking>0) {state.failed=true; return ;}
MismatchedSetException mse = new MismatchedSetException(null,input);
throw mse;
}
}
}
|
python
|
def find_donor_catchments(self, limit=6, dist_limit=500):
"""
        Return a list of suitable donor catchments to improve a QMED estimate based on catchment descriptors alone.
:param limit: maximum number of catchments to return. Default: 6. Set to `None` to return all available
catchments.
:type limit: int
:param dist_limit: maximum distance in km. between subject and donor catchment. Default: 500 km. Increasing the
maximum distance will increase computation time!
:type dist_limit: float or int
:return: list of nearby catchments
:rtype: :class:`floodestimation.entities.Catchment`
"""
if self.gauged_catchments:
return self.gauged_catchments.nearest_qmed_catchments(self.catchment, limit, dist_limit)
else:
return []
|
java
|
public void init(Record record, int iFieldSeq, String fieldName, int iSecondaryFieldSeq, String secondaryFieldName)
{
Converter converter = null;
m_recMerge = record;
m_iFieldSeq = iFieldSeq;
m_iSecondaryFieldSeq = iSecondaryFieldSeq;
this.fieldName = fieldName;
this.secondaryFieldName = secondaryFieldName;
if (record != null)
converter = this.getTargetField(null);
super.init(converter, null);
if (record != null)
record.addListener(new RemoveConverterOnCloseHandler(this)); // Because this is a converter (not a fieldConverter)
if (m_recMerge != null)
if (m_recMerge.getTable() instanceof MultiTable)
{ // Add all the fields in the sub-records
MultiTable multiTable = (MultiTable)m_recMerge.getTable();
Iterator<BaseTable> iterator = multiTable.getTables();
while (iterator.hasNext())
{
BaseTable table = (BaseTable)iterator.next();
Converter field = this.getTargetField(table.getRecord());
this.addConverterToPass(field); // Add it, and
}
}
}
|
python
|
def get_uris(self, base_uri, filter_list=None):
"""Return a set of internal URIs."""
return {
re.sub(r'^/', base_uri, link.attrib['href'])
for link in self.parsedpage.get_nodes_by_selector('a')
if 'href' in link.attrib and (
link.attrib['href'].startswith(base_uri) or
link.attrib['href'].startswith('/')
) and
not is_uri_to_be_filtered(link.attrib['href'], filter_list)
}
|
java
|
public static boolean lossyEquals(final Locale locale, final String source, final String target)
{
return withinLocale(new Callable<Boolean>()
{
@Override
public Boolean call() throws Exception
{
return lossyEquals(source, target);
}
}, locale);
}
|
java
|
@Override
protected void unserializeFrom(RawDataBuffer in)
{
super.unserializeFrom(in);
sessionId = new IntegerID(in.readInt());
}
|
java
|
public static void sendTextBlocking(final String message, final WebSocketChannel wsChannel) throws IOException {
final ByteBuffer data = ByteBuffer.wrap(message.getBytes(StandardCharsets.UTF_8));
sendBlockingInternal(data, WebSocketFrameType.TEXT, wsChannel);
}
|
java
|
@Override public List<MemberAnnotatedWithAtStat> apply(Class<?> clazz) {
List<MemberAnnotatedWithAtStat> annotatedMembers = Lists.newArrayList();
for (Class<?> currentClass = clazz;
currentClass != Object.class;
currentClass = currentClass.getSuperclass()) {
for (Method method : currentClass.getDeclaredMethods()) {
Stat stat = method.getAnnotation(Stat.class);
if (stat != null && staticMemberPolicy.shouldAccept(method)) {
annotatedMembers.add(new MemberAnnotatedWithAtStat(stat, method));
}
}
for (Field field : currentClass.getDeclaredFields()) {
Stat stat = field.getAnnotation(Stat.class);
if (stat != null && staticMemberPolicy.shouldAccept(field)) {
annotatedMembers.add(new MemberAnnotatedWithAtStat(stat, field));
}
}
}
return annotatedMembers;
}
|
java
|
private void initEntityClass() {
if (from == null) {
throw new JPQLParseException("Bad query format FROM clause is mandatory for SELECT queries");
}
String fromArray[] = from.split(" ");
if (!this.isDeleteUpdate) {
if (fromArray.length == 3 && fromArray[1].equalsIgnoreCase("as")) {
fromArray = new String[] { fromArray[0], fromArray[2] };
}
if (fromArray.length != 2) {
throw new JPQLParseException("Bad query format: " + from
+ ". Identification variable is mandatory in FROM clause for SELECT queries");
}
// TODO
StringTokenizer tokenizer = new StringTokenizer(result[0], ",");
while (tokenizer.hasMoreTokens()) {
String token = tokenizer.nextToken();
if (!StringUtils.containsAny(fromArray[1] + ".", token)) {
throw new QueryHandlerException("bad query format with invalid alias:" + token);
}
}
}
this.entityName = fromArray[0];
if (fromArray.length == 2)
this.entityAlias = fromArray[1];
persistenceUnit = kunderaMetadata.getApplicationMetadata().getMappedPersistenceUnit(entityName);
// Get specific metamodel.
MetamodelImpl model = getMetamodel(persistenceUnit);
if (model != null) {
entityClass = model.getEntityClass(entityName);
}
if (null == entityClass) {
logger.error(
"No entity {} found, please verify it is properly annotated with @Entity and not a mapped Super class",
entityName);
throw new QueryHandlerException("No entity found by the name: " + entityName);
}
EntityMetadata metadata = model.getEntityMetadata(entityClass);
if (metadata != null && !metadata.isIndexable()) {
throw new QueryHandlerException(entityClass + " is not indexed. Not possible to run a query on it."
+ " Check whether it was properly annotated for indexing.");
}
}
|
java
|
public void visitPreDestroyMethod(Object declaringType,
Object returnType,
String methodName) {
visitPreDestroyMethodDefinition();
final MethodVisitData methodVisitData = new MethodVisitData(
declaringType,
false,
returnType,
methodName,
Collections.emptyMap(),
Collections.emptyMap(),
Collections.emptyMap(),
AnnotationMetadata.EMPTY_METADATA);
preDestroyMethodVisits.add(methodVisitData);
visitMethodInjectionPointInternal(methodVisitData,
constructorVisitor,
preDestroyMethodVisitor,
preDestroyInstanceIndex,
ADD_PRE_DESTROY_METHOD);
}
|
java
|
public void assignNestedDrivers(XTrace trace,
Map<List<String>, String> drivers) {
XCostDriver.instance().assignNestedValues(trace, drivers);
}
|
python
|
def _save_account(self, account, username):
""" Called when account is created/updated. With username override. """
# retrieve default project, or use null project if none
default_project_name = self._null_project
if account.default_project is not None:
default_project_name = account.default_project.pid
# account created
# account updated
ds_user = self.get_user(username)
if account.date_deleted is None:
# date_deleted is not set, user should exist
logger.debug("account is active")
if ds_user is None:
# create user if doesn't exist
self._call([
"add", "user",
"accounts=%s" % default_project_name,
"defaultaccount=%s" % default_project_name,
"name=%s" % username])
else:
# or just set default project
self._call([
"modify", "user",
"set", "defaultaccount=%s" % default_project_name,
"where", "name=%s" % username])
# update user meta information
# add rest of projects user belongs to
slurm_projects = self.get_projects_in_user(username)
slurm_projects = [project.lower() for project in slurm_projects]
slurm_projects = set(slurm_projects)
for project in account.person.projects.all():
if project.pid.lower() not in slurm_projects:
self._call([
"add", "user",
"name=%s" % username,
"accounts=%s" % project.pid])
else:
            # date_deleted is set, user should not exist
logger.debug("account is not active")
self._delete_account(username)
return
|
java
|
public void addAll(IntBitRelation right) {
int i, max;
max = right.line.length;
for (i = 0; i < max; i++) {
if (right.line[i] != null) {
add(i, right.line[i]);
}
}
}
|
python
|
def formfields_for_xmlobject(model, fields=None, exclude=None, widgets=None, options=None,
declared_subforms=None, max_num=None, extra=None):
"""
Returns three sorted dictionaries (:class:`django.utils.datastructures.SortedDict`).
* The first is a dictionary of form fields based on the
:class:`~eulxml.xmlmap.XmlObject` class fields and their types.
* The second is a sorted dictionary of subform classes for any fields of type
:class:`~eulxml.xmlmap.fields.NodeField` on the model.
* The third is a sorted dictionary of formsets for any fields of type
:class:`~eulxml.xmlmap.fields.NodeListField` on the model.
Default sorting (within each dictionary) is by XmlObject field creation order.
Used by :class:`XmlObjectFormType` to set up a new :class:`XmlObjectForm`
class.
:param fields: optional list of field names; if specified, only the named fields
will be returned, in the specified order
:param exclude: optional list of field names that should not be included on
the form; if a field is listed in both ``fields`` and ``exclude``,
it will be excluded
:param widgets: optional dictionary of widget options to be passed to form
field constructor, keyed on field name
:param options: optional :class:`~django.forms.models.ModelFormOptions`.
if specified then fields, exclude, and widgets will default
to its values.
:param declared_subforms: optional dictionary of field names and form classes;
if specified, the specified form class will be used to initialize
the corresponding subform (for a :class:`~eulxml.xmlmap.fields.NodeField`)
or a formset (for a :class:`~eulxml.xmlmap.fields.NodeListField`)
:param max_num: optional value for the maximum number of times a fieldset should repeat.
    :param extra: optional value for the number of extra forms to provide.
"""
# first collect fields and excludes for the form and all subforms. base
# these on the specified options object unless overridden in args.
fieldlist = getattr(options, 'parsed_fields', None)
if isinstance(fields, ParsedFieldList):
fieldlist = fields
elif fields is not None:
fieldlist = _parse_field_list(fields, include_parents=True)
excludelist = getattr(options, 'parsed_exclude', None)
    if isinstance(exclude, ParsedFieldList):
        excludelist = exclude
elif exclude is not None:
excludelist = _parse_field_list(exclude, include_parents=False)
if widgets is None and options is not None:
widgets = options.widgets
if max_num is None and options is not None:
max_num = options.max_num
# collect the fields (unordered for now) that we're going to be returning
formfields = {}
subforms = {}
formsets = {}
field_order = {}
subform_labels = {}
for name, field in six.iteritems(model._fields):
if fieldlist and not name in fieldlist.fields:
# if specific fields have been requested and this is not one of them, skip it
continue
if excludelist and name in excludelist.fields:
# if exclude has been specified and this field is listed, skip it
continue
if widgets and name in widgets:
# if a widget has been specified for this field, pass as option to form field init
kwargs = {'widget': widgets[name] }
else:
kwargs = {}
        # get appropriate form widget based on xmlmap field type
field_type = None
# if the xmlmap field knows whether or not it is required, use for form
if field.required is not None:
kwargs['required'] = field.required
if field.verbose_name is not None:
kwargs['label'] = field.verbose_name
if field.help_text is not None:
kwargs['help_text'] = field.help_text
if hasattr(field, 'choices') and field.choices:
# if a field has choices defined, use a choice field (no matter what base type)
field_type = ChoiceField
kwargs['choices'] = [(val, val) for val in field.choices]
# FIXME: how to properly do non-required choice field?
# if field is optional, add a blank choice at the beginning of the list
if field.required == False and '' not in field.choices:
# TODO: add an empty_label option (like django ModelChoiceField)
# to xmlobjectform and pass it in to make this easier to customize
kwargs['choices'].insert(0, ('', ''))
elif isinstance(field, xmlmap.fields.StringField):
field_type = CharField
elif isinstance(field, xmlmap.fields.IntegerField):
field_type = IntegerField
elif isinstance(field, xmlmap.fields.DateField):
field_type = DateField
elif isinstance(field, xmlmap.fields.SimpleBooleanField):
# by default, fields are required - for a boolean, required means it must be checked
# since that seems nonsensical and not useful for a boolean,
# setting required to False to allow True or False values
kwargs['required'] = False
field_type = BooleanField
# datefield ? - not yet well-supported; leaving out for now
# ... should probably distinguish between date and datetime field
elif isinstance(field, xmlmap.fields.NodeField) or \
isinstance(field, xmlmap.fields.NodeListField):
form_label = kwargs['label'] if 'label' in kwargs else fieldname_to_label(name)
# store subform label in case we can't set on subform/formset
subform_labels[name] = form_label
# if a subform class was declared, use that class exactly as is
if name in declared_subforms:
subform = declared_subforms[name]
# otherwise, define a new xmlobject form for the nodefield or
# nodelistfield class, using any options passed in for fields under this one
else:
subform_opts = {
'fields': fieldlist.subfields[name] if fieldlist and name in fieldlist.subfields else None,
'exclude': excludelist.subfields[name] if excludelist and name in excludelist.subfields else None,
'widgets': widgets[name] if widgets and name in widgets else None,
'label': form_label,
}
# create the subform class
subform = xmlobjectform_factory(field.node_class, **subform_opts)
# store subform or generate and store formset, depending on field type
if isinstance(field, xmlmap.fields.NodeField):
subforms[name] = subform
elif isinstance(field, xmlmap.fields.NodeListField):
# formset_factory is from django core and we link into it here.
formsets[name] = formset_factory(subform, formset=BaseXmlObjectFormSet,
max_num=subform._meta.max_num, can_delete=subform._meta.can_delete,
extra=subform._meta.extra, can_order=subform._meta.can_order)
formsets[name].form_label = form_label
elif isinstance(field, xmlmap.fields.StringListField) or \
isinstance(field, xmlmap.fields.IntegerListField):
form_label = kwargs['label'] if 'label' in kwargs else fieldname_to_label(name)
if isinstance(field, xmlmap.fields.IntegerListField):
listform = IntegerListFieldForm
else:
listform = ListFieldForm
# generate a listfield formset
formsets[name] = formset_factory(listform, formset=BaseXmlObjectListFieldFormSet)
# don't need can_delete: since each form is a single field, empty implies delete
# todo: extra, max_num ? widget?
formsets[name].form_label = form_label
# TODO: other list variants
else:
# raise exception for unsupported fields
# currently doesn't handle list fields
raise Exception('Error on field "%s": XmlObjectForm does not yet support auto form field generation for %s.' \
% (name, field.__class__))
if field_type is not None:
if 'label' not in kwargs:
kwargs['label'] = fieldname_to_label(name)
formfields[name] = field_type(**kwargs)
# create a dictionary indexed by field creation order, for default field ordering
field_order[field.creation_counter] = name
# if fields were explicitly specified, return them in that order
if fieldlist:
ordered_fields = SortedDict((name, formfields[name])
for name in fieldlist.fields
if name in formfields)
ordered_subforms = SortedDict((name, subforms[name])
for name in fieldlist.fields
if name in subforms)
ordered_formsets = SortedDict((name, formsets[name])
for name in fieldlist.fields
if name in formsets)
else:
# sort on field creation counter and generate a django sorted dictionary
ordered_fields = SortedDict(
[(field_order[key], formfields[field_order[key]]) for key in sorted(field_order.keys())
if field_order[key] in formfields ]
)
ordered_subforms = SortedDict(
[(field_order[key], subforms[field_order[key]]) for key in sorted(field_order.keys())
if field_order[key] in subforms ]
)
ordered_formsets = SortedDict(
[(field_order[key], formsets[field_order[key]]) for key in sorted(field_order.keys())
if field_order[key] in formsets ]
)
return ordered_fields, ordered_subforms, ordered_formsets, subform_labels
|
java
|
public PlatformResponse disconnect(String consumerKey, String consumerSecret,
String accessToken, String accessTokenSecret)
throws ConnectionException {
httpClient = new PlatformHttpClient(consumerKey, consumerSecret,
accessToken, accessTokenSecret);
return this.httpClient.disconnect();
}
|
java
|
@Override
public java.util.concurrent.Future<DeleteStreamResult> deleteStreamAsync(String streamName,
com.amazonaws.handlers.AsyncHandler<DeleteStreamRequest, DeleteStreamResult> asyncHandler) {
return deleteStreamAsync(new DeleteStreamRequest().withStreamName(streamName), asyncHandler);
}
|
python
|
def set_section_order(self, section_name_list):
"""Set the order of the sections, which are by default unorderd.
Any unlisted sections that exist will be placed at the end of the
document in no particular order.
"""
self.section_headings = section_name_list[:]
for section_name in self.sections.keys():
if section_name not in section_name_list:
self.section_headings.append(section_name)
return
|
python
|
def _build_wsgi_env(event, app_name):
"""Turn the Lambda/API Gateway request event into a WSGI environment dict.
:param dict event:
The event parameters passed to the Lambda function entrypoint.
:param str app_name:
Name of the API application.
"""
gateway = event['parameters']['gateway']
request = event['parameters']['request']
ctx = event['rawContext']
headers = request['header']
body = six.text_type(json.dumps(request['body']))
# Render the path correctly so connexion/flask will pass the path params to
# the handler function correctly.
# Basically, this replaces "/foo/{param1}/bar/{param2}" with
# "/foo/123/bar/456".
path = gateway['resource-path'].format(
**event['parameters']['request']['path']
)
environ = {
'PATH_INFO': path,
'QUERY_STRING': urlencode(request['querystring']),
'REMOTE_ADDR': ctx['identity']['sourceIp'],
'REQUEST_METHOD': ctx['httpMethod'],
'SCRIPT_NAME': app_name,
'SERVER_NAME': app_name,
'SERVER_PORT': headers.get('X-Forwarded-Port', '80'),
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': headers.get('X-Forwarded-Proto', 'http'),
'wsgi.input': StringIO(body),
'wsgi.errors': StringIO(),
'wsgi.multiprocess': False,
'wsgi.multithread': False,
'wsgi.run_once': False,
'CONTENT_TYPE': headers.get('Content-Type', 'application/json'),
}
if ctx['httpMethod'] in ['POST', 'PUT', 'PATCH']:
environ['CONTENT_LENGTH'] = str(len(body))
for header_name, header_value in headers.items():
wsgi_name = 'HTTP_{}'.format(header_name.upper().replace('-', '_'))
environ[wsgi_name] = str(header_value)
return environ
|
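A hypothetical event payload for `_build_wsgi_env` above, with the dictionary shape inferred from the field accesses in the function; the concrete values are made up, and the sketch assumes the module-level imports the function relies on (json, six, urlencode, StringIO) are present.

```python
# Shape inferred from the lookups in _build_wsgi_env; all values are illustrative.
event = {
    'parameters': {
        'gateway': {'resource-path': '/items/{item_id}'},
        'request': {
            'header': {'Content-Type': 'application/json',
                       'X-Forwarded-Proto': 'https'},
            'body': {'name': 'example'},
            'querystring': {'verbose': 'true'},
            'path': {'item_id': '123'},
        },
    },
    'rawContext': {
        'httpMethod': 'POST',
        'identity': {'sourceIp': '127.0.0.1'},
    },
}

environ = _build_wsgi_env(event, 'my-api')
print(environ['PATH_INFO'])  # '/items/123' after path-parameter substitution
```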
java
|
AtomSymbol alignTo(SymbolAlignment alignment) {
return new AtomSymbol(element, adjuncts, annotationAdjuncts, alignment, hull);
}
|
python
|
def _determine_colorspace(self, colorspace=None, **kwargs):
"""Determine the colorspace from the supplied inputs.
Parameters
----------
colorspace : str, optional
Either 'rgb' or 'gray'.
"""
if colorspace is None:
# Must infer the colorspace from the image dimensions.
if len(self.shape) < 3:
# A single channel image is grayscale.
self._colorspace = opj2.CLRSPC_GRAY
elif self.shape[2] == 1 or self.shape[2] == 2:
# A single channel image or an image with two channels is going
# to be greyscale.
self._colorspace = opj2.CLRSPC_GRAY
else:
# Anything else must be RGB, right?
self._colorspace = opj2.CLRSPC_SRGB
else:
if colorspace.lower() not in ('rgb', 'grey', 'gray'):
msg = 'Invalid colorspace "{0}".'.format(colorspace)
raise IOError(msg)
elif colorspace.lower() == 'rgb' and self.shape[2] < 3:
msg = 'RGB colorspace requires at least 3 components.'
raise IOError(msg)
# Turn the colorspace from a string to the enumerated value that
# the library expects.
COLORSPACE_MAP = {'rgb': opj2.CLRSPC_SRGB,
'gray': opj2.CLRSPC_GRAY,
'grey': opj2.CLRSPC_GRAY,
'ycc': opj2.CLRSPC_YCC}
self._colorspace = COLORSPACE_MAP[colorspace.lower()]
|
python
|
def create(dataset, target, features=None, validation_set = 'auto',
verbose=True):
"""
Automatically create a suitable classifier model based on the provided
training data.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in the order in which they are
provided. For example, a target variable with 'cat' and 'dog' as
possible values is mapped to 0 and 1 respectively with 0 being the base
class and 1 being the reference class. Use `model.classes` to
retrieve the order in which the classes are mapped.
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert such feature
columns to type array if all entries in the list are of numeric
types. If the lists contain data of mixed types, separate
them out into different columns.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance. For
each row of the progress table, the chosen metrics are computed for
both the provided training dataset and the validation_set. The format
of this SFrame must be the same as the training set. By default this
argument is set to 'auto' and a validation set is automatically sampled
and used for progress printing. If validation_set is set to None, then
no additional metrics are computed. The default value is 'auto'.
verbose : boolean, optional
If True, print progress information during training.
Returns
-------
out : A trained classifier model.
See Also
--------
turicreate.boosted_trees_classifier.BoostedTreesClassifier,
turicreate.logistic_classifier.LogisticClassifier,
turicreate.svm_classifier.SVMClassifier,
turicreate.nearest_neighbor_classifier.NearestNeighborClassifier
Examples
--------
.. sourcecode:: python
# Setup the data
>>> import turicreate as tc
>>> data = tc.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
# Selects the best model based on your data.
>>> model = tc.classifier.create(data, target='is_expensive',
... features=['bath', 'bedroom', 'size'])
# Make predictions and evaluate results.
>>> predictions = model.classify(data)
>>> results = model.evaluate(data)
"""
return _sl.create_classification_with_model_selector(
dataset,
target,
model_selector = _turicreate.extensions._supervised_learning._classifier_available_models,
features = features,
validation_set = validation_set,
verbose = verbose)
|
python
|
def digest(self):
"""Get digest of data seen thus far as a list of bytes."""
total = 0 # number of triplets seen
if self.count == 3: # 3 chars = 1 triplet
total = 1
elif self.count == 4: # 4 chars = 4 triplets
total = 4
elif self.count > 4: # otherwise 8 triplets/char less
total = 8 * self.count - 28 # 28 'missed' during 'ramp-up'
threshold = total / 256 # threshold for accumulators, using the mean
code = [0]*32 # start with all zero bits
for i in range(256): # for all 256 accumulators
if self.acc[i] > threshold: # if it meets the threshold
code[i >> 3] += 1 << (i&7) # set corresponding digest bit, equivalent to i/8, 2 ** (i % 8)
return code[::-1]
|
python
|
def add_layer_timing_signal_sinusoid_1d(x, layer, num_layers):
"""Add sinusoids of different frequencies as layer (vertical) timing signal.
Args:
x: a Tensor with shape [batch, length, channels]
layer: layer num
num_layers: total number of layers
Returns:
a Tensor the same shape as x.
"""
channels = common_layers.shape_list(x)[-1]
signal = get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers)
return x + signal
|
python
|
def infer_issubclass(callnode, context=None):
"""Infer issubclass() calls
:param nodes.Call callnode: an `issubclass` call
:param InferenceContext: the context for the inference
:rtype nodes.Const: Boolean Const value of the `issubclass` call
:raises UseInferenceDefault: If the node cannot be inferred
"""
call = arguments.CallSite.from_call(callnode)
if call.keyword_arguments:
# issubclass doesn't support keyword arguments
raise UseInferenceDefault("TypeError: issubclass() takes no keyword arguments")
if len(call.positional_arguments) != 2:
raise UseInferenceDefault(
"Expected two arguments, got {count}".format(
count=len(call.positional_arguments)
)
)
# The left hand argument is the obj to be checked
obj_node, class_or_tuple_node = call.positional_arguments
try:
obj_type = next(obj_node.infer(context=context))
except InferenceError as exc:
raise UseInferenceDefault from exc
if not isinstance(obj_type, nodes.ClassDef):
raise UseInferenceDefault("TypeError: arg 1 must be class")
# The right hand argument is the class(es) that the given
# object is to be checked against.
try:
class_container = _class_or_tuple_to_container(
class_or_tuple_node, context=context
)
except InferenceError as exc:
raise UseInferenceDefault from exc
try:
issubclass_bool = helpers.object_issubclass(obj_type, class_container, context)
except AstroidTypeError as exc:
raise UseInferenceDefault("TypeError: " + str(exc)) from exc
except MroError as exc:
raise UseInferenceDefault from exc
return nodes.Const(issubclass_bool)
|
java
|
@Override
public String onCompleted() throws Exception {
futureDone();
if (closed.get()) return "";
if (status == Socket.STATUS.ERROR) {
return "";
}
if (options.reconnect()) {
close(false);
if (options.reconnectTimeoutInMilliseconds() > 0) {
timer.schedule(new Runnable() {
public void run() {
status = Socket.STATUS.REOPENED;
reconnect();
}
}, options.reconnectTimeoutInMilliseconds(), TimeUnit.MILLISECONDS);
} else {
status = Socket.STATUS.REOPENED;
reconnect();
}
} else {
close();
}
return "";
}
|
java
|
public static CommerceCountry remove(long commerceCountryId)
throws com.liferay.commerce.exception.NoSuchCountryException {
return getPersistence().remove(commerceCountryId);
}
|
python
|
def cloud_providers_config(path,
env_var='SALT_CLOUD_PROVIDERS_CONFIG',
defaults=None):
'''
Read in the salt cloud providers configuration file
'''
if defaults is None:
defaults = PROVIDER_CONFIG_DEFAULTS
overrides = salt.config.load_config(
path, env_var, os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.providers')
)
default_include = overrides.get(
'default_include', defaults['default_include']
)
include = overrides.get('include', [])
overrides.update(
salt.config.include_config(default_include, path, verbose=False)
)
overrides.update(
salt.config.include_config(include, path, verbose=True)
)
return apply_cloud_providers_config(overrides, defaults)
|
python
|
def gen_uid(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
data_uid - UID in string format (16 characters 0..9,a..f)
}
"""
import uuid
import random
uid=str(uuid.uuid4().hex)
if len(uid)!=32:
return {'return':1, 'error':'problem generating UID : len='+str(len(uid))+' !=32'} # pragma: no cover
    random.seed()  # seed from system entropy before choosing the slice offset
x=random.randrange(0,16)
return {'return':0, 'data_uid':uid[x:x+16]}
|
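A small usage sketch for `gen_uid` above, assuming the function is in scope; it follows the return-dictionary convention spelled out in the docstring (a ``return`` code plus ``data_uid`` on success).

```python
r = gen_uid({})
if r['return'] > 0:
    print('error:', r['error'])
else:
    uid = r['data_uid']
    assert len(uid) == 16        # 16 hex characters (0-9, a-f)
    print('generated UID:', uid)
```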
python
|
def dump(self, f):
"""Write Wavefront data to file. Takes File object or filename."""
try:
f.write(self._data)
except AttributeError:
with open(f, 'w') as wf:
wf.write(self._data)
|
java
|
public QueryInfo withSelectFields(FieldInfo... selectFields) {
if (this.selectFields == null) {
setSelectFields(new com.amazonaws.internal.SdkInternalList<FieldInfo>(selectFields.length));
}
for (FieldInfo ele : selectFields) {
this.selectFields.add(ele);
}
return this;
}
|
python
|
def _par_read(dirname, compressed=True):
"""
Internal write function to read a formatted parameter file.
:type dirname: str
:param dirname: Directory to read the parameter file from.
:type compressed: bool
:param compressed: Whether the directory is compressed or not.
"""
templates = []
if compressed:
arc = tarfile.open(dirname, "r:*")
members = arc.getmembers()
_parfile = [member for member in members
if member.name.split(os.sep)[-1] ==
'template_parameters.csv']
if len(_parfile) == 0:
arc.close()
raise MatchFilterError(
'No template parameter file in archive')
parfile = arc.extractfile(_parfile[0])
else:
parfile = open(dirname + '/' + 'template_parameters.csv', 'r')
for line in parfile:
t_in = Template()
for key_pair in line.rstrip().split(','):
if key_pair.split(':')[0].strip() == 'name':
t_in.__dict__[key_pair.split(':')[0].strip()] = \
key_pair.split(':')[-1].strip()
elif key_pair.split(':')[0].strip() == 'filt_order':
try:
t_in.__dict__[key_pair.split(':')[0].strip()] = \
int(key_pair.split(':')[-1])
except ValueError:
pass
else:
try:
t_in.__dict__[key_pair.split(':')[0].strip()] = \
float(key_pair.split(':')[-1])
except ValueError:
pass
templates.append(t_in)
parfile.close()
if compressed:
arc.close()
return templates
|
python
|
def _doublec(self, word):
"""doublec(word) is TRUE <=> word ends with a double consonant"""
if len(word) < 2:
return False
if (word[-1] != word[-2]):
return False
return self._cons(word, len(word)-1)
|
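A rough, self-contained illustration of the double-consonant rule that `_doublec` above encodes. The real implementation defers to the stemmer's `_cons` method, which is not shown here, so this sketch substitutes a simplified consonant test (a hypothetical helper that ignores the Porter rule treating 'y' as a vowel in some positions).

```python
def _is_cons_simplified(word, i):
    # Simplified stand-in for the stemmer's _cons(): 'aeiou' are vowels, everything else a consonant.
    return word[i] not in 'aeiou'

def doublec_simplified(word):
    # True iff the word ends with a doubled consonant, mirroring _doublec's checks.
    return len(word) >= 2 and word[-1] == word[-2] and _is_cons_simplified(word, len(word) - 1)

print(doublec_simplified('hopp'))  # True  -> ends in a double consonant
print(doublec_simplified('hope'))  # False
```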
python
|
def setup(self, target=None, strict=False, minify=False, line_numbers=False, keep_lines=False, no_tco=False):
"""Initializes parsing parameters."""
if target is None:
target = ""
else:
target = str(target).replace(".", "")
if target in pseudo_targets:
target = pseudo_targets[target]
if target not in targets:
raise CoconutException(
"unsupported target Python version " + ascii(target),
extra="supported targets are " + ', '.join(ascii(t) for t in specific_targets) + ", or leave blank for universal",
)
logger.log_vars("Compiler args:", locals())
self.target, self.strict, self.minify, self.line_numbers, self.keep_lines, self.no_tco = (
target, strict, minify, line_numbers, keep_lines, no_tco,
)
|
java
|
public int getTotalDurationFromFrameDurations(int[] frameDurationMs) {
int totalMs = 0;
for (int i = 0; i < frameDurationMs.length; i++) {
totalMs += frameDurationMs[i];
}
return totalMs;
}
|
java
|
@Deprecated
public static StringBuffer convertToUnicode(UCharacterIterator src, int options)
throws StringPrepParseException{
return IDNA2003.convertToUnicode(src, options);
}
|
python
|
async def createWorkerType(self, *args, **kwargs):
"""
Create new Worker Type
Create a worker type. A worker type contains all the configuration
needed for the provisioner to manage the instances. Each worker type
knows which regions and which instance types are allowed for that
worker type. Remember that Capacity is the number of concurrent tasks
that can be run on a given EC2 resource and that Utility is the relative
performance rate between different instance types. There is no way to
configure different regions to have different sets of instance types
so ensure that all instance types are available in all regions.
This function is idempotent.
        Once a worker type is in the provisioner, a background process will
begin creating instances for it based on its capacity bounds and its
pending task count from the Queue. It is the worker's responsibility
        to shut itself down. The provisioner has a limit (currently 96 hours)
for all instances to prevent zombie instances from running indefinitely.
The provisioner will ensure that all instances created are tagged with
aws resource tags containing the provisioner id and the worker type.
If provided, the secrets in the global, region and instance type sections
are available using the secrets api. If specified, the scopes provided
will be used to generate a set of temporary credentials available with
the other secrets.
This method takes input: ``http://schemas.taskcluster.net/aws-provisioner/v1/create-worker-type-request.json#``
This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#``
This method is ``stable``
"""
return await self._makeApiCall(self.funcinfo["createWorkerType"], *args, **kwargs)
|
python
|
def extremum_icohpvalue(self, summed_spin_channels=True, spin=Spin.up):
"""
get ICOHP/ICOOP of strongest bond
Args:
summed_spin_channels: Boolean to indicate whether the ICOHPs/ICOOPs of both spin channels should be summed
spin: if summed_spin_channels is equal to False, this spin indicates which spin channel should be returned
Returns:
lowest ICOHP/largest ICOOP value (i.e. ICOHP/ICOOP value of strongest bond)
"""
if not self._are_coops:
extremum = sys.float_info.max
else:
extremum = -sys.float_info.max
if not self._is_spin_polarized:
if spin == Spin.down:
warnings.warn("This spin channel does not exist. I am switching to Spin.up")
spin = Spin.up
for value in self._icohplist.values():
if not value.is_spin_polarized or not summed_spin_channels:
if not self._are_coops:
if value.icohpvalue(spin) < extremum:
extremum = value.icohpvalue(spin)
# print(extremum)
else:
if value.icohpvalue(spin) > extremum:
extremum = value.icohpvalue(spin)
# print(extremum)
else:
if not self._are_coops:
if value.summed_icohp < extremum:
extremum = value.summed_icohp
# print(extremum)
else:
if value.summed_icohp > extremum:
extremum = value.summed_icohp
# print(extremum)
return extremum
|
java
|
public HttpHeaders setBasicAuthentication(String username, String password) {
String userPass =
Preconditions.checkNotNull(username) + ":" + Preconditions.checkNotNull(password);
String encoded = Base64.encodeBase64String(StringUtils.getBytesUtf8(userPass));
return setAuthorization("Basic " + encoded);
}
|
java
|
public static base_response sync(nitro_service client, hafiles resource) throws Exception {
hafiles syncresource = new hafiles();
syncresource.mode = resource.mode;
return syncresource.perform_operation(client,"sync");
}
|
java
|
protected <T> Searchable<T> configureMatcher(final Searchable<T> searchable) {
if (isCustomMatcherAllowed()) {
Matcher<T> matcher = searchable.getMatcher();
if (matcher != null) {
MatcherHolder.set(matcher);
}
}
return searchable;
}
|
python
|
def stop(self):
""" Stop logging with this logger.
"""
if not self.active:
return
self.removeHandler(self.handlers[-1])
self.active = False
return
|
java
|
public final void setDialogHeaderIconTintMode(@NonNull final PorterDuff.Mode mode) {
Condition.INSTANCE.ensureNotNull(mode, "The dialog icon tint mode may not be null");
this.dialogHeaderIconTintMode = mode;
}
|
java
|
public boolean containsCycle() throws WikiApiException {
DefaultEdge edge = findCycle();
if (edge != null) {
Category sourceCat = wiki.getCategory(categoryGraph.getGraph().getEdgeSource(edge));
Category targetCat = wiki.getCategory(categoryGraph.getGraph().getEdgeTarget(edge));
logger.info("Cycle: " + sourceCat.getTitle() + " - " + targetCat.getTitle());
return true;
}
else {
return false;
}
}
|
python
|
def get_formset_class(self, form, name):
"""
Either return the formset class that was provided as argument to the
__init__ method, or build one based on the ``parent_model`` and
``model`` attributes.
"""
if self.formset_class is not None:
return self.formset_class
formset_class = inlineformset_factory(
self.get_parent_model(form, name),
self.get_model(form, name),
**self.formset_factory_kwargs)
return formset_class
|
java
|
@Override
public Request<UpdateSecurityGroupRuleDescriptionsIngressRequest> getDryRunRequest() {
Request<UpdateSecurityGroupRuleDescriptionsIngressRequest> request = new UpdateSecurityGroupRuleDescriptionsIngressRequestMarshaller().marshall(this);
request.addParameter("DryRun", Boolean.toString(true));
return request;
}
|
python
|
def run(self, num_runs, show_trails, log_file_base):
"""
Run each agent in the world for 'num_runs' iterations
Optionally saves grid results to file if base name is
passed to method.
"""
print("--------------------------------------------------")
print("Starting Simulation - target = ", self.agent_list[0].target_y, self.agent_list[0].target_x)
self.world.grd.set_tile(self.agent_list[0].target_y , self.agent_list[0].target_x , 'T')
self.highlight_cell_surroundings(self.agent_list[0].target_y, self.agent_list[0].target_x)
self.start_all_agents()
# save the agents results here
try:
with open (log_file_base + '__agents.txt', "w") as f:
f.write("Starting World = \n")
f.write(str(self.world.grd))
except Exception:
            print("Can't save log results to " + log_file_base)
for cur_run in range(0,num_runs):
print("WorldSimulation:run#", cur_run)
for num, agt in enumerate(self.agent_list):
if show_trails == 'Y':
if len(self.agent_list) == 1 or len(self.agent_list) > 9:
self.world.grd.set_tile(agt.current_y, agt.current_x, 'o')
else:
self.world.grd.set_tile(agt.current_y, agt.current_x, str(num))
agt.do_your_job()
self.world.grd.set_tile(agt.current_y, agt.current_x, 'A') # update the main world grid with agents changes
# save grid after each run if required
if log_file_base != 'N':
self.world.grd.save(log_file_base + '_' + str(cur_run) + '.log')
# save the agents results here
with open (log_file_base + '__agents.txt', "a") as f:
f.write("\nWorld tgt= [" + str(self.agent_list[0].target_y) + "," + str(self.agent_list[0].target_x) + "]\n")
f.write(str(self.world.grd))
f.write('\n\nAgent Name , starting, num Steps , num Climbs\n')
for num, agt in enumerate(self.agent_list):
res = agt.name + ' , [' + str(agt.start_y) + ', ' + str(agt.start_x) + '], '
res += str(agt.num_steps) + ' , ' + str(agt.num_climbs) + ' , '
res += ''.join([a for a in agt.results])
f.write(res + '\n')
|
java
|
public boolean remove(Object o) {
boolean ret = super.remove(WeakElement.create(o));
processQueue();
return ret;
}
|
python
|
def about_axis(cls, center, angle, axis, invert=False):
"""Create transformation that represents a rotation about an axis
Arguments:
| ``center`` -- Point on the axis
| ``angle`` -- Rotation angle
| ``axis`` -- Rotation axis
| ``invert`` -- When True, an inversion rotation is constructed
[default=False]
"""
return Translation(center) * \
Rotation.from_properties(angle, axis, invert) * \
Translation(-center)
|
java
|
public void accept(PainterVisitor visitor, Object group, Bbox bounds, boolean recursive) {
if (googleMap == null) {
// create as first child of raster group
map.getRasterContext().drawGroup(null, this);
String id = map.getRasterContext().getId(this);
String graphicsId = map.getVectorContext().getId();
googleMap = createGoogleMap(id, graphicsId, type.name(), showMap, getVerticalMargin(),
getHorizontalMargin(), getVerticalAlignmentString());
}
}
|
python
|
def get_itoken(self, env):
"""Returns the current internal token to use for the auth system's own
actions with other services. Each process will create its own
itoken and the token will be deleted and recreated based on the
token_life configuration value. The itoken information is stored in
memcache because the auth process that is asked by Swift to validate
the token may not be the same as the auth process that created the
token.
"""
if not self.itoken or self.itoken_expires < time() or \
env.get('HTTP_X_AUTH_NEW_TOKEN', 'false').lower() in \
TRUE_VALUES:
self.itoken = '%sitk%s' % (self.reseller_prefix, uuid4().hex)
memcache_key = '%s/auth/%s' % (self.reseller_prefix, self.itoken)
self.itoken_expires = time() + self.token_life
memcache_client = cache_from_env(env)
if not memcache_client:
raise Exception(
'No memcache set up; required for Swauth middleware')
memcache_client.set(
memcache_key,
(self.itoken_expires,
'.auth,.reseller_admin,%s.auth' % self.reseller_prefix),
time=self.token_life)
return self.itoken
|
java
|
public PasswordAuthentication unregisterPasswordAuthentication(InetAddress pAddress, int pPort, String pProtocol, String pPrompt, String pScheme) {
return passwordAuthentications.remove(new AuthKey(pAddress, pPort, pProtocol, pPrompt, pScheme));
}
|
java
|
private PlayState3 findPlayState3() {
PlayState3 result = PLAY_STATE_3_MAP.get(packetBytes[157]);
if (result == null) {
return PlayState3.UNKNOWN;
}
return result;
}
|
python
|
def dfa_to_dot(dfa: dict, name: str, path: str = './'):
""" Generates a DOT file and a relative SVG image in **path**
folder of the input DFA using graphviz library.
:param dict dfa: DFA to export;
:param str name: name of the output file;
:param str path: path where to save the DOT/SVG files (default:
working directory)
"""
g = graphviz.Digraph(format='svg')
g.node('fake', style='invisible')
for state in dfa['states']:
if state == dfa['initial_state']:
if state in dfa['accepting_states']:
g.node(str(state), root='true',
shape='doublecircle')
else:
g.node(str(state), root='true')
elif state in dfa['accepting_states']:
g.node(str(state), shape='doublecircle')
else:
g.node(str(state))
g.edge('fake', str(dfa['initial_state']), style='bold')
for transition in dfa['transitions']:
g.edge(str(transition[0]),
str(dfa['transitions'][transition]),
label=transition[1])
if not os.path.exists(path):
os.makedirs(path)
g.render(filename=os.path.join(path, name + '.dot'))
|
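A minimal usage sketch for `dfa_to_dot` above, with the DFA dictionary shape inferred from the key accesses in the function (``states``, ``initial_state``, ``accepting_states``, and a ``transitions`` map from ``(state, symbol)`` pairs to target states); it assumes the graphviz Python package and the Graphviz binaries are installed.

```python
# Toy DFA over {'a', 'b'} that accepts strings ending in 'a'.
dfa = {
    'states': {'s0', 's1'},
    'initial_state': 's0',
    'accepting_states': {'s1'},
    'transitions': {
        ('s0', 'a'): 's1',
        ('s0', 'b'): 's0',
        ('s1', 'a'): 's1',
        ('s1', 'b'): 's0',
    },
}

dfa_to_dot(dfa, 'example_dfa', './out')  # writes example_dfa.dot plus an SVG render in ./out
```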
java
|
@Override
public final void remove() throws RemoteException, RemoveException {
final boolean isTraceOn = TraceComponent.isAnyTracingEnabled();
if (isTraceOn && tc.isEntryEnabled())
Tr.entry(tc, "remove");
//canBeRemoved();//94781
        // For stateless beans, it is not up to the client to remove objects
// so just return at this point. The container can decide to remove
// objects at some point (the pool manager drives this decision)
// at which point we will call ejbRemove on the bean
if (isTraceOn && tc.isEntryEnabled())
Tr.exit(tc, "remove");
return;
}
|
python
|
def find_children(self, pattern=r".*", flags=0, candidates=None):
"""
        Finds the children matching the given pattern.
Usage::
>>> node_a = AbstractCompositeNode("MyNodeA")
>>> node_b = AbstractCompositeNode("MyNodeB", node_a)
>>> node_c = AbstractCompositeNode("MyNodeC", node_a)
>>> node_a.find_children("c", re.IGNORECASE)
[<AbstractCompositeNode object at 0x101078040>]
:param pattern: Matching pattern.
:type pattern: unicode
:param flags: Matching regex flags.
:type flags: int
:param candidates: Matching candidates.
:type candidates: list
:return: Matching children.
:rtype: list
"""
if candidates is None:
candidates = []
for child in self.__children:
if re.search(pattern, child.name, flags):
child not in candidates and candidates.append(child)
child.find_children(pattern, flags, candidates)
return candidates
|
java
|
public final void init(int opmode, Certificate certificate,
SecureRandom random)
throws InvalidKeyException {
initialized = false;
checkOpmode(opmode);
// Check key usage if the certificate is of
// type X.509.
if (certificate instanceof java.security.cert.X509Certificate) {
// Check whether the cert has a key usage extension
// marked as a critical extension.
X509Certificate cert = (X509Certificate) certificate;
Set critSet = cert.getCriticalExtensionOIDs();
if (critSet != null && !critSet.isEmpty()
&& critSet.contains(KEY_USAGE_EXTENSION_OID)) {
boolean[] keyUsageInfo = cert.getKeyUsage();
// keyUsageInfo[2] is for keyEncipherment;
// keyUsageInfo[3] is for dataEncipherment.
if ((keyUsageInfo != null) &&
(((opmode == Cipher.ENCRYPT_MODE) &&
(keyUsageInfo.length > 3) &&
(keyUsageInfo[3] == false)) ||
((opmode == Cipher.WRAP_MODE) &&
(keyUsageInfo.length > 2) &&
(keyUsageInfo[2] == false)))) {
throw new InvalidKeyException("Wrong key usage");
}
}
}
PublicKey publicKey =
(certificate == null ? null : certificate.getPublicKey());
try {
chooseProvider(InitType.KEY, opmode, (Key) publicKey, null, null, random);
} catch (InvalidAlgorithmParameterException e) {
// should never occur
throw new InvalidKeyException(e);
}
initialized = true;
this.opmode = opmode;
}
|
python
|
def decode(self, bytes, raw=False):
"""decode(bytearray, raw=False) -> value
Decodes the given bytearray according to this PrimitiveType
definition.
NOTE: The parameter ``raw`` is present to adhere to the
        ``decode()`` interface, but has no effect for PrimitiveType
definitions.
"""
return struct.unpack(self.format, buffer(bytes))[0]
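# Minimal sketch of what decode() does under the hood (illustrative, not from the original
# module): assuming self.format is a struct format string such as '>H' (big-endian unsigned
# 16-bit integer), the call below mirrors decode(bytearray(b'\x01\x02')).
import struct
assert struct.unpack('>H', b'\x01\x02')[0] == 258  # 0x0102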
|
python
|
def _make_rewritten_pyc(state, fn, pyc, co):
"""Try to dump rewritten code to *pyc*."""
if sys.platform.startswith("win"):
# Windows grants exclusive access to open files and doesn't have atomic
# rename, so just write into the final file.
_write_pyc(state, co, fn, pyc)
else:
# When not on windows, assume rename is atomic. Dump the code object
# into a file specific to this process and atomically replace it.
proc_pyc = pyc + "." + str(os.getpid())
if _write_pyc(state, co, fn, proc_pyc):
os.rename(proc_pyc, pyc)
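# Illustrative sketch (not part of the original module) of the write-then-rename pattern
# relied on above: on POSIX, os.rename() replaces the destination atomically, so another
# process never observes a half-written cache file.
import os
def _atomic_write_sketch(path, data):
    tmp = "{}.{}".format(path, os.getpid())  # process-specific temporary name, as above
    with open(tmp, "wb") as fp:
        fp.write(data)
    os.rename(tmp, path)  # atomic replace on POSIX; fails on Windows if path already exists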
|
java
|
public static <T> T wrap(Operation<T> operation) throws EvernoteException {
try {
return operation.execute();
} catch (Exception ex) {
throw convert(ex);
}
}
|
java
|
@Override
public CompletableFuture<Void> execute(CommitEvent event) {
String scope = event.getScope();
String stream = event.getStream();
OperationContext context = streamMetadataStore.createContext(scope, stream);
log.debug("Attempting to commit available transactions on stream {}/{}", event.getScope(), event.getStream());
CompletableFuture<Void> future = new CompletableFuture<>();
// Note: we will ignore the epoch in the event. It has been deprecated.
// The logic now finds the smallest epoch with transactions and commits them.
tryCommitTransactions(scope, stream, context)
.whenComplete((r, e) -> {
if (e != null) {
Throwable cause = Exceptions.unwrap(e);
// for operation not allowed, we will report the event
if (cause instanceof StoreException.OperationNotAllowedException) {
log.debug("Cannot commit transaction on stream {}/{}. Postponing", scope, stream);
} else {
log.error("Exception while attempting to commit transaction on stream {}/{}", scope, stream, e);
}
future.completeExceptionally(cause);
} else {
if (r >= 0) {
log.debug("Successfully committed transactions on epoch {} on stream {}/{}", r, scope, stream);
} else {
log.debug("No transactions found in committing state on stream {}/{}", r, scope, stream);
}
if (processedEvents != null) {
try {
processedEvents.offer(event);
} catch (Exception ex) {
// ignore, this processed events is only added for enabling unit testing this class
}
}
future.complete(null);
}
});
return future;
}
|
python
|
def get_parameter(self, parameter):
"Return a dict for given parameter"
parameter = self._get_parameter_name(parameter)
return self._parameters[parameter]
|
java
|
public void eInit(SarlScript script, String name, IJvmTypeProvider context) {
setTypeResolutionContext(context);
if (this.sarlArtifact == null) {
this.sarlArtifact = SarlFactory.eINSTANCE.createSarlArtifact();
script.getXtendTypes().add(this.sarlArtifact);
this.sarlArtifact.setAnnotationInfo(XtendFactory.eINSTANCE.createXtendTypeDeclaration());
if (!Strings.isEmpty(name)) {
this.sarlArtifact.setName(name);
}
}
}
|
java
|
public Object addYToPoint(double y, Object point) {
((Point) point).setY(y);
return point;
}
|
java
|
public String rewrite() {
if (sqlTokens.isEmpty()) {
return originalSQL;
}
SQLBuilder result = new SQLBuilder(Collections.emptyList());
int count = 0;
for (SQLToken each : sqlTokens) {
if (0 == count) {
result.appendLiterals(originalSQL.substring(0, each.getStartIndex()));
}
if (each instanceof SchemaToken) {
appendSchemaPlaceholder(originalSQL, result, (SchemaToken) each, count);
}
count++;
}
return result.toSQL(masterSlaveRule, metaData.getDataSource());
}
|
python
|
def list_namespaced_pod_disruption_budget(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_pod_disruption_budget # noqa: E501
list or watch objects of kind PodDisruptionBudget # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_pod_disruption_budget(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1PodDisruptionBudgetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_pod_disruption_budget_with_http_info(namespace, **kwargs) # noqa: E501
else:
(data) = self.list_namespaced_pod_disruption_budget_with_http_info(namespace, **kwargs) # noqa: E501
return data
|
python
|
def insert_meta_fields_into_existing_schema(graphql_schema):
"""Add compiler-specific meta-fields into all interfaces and types of the specified schema.
It is preferable to use the EXTENDED_META_FIELD_DEFINITIONS constant above to directly inject
the meta-fields during the initial process of building the schema, as that approach
is more robust. This function does its best to not mutate unexpected definitions, but
may break unexpectedly as the GraphQL standard is extended and the underlying
GraphQL library is updated.
Use this function at your own risk. Don't say you haven't been warned.
Properties added include:
- "_x_count", which allows filtering folds based on the number of elements they capture.
Args:
graphql_schema: GraphQLSchema object describing the schema that is going to be used with
the compiler. N.B.: MUTATED IN-PLACE in this method.
"""
root_type_name = graphql_schema.get_query_type().name
for type_name, type_obj in six.iteritems(graphql_schema.get_type_map()):
if type_name.startswith('__') or type_name == root_type_name:
# Ignore the types that are built into GraphQL itself, as well as the root query type.
continue
if not isinstance(type_obj, (GraphQLObjectType, GraphQLInterfaceType)):
# Ignore definitions that are not interfaces or types.
continue
for meta_field_name, meta_field in six.iteritems(EXTENDED_META_FIELD_DEFINITIONS):
if meta_field_name in type_obj.fields:
raise AssertionError(u'Unexpectedly encountered an existing field named {} while '
u'attempting to add a meta-field of the same name. Make sure '
u'you are not attempting to add meta-fields twice.'
.format(meta_field_name))
type_obj.fields[meta_field_name] = meta_field
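# Hedged usage sketch (the toy schema below is made up; assumes the graphql-core API used by
# this module -- GraphQLSchema, GraphQLObjectType, GraphQLField, GraphQLString):
from graphql import GraphQLField, GraphQLObjectType, GraphQLSchema, GraphQLString
animal_type = GraphQLObjectType('Animal', fields={'name': GraphQLField(GraphQLString)})
query_type = GraphQLObjectType('RootSchemaQuery', fields={'Animal': GraphQLField(animal_type)})
toy_schema = GraphQLSchema(query=query_type)
insert_meta_fields_into_existing_schema(toy_schema)  # mutates toy_schema in place
# Per the docstring above, the 'Animal' type should now expose the '_x_count' meta-field.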
|
java
|
public Graph<K, VV, EV> addEdge(Vertex<K, VV> source, Vertex<K, VV> target, EV edgeValue) {
Graph<K, VV, EV> partialGraph = fromCollection(Arrays.asList(source, target),
Collections.singletonList(new Edge<>(source.f0, target.f0, edgeValue)),
this.context);
return this.union(partialGraph);
}
|
python
|
def set_schedule(self, zone_info):
"""Sets the schedule for this zone"""
        # the request body must be valid JSON, otherwise the server API handler raises exceptions
try:
json.loads(zone_info)
except ValueError as error:
raise ValueError("zone_info must be valid JSON: ", error)
headers = dict(self.client._headers()) # pylint: disable=protected-access
headers['Content-Type'] = 'application/json'
response = requests.put(
"https://tccna.honeywell.com/WebAPI/emea/api/v1"
"/%s/%s/schedule" % (self.zone_type, self.zoneId),
data=zone_info, headers=headers
)
response.raise_for_status()
return response.json()
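# Hedged usage sketch (the zone object and payload are made up; assumes an authenticated
# Zone-like instance from this client exposing set_schedule()):
#
#     schedule_json = json.dumps({"DailySchedules": []})  # schedule serialized as JSON text
#     zone.set_schedule(schedule_json)                     # PUTs the schedule to the TCC API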
|
python
|
def ComplementaryColor(self, mode='ryb'):
'''Create a new instance which is the complementary color of this one.
Parameters:
:mode:
Select which color wheel to use for the generation (ryb/rgb).
Returns:
A grapefruit.Color instance.
>>> Color.NewFromHsl(30, 1, 0.5).ComplementaryColor(mode='rgb')
(0.0, 0.5, 1.0, 1.0)
>>> Color.NewFromHsl(30, 1, 0.5).ComplementaryColor(mode='rgb').hsl
(210, 1, 0.5)
'''
h, s, l = self.__hsl
if mode == 'ryb': h = Color.RgbToRyb(h)
h = (h+180)%360
if mode == 'ryb': h = Color.RybToRgb(h)
return Color((h, s, l), 'hsl', self.__a, self.__wref)
|
java
|
public static boolean nodeMatchesClassByName(
Context context, AccessibilityNodeInfoCompat node, CharSequence referenceClassName) {
if ((node == null) || (referenceClassName == null)) {
return false;
}
// Attempt to take a shortcut.
final CharSequence nodeClassName = node.getClassName();
if (TextUtils.equals(nodeClassName, referenceClassName)) {
return true;
}
final ClassLoadingManager loader = ClassLoadingManager.getInstance();
final CharSequence appPackage = node.getPackageName();
return loader.checkInstanceOf(context, nodeClassName, appPackage, referenceClassName);
}
|
java
|
@Override
@ConstantTime(amortized = true)
public AddressableHeap.Handle<K, V> insert(K key, V value) {
if (other != this) {
throw new IllegalStateException("A heap cannot be used after a meld");
}
if (key == null) {
throw new NullPointerException("Null keys not permitted");
}
Node<K, V> n = new Node<K, V>(this, key, value);
addToRootList(n);
size++;
return n;
}
|
java
|
@Override
GroupReplyList write_attribute_reply_i(final int rid, final int tmo, final boolean fwd) throws DevFailed {
final Integer rid_obj = new Integer(rid);
final GroupReplyList rl = new GroupReplyList();
final AsynchRequest ar = arp.get(rid_obj);
if (ar == null) {
final DevError[] errors = new DevError[1];
errors[0] = new DevError();
errors[0].severity = ErrSeverity.ERR;
errors[0].reason = "API_BadAsynPollId";
errors[0].desc = "Invalid asynch. request identifier specified";
errors[0].origin = "GroupDeviceElement.write_attribute_reply";
final DevFailed e = new DevFailed(errors);
rl.add(new GroupReply(get_name(), "unknown", e));
return rl;
}
if (ar.req_id == -1) {
for (final String element : ar.obj_name) {
rl.add(new GroupReply(get_name(), element, ar.exception));
}
arp.remove(rid_obj);
return rl;
}
try {
proxy.write_attribute_reply(ar.req_id, tmo);
for (final String element : ar.obj_name) {
rl.add(new GroupReply(get_name(), element));
}
} catch (final AsynReplyNotArrived na) {
final DevError[] errors = new DevError[1];
errors[0] = new DevError();
errors[0].severity = ErrSeverity.ERR;
errors[0].reason = "API_AsynReplyNotArrived";
errors[0].desc = "No reply for asynch request";
errors[0].origin = "GroupDeviceElement.write_attribute_reply";
final DevFailed e = new DevFailed(errors);
for (final String element : ar.obj_name) {
rl.add(new GroupReply(get_name(), element, e));
}
} catch (final DevFailed df) {
for (final String element : ar.obj_name) {
rl.add(new GroupReply(get_name(), element, df));
}
} catch (final Exception ex) {
final DevError[] errors = new DevError[1];
errors[0] = new DevError();
errors[0].severity = ErrSeverity.ERR;
errors[0].reason = "unknown exception caught";
errors[0].desc = "unknown error";
errors[0].origin = "GroupDeviceElemnt.write_attribute_reply";
final DevFailed e = new DevFailed(errors);
for (final String element : ar.obj_name) {
rl.add(new GroupReply(get_name(), element, e));
}
}
arp.remove(rid_obj);
return rl;
}
|
java
|
public void addModifier(String modifier) {
if (!Strings.isEmpty(modifier)) {
this.sarlBehavior.getModifiers().add(modifier);
}
}
|
python
|
def observable_timestamp_compare(instance):
"""Ensure cyber observable timestamp properties with a comparison
requirement are valid.
"""
for key, obj in instance['objects'].items():
compares = enums.TIMESTAMP_COMPARE_OBSERVABLE.get(obj.get('type', ''), [])
for first, op, second in compares:
comp = getattr(operator, op)
comp_str = get_comparison_string(op)
if first in obj and second in obj and \
not comp(obj[first], obj[second]):
msg = "In object '%s', '%s' (%s) must be %s '%s' (%s)"
yield JSONError(msg % (key, first, obj[first], comp_str, second, obj[second]),
instance['id'])
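# Hedged illustration (the instance contents are made up and the comparison rule for this
# observable type is an assumption about enums.TIMESTAMP_COMPARE_OBSERVABLE):
#
#     bad_instance = {
#         'id': 'observed-data--0001',
#         'objects': {'0': {'type': 'x509-certificate',
#                           'validity_not_before': '2021-01-01T00:00:00Z',
#                           'validity_not_after': '2020-01-01T00:00:00Z'}},
#     }
#     list(observable_timestamp_compare(bad_instance))  # yields a JSONError if the enum
#                                                       # defines such a rule for this type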
|
java
|
@CheckForNull
static public BitSet getBytecodeSet(JavaClass clazz, Method method) {
XMethod xmethod = XFactory.createXMethod(clazz, method);
if (cachedBitsets().containsKey(xmethod)) {
return cachedBitsets().get(xmethod);
}
Code code = method.getCode();
if (code == null) {
return null;
}
byte[] instructionList = code.getCode();
// Create callback
UnpackedBytecodeCallback callback = new UnpackedBytecodeCallback(instructionList.length);
// Scan the method.
BytecodeScanner scanner = new BytecodeScanner();
scanner.scan(instructionList, callback);
UnpackedCode unpackedCode = callback.getUnpackedCode();
BitSet result = null;
if (unpackedCode != null) {
result = unpackedCode.getBytecodeSet();
}
cachedBitsets().put(xmethod, result);
return result;
}
|
java
|
public VirtualMachineExtensionInner beginCreateOrUpdate(String resourceGroupName, String vmName, String vmExtensionName, VirtualMachineExtensionInner extensionParameters) {
return beginCreateOrUpdateWithServiceResponseAsync(resourceGroupName, vmName, vmExtensionName, extensionParameters).toBlocking().single().body();
}
|
java
|
public void buildDeprecationInfo(XMLNode node, Content fieldDocTree) {
writer.addDeprecated(
(FieldDoc) fields.get(currentFieldIndex), fieldDocTree);
}
|